/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

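/* Illustrative usage (editor's note, not part of the driver): the
 * wrappers above paste the TG3_FLAG_ prefix onto their second
 * argument, so callers test and update tp->tg3_flags with bare flag
 * names, e.g.
 *
 *	if (tg3_flag(tp, JUMBO_CAPABLE))
 *		tg3_flag_set(tp, MDIOBUS_INITED);
 *
 * which expands to test_bit()/set_bit() on TG3_FLAG_JUMBO_CAPABLE and
 * TG3_FLAG_MDIOBUS_INITED.  The flag pairing here is only an example.
 */
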
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			119
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 18, 2011"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

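/* Illustrative note: because TG3_TX_RING_SIZE is a power of two (512),
 * NEXT_TX() can advance a ring index with a mask instead of a modulo,
 * exactly as the comment above describes:
 *
 *	(511 + 1) % 512 == 0   and   (511 + 1) & 511 == 0
 *
 * so entry = NEXT_TX(entry); wraps from 511 back to 0 without any
 * hardware divide.
 */
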
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

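/* Illustrative sketch (simplified, not the driver's actual receive
 * path): the threshold above is typically consulted per packet along
 * the lines of
 *
 *	if (len > TG3_RX_COPY_THRESH(tp))
 *		... hand the mapped buffer straight to the stack ...
 *	else
 *		... memcpy the small frame into a fresh skb ...
 *
 * so small frames are double-copied and large ones are not.
 */
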
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)

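/* Example: with the default tx_pending of TG3_DEF_TX_RING_PENDING
 * (511), TG3_TX_WAKEUP_THRESH() evaluates to 127, i.e. a stopped queue
 * is woken once roughly a quarter of the ring is free again.
 */
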
#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

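/* Illustrative usage of the accessor macros above: a plain posted
 * write, a flushed write, and a flushed write with a post-write delay
 * (the 40 usec value mirrors uses elsewhere in this file):
 *
 *	tw32(MAC_MODE, tp->mac_mode);
 *	tw32_f(MAC_MODE, tp->mac_mode);
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 */
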
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}

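/* Illustrative usage (hypothetical caller): the lock pair above is
 * meant to bracket accesses to resources shared with the APE firmware.
 * tg3_ape_lock() returns 0 on success, so a caller looks like:
 *
 *	if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *		... touch the shared resource ...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 */
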
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

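/* Illustrative usage: both helpers above return 0 on success and
 * -EBUSY on timeout, so callers typically test the return value, e.g.
 *
 *	u32 val;
 *
 *	if (!tg3_readphy(tp, MII_BMSR, &val))
 *		... val now holds the PHY's BMSR contents ...
 */
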
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

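/* Note on the two helpers above: they implement the indirect clause-45
 * MMD access sequence over clause-22 registers -- select the device
 * address in MMD_CTRL, write the register address to MMD_ADDRESS,
 * switch MMD_CTRL to no-increment data mode, then move the data
 * through MMD_ADDRESS.
 */
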
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);

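/* Illustrative usage (hypothetical caller): DSP register accesses are
 * normally bracketed by the two macros above, e.g.
 *
 *	if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 *		tg3_phydsp_write(tp, reg, val);
 *		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 *	}
 */
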
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;
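	/* Each polling pass below waits 8 usec, so the remaining time is
	 * converted to a pass count: '>> 3' divides by 8, and the '+ 1'
	 * guarantees at least one pass.
	 */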

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}

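/* Summary of the 1000BASE-X pause resolution above (a condensed view
 * of the standard 802.3 priority rules):
 *
 *	local PAUSE + ASYM, partner PAUSE        -> TX and RX pause
 *	local PAUSE + ASYM, partner ASYM only    -> RX pause only
 *	local PAUSE only,   partner PAUSE        -> TX and RX pause
 *	local ASYM only,    partner PAUSE + ASYM -> TX pause only
 */
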
1450 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1451 {
1452         u8 autoneg;
1453         u8 flowctrl = 0;
1454         u32 old_rx_mode = tp->rx_mode;
1455         u32 old_tx_mode = tp->tx_mode;
1456
1457         if (tg3_flag(tp, USE_PHYLIB))
1458                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1459         else
1460                 autoneg = tp->link_config.autoneg;
1461
1462         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1463                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1464                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1465                 else
1466                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1467         } else
1468                 flowctrl = tp->link_config.flowctrl;
1469
1470         tp->link_config.active_flowctrl = flowctrl;
1471
1472         if (flowctrl & FLOW_CTRL_RX)
1473                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1474         else
1475                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1476
1477         if (old_rx_mode != tp->rx_mode)
1478                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1479
1480         if (flowctrl & FLOW_CTRL_TX)
1481                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1482         else
1483                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1484
1485         if (old_tx_mode != tp->tx_mode)
1486                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1487 }
1488
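/* Worked example for the function above: copper link, autoneg on,
 * PAUSE_AUTONEG set, and both sides advertising symmetric pause
 * (lcladv = ADVERTISE_PAUSE_CAP, rmtadv = LPA_PAUSE_CAP).
 * mii_resolve_flowctrl_fdx() then returns FLOW_CTRL_TX | FLOW_CTRL_RX,
 * so both RX_MODE_FLOW_CTRL_ENABLE and TX_MODE_FLOW_CTRL_ENABLE are
 * set, and each MAC mode register is rewritten only when its value
 * actually changed (the old_*_mode comparisons).
 */
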
1489 static void tg3_adjust_link(struct net_device *dev)
1490 {
1491         u8 oldflowctrl, linkmesg = 0;
1492         u32 mac_mode, lcl_adv, rmt_adv;
1493         struct tg3 *tp = netdev_priv(dev);
1494         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1495
1496         spin_lock_bh(&tp->lock);
1497
1498         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1499                                     MAC_MODE_HALF_DUPLEX);
1500
1501         oldflowctrl = tp->link_config.active_flowctrl;
1502
1503         if (phydev->link) {
1504                 lcl_adv = 0;
1505                 rmt_adv = 0;
1506
1507                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1508                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1509                 else if (phydev->speed == SPEED_1000 ||
1510                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1511                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1512                 else
1513                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1514
1515                 if (phydev->duplex == DUPLEX_HALF)
1516                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1517                 else {
1518                         lcl_adv = tg3_advert_flowctrl_1000T(
1519                                   tp->link_config.flowctrl);
1520
1521                         if (phydev->pause)
1522                                 rmt_adv = LPA_PAUSE_CAP;
1523                         if (phydev->asym_pause)
1524                                 rmt_adv |= LPA_PAUSE_ASYM;
1525                 }
1526
1527                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1528         } else
1529                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1530
1531         if (mac_mode != tp->mac_mode) {
1532                 tp->mac_mode = mac_mode;
1533                 tw32_f(MAC_MODE, tp->mac_mode);
1534                 udelay(40);
1535         }
1536
1537         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1538                 if (phydev->speed == SPEED_10)
1539                         tw32(MAC_MI_STAT,
1540                              MAC_MI_STAT_10MBPS_MODE |
1541                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1542                 else
1543                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1544         }
1545
1546         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1547                 tw32(MAC_TX_LENGTHS,
1548                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1549                       (6 << TX_LENGTHS_IPG_SHIFT) |
1550                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1551         else
1552                 tw32(MAC_TX_LENGTHS,
1553                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1554                       (6 << TX_LENGTHS_IPG_SHIFT) |
1555                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1556
1557         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1558             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1559             phydev->speed != tp->link_config.active_speed ||
1560             phydev->duplex != tp->link_config.active_duplex ||
1561             oldflowctrl != tp->link_config.active_flowctrl)
1562                 linkmesg = 1;
1563
1564         tp->link_config.active_speed = phydev->speed;
1565         tp->link_config.active_duplex = phydev->duplex;
1566
1567         spin_unlock_bh(&tp->lock);
1568
1569         if (linkmesg)
1570                 tg3_link_report(tp);
1571 }
1572
1573 static int tg3_phy_init(struct tg3 *tp)
1574 {
1575         struct phy_device *phydev;
1576
1577         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1578                 return 0;
1579
1580         /* Bring the PHY back to a known state. */
1581         tg3_bmcr_reset(tp);
1582
1583         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1584
1585         /* Attach the MAC to the PHY. */
1586         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1587                              phydev->dev_flags, phydev->interface);
1588         if (IS_ERR(phydev)) {
1589                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1590                 return PTR_ERR(phydev);
1591         }
1592
1593         /* Mask with MAC supported features. */
1594         switch (phydev->interface) {
1595         case PHY_INTERFACE_MODE_GMII:
1596         case PHY_INTERFACE_MODE_RGMII:
1597                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1598                         phydev->supported &= (PHY_GBIT_FEATURES |
1599                                               SUPPORTED_Pause |
1600                                               SUPPORTED_Asym_Pause);
1601                         break;
1602                 }
1603                 /* fallthru */
1604         case PHY_INTERFACE_MODE_MII:
1605                 phydev->supported &= (PHY_BASIC_FEATURES |
1606                                       SUPPORTED_Pause |
1607                                       SUPPORTED_Asym_Pause);
1608                 break;
1609         default:
1610                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1611                 return -EINVAL;
1612         }
1613
1614         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1615
1616         phydev->advertising = phydev->supported;
1617
1618         return 0;
1619 }
1620
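/* The function above is the usual phylib attach sequence: reset the
 * PHY to a known state, phy_connect() it to the net_device with
 * tg3_adjust_link() as the link-change callback, then trim
 * phydev->supported down to what the MAC can actually do so that
 * ethtool cannot advertise unsupported modes.  Copying the trimmed
 * mask into phydev->advertising makes autoneg offer exactly the
 * supported set by default.
 */
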
1621 static void tg3_phy_start(struct tg3 *tp)
1622 {
1623         struct phy_device *phydev;
1624
1625         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1626                 return;
1627
1628         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1629
1630         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1631                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1632                 phydev->speed = tp->link_config.orig_speed;
1633                 phydev->duplex = tp->link_config.orig_duplex;
1634                 phydev->autoneg = tp->link_config.orig_autoneg;
1635                 phydev->advertising = tp->link_config.orig_advertising;
1636         }
1637
1638         phy_start(phydev);
1639
1640         phy_start_aneg(phydev);
1641 }
1642
1643 static void tg3_phy_stop(struct tg3 *tp)
1644 {
1645         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1646                 return;
1647
1648         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1649 }
1650
1651 static void tg3_phy_fini(struct tg3 *tp)
1652 {
1653         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1654                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1655                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1656         }
1657 }
1658
1659 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1660 {
1661         u32 phytest;
1662
1663         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1664                 u32 phy;
1665
1666                 tg3_writephy(tp, MII_TG3_FET_TEST,
1667                              phytest | MII_TG3_FET_SHADOW_EN);
1668                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1669                         if (enable)
1670                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1671                         else
1672                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1673                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1674                 }
1675                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1676         }
1677 }
1678
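/* FET shadow-register access idiom, used above and in several places
 * below: writing MII_TG3_FET_TEST with MII_TG3_FET_SHADOW_EN swaps a
 * bank of shadow registers into the MII address space, the shadow
 * register is read-modified-written, and the saved MII_TG3_FET_TEST
 * value is then restored to swap the normal registers back in.  The
 * writes are skipped entirely if the initial read fails, so a wedged
 * MDIO bus cannot leave the PHY stuck in shadow mode.
 */
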
1679 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1680 {
1681         u32 reg;
1682
1683         if (!tg3_flag(tp, 5705_PLUS) ||
1684             (tg3_flag(tp, 5717_PLUS) &&
1685              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1686                 return;
1687
1688         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1689                 tg3_phy_fet_toggle_apd(tp, enable);
1690                 return;
1691         }
1692
1693         reg = MII_TG3_MISC_SHDW_WREN |
1694               MII_TG3_MISC_SHDW_SCR5_SEL |
1695               MII_TG3_MISC_SHDW_SCR5_LPED |
1696               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1697               MII_TG3_MISC_SHDW_SCR5_SDTL |
1698               MII_TG3_MISC_SHDW_SCR5_C125OE;
1699         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1700                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1701
1702         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1703
1704
1705         reg = MII_TG3_MISC_SHDW_WREN |
1706               MII_TG3_MISC_SHDW_APD_SEL |
1707               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1708         if (enable)
1709                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1710
1711         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1712 }
1713
1714 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1715 {
1716         u32 phy;
1717
1718         if (!tg3_flag(tp, 5705_PLUS) ||
1719             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1720                 return;
1721
1722         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1723                 u32 ephy;
1724
1725                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1726                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1727
1728                         tg3_writephy(tp, MII_TG3_FET_TEST,
1729                                      ephy | MII_TG3_FET_SHADOW_EN);
1730                         if (!tg3_readphy(tp, reg, &phy)) {
1731                                 if (enable)
1732                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1733                                 else
1734                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1735                                 tg3_writephy(tp, reg, phy);
1736                         }
1737                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1738                 }
1739         } else {
1740                 int ret;
1741
1742                 ret = tg3_phy_auxctl_read(tp,
1743                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1744                 if (!ret) {
1745                         if (enable)
1746                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1747                         else
1748                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1749                         tg3_phy_auxctl_write(tp,
1750                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1751                 }
1752         }
1753 }
1754
1755 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1756 {
1757         int ret;
1758         u32 val;
1759
1760         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1761                 return;
1762
1763         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1764         if (!ret)
1765                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1766                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1767 }
1768
1769 static void tg3_phy_apply_otp(struct tg3 *tp)
1770 {
1771         u32 otp, phy;
1772
1773         if (!tp->phy_otp)
1774                 return;
1775
1776         otp = tp->phy_otp;
1777
1778         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1779                 return;
1780
1781         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1782         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1783         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1784
1785         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1786               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1787         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1788
1789         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1790         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1791         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1792
1793         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1794         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1795
1796         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1797         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1798
1799         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1800               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1801         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1802
1803         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1804 }
1805
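/* Each write above follows the same mask-and-shift pattern: isolate
 * one OTP field, move it down to bit 0 of the target DSP tap, and OR
 * in any required default bits.  As a purely illustrative example
 * (the real mask/shift values live in tg3.h): if AGCTGT occupied
 * bits 23:20 of the OTP word and otp == 0x00500000, then
 * (otp & 0x00f00000) >> 20 == 0x5, which is OR'd with
 * MII_TG3_DSP_TAP1_AGCTGT_DFLT before being written to DSP TAP1.
 */
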
1806 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1807 {
1808         u32 val;
1809
1810         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1811                 return;
1812
1813         tp->setlpicnt = 0;
1814
1815         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1816             current_link_up == 1 &&
1817             tp->link_config.active_duplex == DUPLEX_FULL &&
1818             (tp->link_config.active_speed == SPEED_100 ||
1819              tp->link_config.active_speed == SPEED_1000)) {
1820                 u32 eeectl;
1821
1822                 if (tp->link_config.active_speed == SPEED_1000)
1823                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1824                 else
1825                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1826
1827                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1828
1829                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1830                                   TG3_CL45_D7_EEERES_STAT, &val);
1831
1832                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1833                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1834                         tp->setlpicnt = 2;
1835         }
1836
1837         if (!tp->setlpicnt) {
1838                 val = tr32(TG3_CPMU_EEE_MODE);
1839                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1840         }
1841 }
1842
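/* EEE bookkeeping above: the LPI exit latency programmed into
 * TG3_CPMU_EEE_CTRL depends on speed (16.5 us at 1000 Mb/s, 36 us at
 * 100 Mb/s), and the link partner's EEE resolution is read through
 * the clause 45 auto-negotiation MMD (device 7, hence the _D7_ in
 * TG3_CL45_D7_EEERES_STAT).  setlpicnt = 2 arms a short countdown
 * that the driver's periodic timer is expected to consume before
 * tg3_phy_eee_enable() below is called, so LPI only comes on after
 * the link has been up for a couple of timer ticks; when EEE did not
 * resolve, LPI is switched off immediately instead.
 */
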
1843 static void tg3_phy_eee_enable(struct tg3 *tp)
1844 {
1845         u32 val;
1846
1847         if (tp->link_config.active_speed == SPEED_1000 &&
1848             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1849              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1850              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1851             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1852                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
1853                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1854         }
1855
1856         val = tr32(TG3_CPMU_EEE_MODE);
1857         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1858 }
1859
1860 static int tg3_wait_macro_done(struct tg3 *tp)
1861 {
1862         int limit = 100;
1863
1864         while (limit--) {
1865                 u32 tmp32;
1866
1867                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1868                         if ((tmp32 & 0x1000) == 0)
1869                                 break;
1870                 }
1871         }
1872         if (limit < 0)
1873                 return -EBUSY;
1874
1875         return 0;
1876 }
1877
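/* tg3_wait_macro_done() bounds the wait in MDIO transactions rather
 * than wall-clock time: there is no udelay() in the loop because each
 * tg3_readphy() is itself a polled MI_COM transaction and so provides
 * the pacing.  The loop treats bit 0x1000 of MII_TG3_DSP_CONTROL as a
 * busy flag and waits for the DSP macro engine to clear it.
 */
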
1878 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1879 {
1880         static const u32 test_pat[4][6] = {
1881         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1882         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1883         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1884         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1885         };
1886         int chan;
1887
1888         for (chan = 0; chan < 4; chan++) {
1889                 int i;
1890
1891                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1892                              (chan * 0x2000) | 0x0200);
1893                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1894
1895                 for (i = 0; i < 6; i++)
1896                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1897                                      test_pat[chan][i]);
1898
1899                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1900                 if (tg3_wait_macro_done(tp)) {
1901                         *resetp = 1;
1902                         return -EBUSY;
1903                 }
1904
1905                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1906                              (chan * 0x2000) | 0x0200);
1907                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1908                 if (tg3_wait_macro_done(tp)) {
1909                         *resetp = 1;
1910                         return -EBUSY;
1911                 }
1912
1913                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1914                 if (tg3_wait_macro_done(tp)) {
1915                         *resetp = 1;
1916                         return -EBUSY;
1917                 }
1918
1919                 for (i = 0; i < 6; i += 2) {
1920                         u32 low, high;
1921
1922                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1923                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1924                             tg3_wait_macro_done(tp)) {
1925                                 *resetp = 1;
1926                                 return -EBUSY;
1927                         }
1928                         low &= 0x7fff;
1929                         high &= 0x000f;
1930                         if (low != test_pat[chan][i] ||
1931                             high != test_pat[chan][i+1]) {
1932                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1933                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1934                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1935
1936                                 return -EBUSY;
1937                         }
1938                 }
1939         }
1940
1941         return 0;
1942 }
1943
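/* The self-test above exercises all four DSP channels: for each
 * channel it points MII_TG3_DSP_ADDRESS at (chan * 0x2000) | 0x0200,
 * writes the six-word pattern through the RW port, steps the macro
 * engine with the 0x0202, 0x0082 and 0x0802 control writes, then
 * reads six words back and compares them under the 0x7fff / 0x000f
 * masks.  On any mismatch it pokes the 0x000b / 0x4001 / 0x4005
 * recovery sequence and returns -EBUSY so that the caller
 * (tg3_phy_reset_5703_4_5() below) resets the PHY and retries.
 */
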
1944 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1945 {
1946         int chan;
1947
1948         for (chan = 0; chan < 4; chan++) {
1949                 int i;
1950
1951                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1952                              (chan * 0x2000) | 0x0200);
1953                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1954                 for (i = 0; i < 6; i++)
1955                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1956                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1957                 if (tg3_wait_macro_done(tp))
1958                         return -EBUSY;
1959         }
1960
1961         return 0;
1962 }
1963
1964 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1965 {
1966         u32 reg32, phy9_orig;
1967         int retries, do_phy_reset, err;
1968
1969         retries = 10;
1970         do_phy_reset = 1;
1971         do {
1972                 if (do_phy_reset) {
1973                         err = tg3_bmcr_reset(tp);
1974                         if (err)
1975                                 return err;
1976                         do_phy_reset = 0;
1977                 }
1978
1979                 /* Disable transmitter and interrupt.  */
1980                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1981                         continue;
1982
1983                 reg32 |= 0x3000;
1984                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1985
1986                 /* Set full-duplex, 1000 mbps.  */
1987                 tg3_writephy(tp, MII_BMCR,
1988                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1989                 /* Set full-duplex, 1000 Mbps.  */
1990                 /* Set to master mode.  */
1991                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1992                         continue;
1993
1994                 tg3_writephy(tp, MII_TG3_CTRL,
1995                              (MII_TG3_CTRL_AS_MASTER |
1996                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1997
1998                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1999                 if (err)
2000                         return err;
2001
2002                 /* Block the PHY control access.  */
2003                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2004
2005                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2006                 if (!err)
2007                         break;
2008         } while (--retries);
2009
2010         err = tg3_phy_reset_chanpat(tp);
2011         if (err)
2012                 return err;
2013
2014         tg3_phydsp_write(tp, 0x8005, 0x0000);
2015
2016         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2017         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2018
2019         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2020
2021         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
2022
2023         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2024                 reg32 &= ~0x3000;
2025                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2026         } else if (!err)
2027                 err = -EBUSY;
2028
2029         return err;
2030 }
2031
2032 /* Reset the tigon3 PHY unconditionally and re-apply the
2033  * chip-specific PHY workarounds afterwards.
2034  */
2035 static int tg3_phy_reset(struct tg3 *tp)
2036 {
2037         u32 val, cpmuctrl;
2038         int err;
2039
2040         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2041                 val = tr32(GRC_MISC_CFG);
2042                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2043                 udelay(40);
2044         }
2045         err  = tg3_readphy(tp, MII_BMSR, &val);
2046         err |= tg3_readphy(tp, MII_BMSR, &val);
2047         if (err != 0)
2048                 return -EBUSY;
2049
2050         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2051                 netif_carrier_off(tp->dev);
2052                 tg3_link_report(tp);
2053         }
2054
2055         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2056             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2057             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2058                 err = tg3_phy_reset_5703_4_5(tp);
2059                 if (err)
2060                         return err;
2061                 goto out;
2062         }
2063
2064         cpmuctrl = 0;
2065         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2066             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2067                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2068                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2069                         tw32(TG3_CPMU_CTRL,
2070                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2071         }
2072
2073         err = tg3_bmcr_reset(tp);
2074         if (err)
2075                 return err;
2076
2077         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2078                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2079                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2080
2081                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2082         }
2083
2084         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2085             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2086                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2087                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2088                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2089                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2090                         udelay(40);
2091                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2092                 }
2093         }
2094
2095         if (tg3_flag(tp, 5717_PLUS) &&
2096             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2097                 return 0;
2098
2099         tg3_phy_apply_otp(tp);
2100
2101         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2102                 tg3_phy_toggle_apd(tp, true);
2103         else
2104                 tg3_phy_toggle_apd(tp, false);
2105
2106 out:
2107         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2108             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2109                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2110                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2111                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2112         }
2113
2114         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2115                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2116                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2117         }
2118
2119         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2120                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2121                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2122                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2123                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2124                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2125                 }
2126         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2127                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2128                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2129                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2130                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2131                                 tg3_writephy(tp, MII_TG3_TEST1,
2132                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2133                         } else
2134                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2135
2136                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2137                 }
2138         }
2139
2140         /* Set Extended packet length bit (bit 14) on all chips
2141          * that support jumbo frames. */
2142         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2143                 /* Cannot do read-modify-write on 5401 */
2144                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2145         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2146                 /* Set bit 14 with read-modify-write to preserve other bits */
2147                 err = tg3_phy_auxctl_read(tp,
2148                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2149                 if (!err)
2150                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2151                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2152         }
2153
2154         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2155          * jumbo frame transmission.
2156          */
2157         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2158                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2159                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2160                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2161         }
2162
2163         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2164                 /* adjust output voltage */
2165                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2166         }
2167
2168         tg3_phy_toggle_automdix(tp, 1);
2169         tg3_phy_set_wirespeed(tp);
2170         return 0;
2171 }
2172
2173 static void tg3_frob_aux_power(struct tg3 *tp)
2174 {
2175         bool need_vaux = false;
2176
2177         /* The GPIOs do something completely different on 57765. */
2178         if (!tg3_flag(tp, IS_NIC) ||
2179             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2180             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2181                 return;
2182
2183         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2184              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2185              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2186              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2187             tp->pdev_peer != tp->pdev) {
2188                 struct net_device *dev_peer;
2189
2190                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2191
2192                 /* remove_one() may have been run on the peer. */
2193                 if (dev_peer) {
2194                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2195
2196                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2197                                 return;
2198
2199                         if (tg3_flag(tp_peer, WOL_ENABLE) ||
2200                             tg3_flag(tp_peer, ENABLE_ASF))
2201                                 need_vaux = true;
2202                 }
2203         }
2204
2205         if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2206                 need_vaux = true;
2207
2208         if (need_vaux) {
2209                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2210                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2211                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2212                                     (GRC_LCLCTRL_GPIO_OE0 |
2213                                      GRC_LCLCTRL_GPIO_OE1 |
2214                                      GRC_LCLCTRL_GPIO_OE2 |
2215                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2216                                      GRC_LCLCTRL_GPIO_OUTPUT1),
2217                                     100);
2218                 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2219                            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2220                         /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2221                         u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2222                                              GRC_LCLCTRL_GPIO_OE1 |
2223                                              GRC_LCLCTRL_GPIO_OE2 |
2224                                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2225                                              GRC_LCLCTRL_GPIO_OUTPUT1 |
2226                                              tp->grc_local_ctrl;
2227                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2228
2229                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2230                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2231
2232                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2233                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2234                 } else {
2235                         u32 no_gpio2;
2236                         u32 grc_local_ctrl = 0;
2237
2238                         /* Workaround to prevent drawing excess current. */
2239                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2240                             ASIC_REV_5714) {
2241                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2242                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2243                                             grc_local_ctrl, 100);
2244                         }
2245
2246                         /* On 5753 and variants, GPIO2 cannot be used. */
2247                         no_gpio2 = tp->nic_sram_data_cfg &
2248                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
2249
2250                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2251                                          GRC_LCLCTRL_GPIO_OE1 |
2252                                          GRC_LCLCTRL_GPIO_OE2 |
2253                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
2254                                          GRC_LCLCTRL_GPIO_OUTPUT2;
2255                         if (no_gpio2) {
2256                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2257                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
2258                         }
2259                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2260                                                     grc_local_ctrl, 100);
2261
2262                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2263
2264                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2265                                                     grc_local_ctrl, 100);
2266
2267                         if (!no_gpio2) {
2268                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2269                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2270                                             grc_local_ctrl, 100);
2271                         }
2272                 }
2273         } else {
2274                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2275                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2276                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2277                                     (GRC_LCLCTRL_GPIO_OE1 |
2278                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2279
2280                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2281                                     GRC_LCLCTRL_GPIO_OE1, 100);
2282
2283                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2284                                     (GRC_LCLCTRL_GPIO_OE1 |
2285                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2286                 }
2287         }
2288 }
2289
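/* Every GRC_LOCAL_CTRL update above goes through tw32_wait_f(), i.e.
 * write, flush, then a 100 usec settle delay, and the OE (output
 * enable) bits are raised together with or before the OUTPUT levels
 * they gate.  The staged sequences (raise OUTPUT2, then drop OUTPUT0,
 * and so on) appear to step the board's Vaux switchover one GPIO at a
 * time rather than slamming all rails at once.
 */
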
2290 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2291 {
2292         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2293                 return 1;
2294         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2295                 if (speed != SPEED_10)
2296                         return 1;
2297         } else if (speed == SPEED_10)
2298                 return 1;
2299
2300         return 0;
2301 }
2302
2303 static int tg3_setup_phy(struct tg3 *, int);
2304
2305 #define RESET_KIND_SHUTDOWN     0
2306 #define RESET_KIND_INIT         1
2307 #define RESET_KIND_SUSPEND      2
2308
2309 static void tg3_write_sig_post_reset(struct tg3 *, int);
2310 static int tg3_halt_cpu(struct tg3 *, u32);
2311
2312 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2313 {
2314         u32 val;
2315
2316         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2317                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2318                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2319                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2320
2321                         sg_dig_ctrl |=
2322                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2323                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2324                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2325                 }
2326                 return;
2327         }
2328
2329         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2330                 tg3_bmcr_reset(tp);
2331                 val = tr32(GRC_MISC_CFG);
2332                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2333                 udelay(40);
2334                 return;
2335         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2336                 u32 phytest;
2337                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2338                         u32 phy;
2339
2340                         tg3_writephy(tp, MII_ADVERTISE, 0);
2341                         tg3_writephy(tp, MII_BMCR,
2342                                      BMCR_ANENABLE | BMCR_ANRESTART);
2343
2344                         tg3_writephy(tp, MII_TG3_FET_TEST,
2345                                      phytest | MII_TG3_FET_SHADOW_EN);
2346                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2347                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2348                                 tg3_writephy(tp,
2349                                              MII_TG3_FET_SHDW_AUXMODE4,
2350                                              phy);
2351                         }
2352                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2353                 }
2354                 return;
2355         } else if (do_low_power) {
2356                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2357                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2358
2359                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2360                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2361                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2362                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2363         }
2364
2365         /* The PHY should not be powered down on some chips because
2366          * of bugs.
2367          */
2368         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2369             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2370             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2371              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2372                 return;
2373
2374         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2375             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2376                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2377                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2378                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2379                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2380         }
2381
2382         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2383 }
2384
2385 /* tp->lock is held. */
2386 static int tg3_nvram_lock(struct tg3 *tp)
2387 {
2388         if (tg3_flag(tp, NVRAM)) {
2389                 int i;
2390
2391                 if (tp->nvram_lock_cnt == 0) {
2392                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2393                         for (i = 0; i < 8000; i++) {
2394                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2395                                         break;
2396                                 udelay(20);
2397                         }
2398                         if (i == 8000) {
2399                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2400                                 return -ENODEV;
2401                         }
2402                 }
2403                 tp->nvram_lock_cnt++;
2404         }
2405         return 0;
2406 }
2407
2408 /* tp->lock is held. */
2409 static void tg3_nvram_unlock(struct tg3 *tp)
2410 {
2411         if (tg3_flag(tp, NVRAM)) {
2412                 if (tp->nvram_lock_cnt > 0)
2413                         tp->nvram_lock_cnt--;
2414                 if (tp->nvram_lock_cnt == 0)
2415                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2416         }
2417 }
2418
2419 /* tp->lock is held. */
2420 static void tg3_enable_nvram_access(struct tg3 *tp)
2421 {
2422         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2423                 u32 nvaccess = tr32(NVRAM_ACCESS);
2424
2425                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2426         }
2427 }
2428
2429 /* tp->lock is held. */
2430 static void tg3_disable_nvram_access(struct tg3 *tp)
2431 {
2432         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2433                 u32 nvaccess = tr32(NVRAM_ACCESS);
2434
2435                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2436         }
2437 }
2438
2439 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2440                                         u32 offset, u32 *val)
2441 {
2442         u32 tmp;
2443         int i;
2444
2445         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2446                 return -EINVAL;
2447
2448         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2449                                         EEPROM_ADDR_DEVID_MASK |
2450                                         EEPROM_ADDR_READ);
2451         tw32(GRC_EEPROM_ADDR,
2452              tmp |
2453              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2454              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2455               EEPROM_ADDR_ADDR_MASK) |
2456              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2457
2458         for (i = 0; i < 1000; i++) {
2459                 tmp = tr32(GRC_EEPROM_ADDR);
2460
2461                 if (tmp & EEPROM_ADDR_COMPLETE)
2462                         break;
2463                 msleep(1);
2464         }
2465         if (!(tmp & EEPROM_ADDR_COMPLETE))
2466                 return -EBUSY;
2467
2468         tmp = tr32(GRC_EEPROM_DATA);
2469
2470         /*
2471          * The data will always be opposite the native endian
2472          * format.  Perform a blind byteswap to compensate.
2473          */
2474         *val = swab32(tmp);
2475
2476         return 0;
2477 }
2478
2479 #define NVRAM_CMD_TIMEOUT 10000
2480
2481 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2482 {
2483         int i;
2484
2485         tw32(NVRAM_CMD, nvram_cmd);
2486         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2487                 udelay(10);
2488                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2489                         udelay(10);
2490                         break;
2491                 }
2492         }
2493
2494         if (i == NVRAM_CMD_TIMEOUT)
2495                 return -EBUSY;
2496
2497         return 0;
2498 }
2499
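/* Worst-case wait above: NVRAM_CMD_TIMEOUT (10000) iterations times
 * udelay(10) is roughly 100 ms before -EBUSY is returned, which is
 * generous for a single flash command but keeps slow parts working.
 */
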
2500 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2501 {
2502         if (tg3_flag(tp, NVRAM) &&
2503             tg3_flag(tp, NVRAM_BUFFERED) &&
2504             tg3_flag(tp, FLASH) &&
2505             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2506             (tp->nvram_jedecnum == JEDEC_ATMEL))
2507
2508                 addr = ((addr / tp->nvram_pagesize) <<
2509                         ATMEL_AT45DB0X1B_PAGE_POS) +
2510                        (addr % tp->nvram_pagesize);
2511
2512         return addr;
2513 }
2514
2515 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2516 {
2517         if (tg3_flag(tp, NVRAM) &&
2518             tg3_flag(tp, NVRAM_BUFFERED) &&
2519             tg3_flag(tp, FLASH) &&
2520             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2521             (tp->nvram_jedecnum == JEDEC_ATMEL))
2522
2523                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2524                         tp->nvram_pagesize) +
2525                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2526
2527         return addr;
2528 }
2529
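/* Address translation example for the two helpers above, assuming
 * the usual AT45DB0x1B geometry (264-byte pages, with
 * ATMEL_AT45DB0X1B_PAGE_POS == 9 so each page starts on a 512-byte
 * boundary in the physical map).  Linear address 1000:
 *
 *   page   = 1000 / 264 = 3
 *   offset = 1000 % 264 = 208
 *   phys   = (3 << 9) + 208 = 1744 (0x6d0)
 *
 * tg3_nvram_logical_addr() performs the exact inverse, so the two
 * functions round-trip any address within the part's capacity.
 */
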
2530 /* NOTE: Data read in from NVRAM is byteswapped according to
2531  * the byteswapping settings for all other register accesses.
2532  * tg3 devices are BE devices, so on a BE machine, the data
2533  * returned will be exactly as it is seen in NVRAM.  On a LE
2534  * machine, the 32-bit value will be byteswapped.
2535  */
2536 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2537 {
2538         int ret;
2539
2540         if (!tg3_flag(tp, NVRAM))
2541                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2542
2543         offset = tg3_nvram_phys_addr(tp, offset);
2544
2545         if (offset > NVRAM_ADDR_MSK)
2546                 return -EINVAL;
2547
2548         ret = tg3_nvram_lock(tp);
2549         if (ret)
2550                 return ret;
2551
2552         tg3_enable_nvram_access(tp);
2553
2554         tw32(NVRAM_ADDR, offset);
2555         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2556                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2557
2558         if (ret == 0)
2559                 *val = tr32(NVRAM_RDDATA);
2560
2561         tg3_disable_nvram_access(tp);
2562
2563         tg3_nvram_unlock(tp);
2564
2565         return ret;
2566 }
2567
2568 /* Ensures NVRAM data is in bytestream format. */
2569 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2570 {
2571         u32 v;
2572         int res = tg3_nvram_read(tp, offset, &v);
2573         if (!res)
2574                 *val = cpu_to_be32(v);
2575         return res;
2576 }
2577
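/* Illustrative caller sketch (not part of the driver): because
 * tg3_nvram_read_be32() hands back big-endian words, a byte buffer
 * filled with memcpy() ends up in bytestream order on both LE and BE
 * hosts.  Offset and length are assumed 4-byte aligned here.
 */
#if 0
static int tg3_nvram_read_block_example(struct tg3 *tp, u32 offset,
                                        u8 *buf, u32 len)
{
        u32 i;

        for (i = 0; i < len; i += 4) {
                __be32 v;
                int err = tg3_nvram_read_be32(tp, offset + i, &v);

                if (err)
                        return err;
                memcpy(buf + i, &v, sizeof(v));
        }
        return 0;
}
#endif
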
2578 /* tp->lock is held. */
2579 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2580 {
2581         u32 addr_high, addr_low;
2582         int i;
2583
2584         addr_high = ((tp->dev->dev_addr[0] << 8) |
2585                      tp->dev->dev_addr[1]);
2586         addr_low = ((tp->dev->dev_addr[2] << 24) |
2587                     (tp->dev->dev_addr[3] << 16) |
2588                     (tp->dev->dev_addr[4] <<  8) |
2589                     (tp->dev->dev_addr[5] <<  0));
2590         for (i = 0; i < 4; i++) {
2591                 if (i == 1 && skip_mac_1)
2592                         continue;
2593                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2594                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2595         }
2596
2597         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2598             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2599                 for (i = 0; i < 12; i++) {
2600                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2601                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2602                 }
2603         }
2604
2605         addr_high = (tp->dev->dev_addr[0] +
2606                      tp->dev->dev_addr[1] +
2607                      tp->dev->dev_addr[2] +
2608                      tp->dev->dev_addr[3] +
2609                      tp->dev->dev_addr[4] +
2610                      tp->dev->dev_addr[5]) &
2611                 TX_BACKOFF_SEED_MASK;
2612         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2613 }
2614
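/* Packing example for __tg3_set_mac_addr(): for dev_addr
 * 00:10:18:aa:bb:cc,
 *
 *   addr_high = (0x00 << 8) | 0x10             = 0x00000010
 *   addr_low  = (0x18 << 24) | (0xaa << 16) |
 *               (0xbb << 8)  |  0xcc           = 0x18aabbcc
 *
 * The same pair is copied into all four MAC_ADDR_[0-3] slots
 * (skipping slot 1 when skip_mac_1 is set, and covering the twelve
 * extended slots on 5703/5704), while the TX backoff seed is just the
 * byte sum masked with TX_BACKOFF_SEED_MASK, giving each station a
 * different retransmission backoff pattern.
 */
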
2615 static void tg3_enable_register_access(struct tg3 *tp)
2616 {
2617         /*
2618          * Make sure register accesses (indirect or otherwise) will function
2619          * correctly.
2620          */
2621         pci_write_config_dword(tp->pdev,
2622                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2623 }
2624
2625 static int tg3_power_up(struct tg3 *tp)
2626 {
2627         tg3_enable_register_access(tp);
2628
2629         pci_set_power_state(tp->pdev, PCI_D0);
2630
2631         /* Switch out of Vaux if it is a NIC */
2632         if (tg3_flag(tp, IS_NIC))
2633                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2634
2635         return 0;
2636 }
2637
2638 static int tg3_power_down_prepare(struct tg3 *tp)
2639 {
2640         u32 misc_host_ctrl;
2641         bool device_should_wake, do_low_power;
2642
2643         tg3_enable_register_access(tp);
2644
2645         /* Restore the CLKREQ setting. */
2646         if (tg3_flag(tp, CLKREQ_BUG)) {
2647                 u16 lnkctl;
2648
2649                 pci_read_config_word(tp->pdev,
2650                                      tp->pcie_cap + PCI_EXP_LNKCTL,
2651                                      &lnkctl);
2652                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2653                 pci_write_config_word(tp->pdev,
2654                                       tp->pcie_cap + PCI_EXP_LNKCTL,
2655                                       lnkctl);
2656         }
2657
2658         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2659         tw32(TG3PCI_MISC_HOST_CTRL,
2660              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2661
2662         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2663                              tg3_flag(tp, WOL_ENABLE);
2664
2665         if (tg3_flag(tp, USE_PHYLIB)) {
2666                 do_low_power = false;
2667                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2668                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2669                         struct phy_device *phydev;
2670                         u32 phyid, advertising;
2671
2672                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2673
2674                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2675
2676                         tp->link_config.orig_speed = phydev->speed;
2677                         tp->link_config.orig_duplex = phydev->duplex;
2678                         tp->link_config.orig_autoneg = phydev->autoneg;
2679                         tp->link_config.orig_advertising = phydev->advertising;
2680
2681                         advertising = ADVERTISED_TP |
2682                                       ADVERTISED_Pause |
2683                                       ADVERTISED_Autoneg |
2684                                       ADVERTISED_10baseT_Half;
2685
2686                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2687                                 if (tg3_flag(tp, WOL_SPEED_100MB))
2688                                         advertising |=
2689                                                 ADVERTISED_100baseT_Half |
2690                                                 ADVERTISED_100baseT_Full |
2691                                                 ADVERTISED_10baseT_Full;
2692                                 else
2693                                         advertising |= ADVERTISED_10baseT_Full;
2694                         }
2695
2696                         phydev->advertising = advertising;
2697
2698                         phy_start_aneg(phydev);
2699
2700                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2701                         if (phyid != PHY_ID_BCMAC131) {
2702                                 phyid &= PHY_BCM_OUI_MASK;
2703                                 if (phyid == PHY_BCM_OUI_1 ||
2704                                     phyid == PHY_BCM_OUI_2 ||
2705                                     phyid == PHY_BCM_OUI_3)
2706                                         do_low_power = true;
2707                         }
2708                 }
2709         } else {
2710                 do_low_power = true;
2711
2712                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2713                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2714                         tp->link_config.orig_speed = tp->link_config.speed;
2715                         tp->link_config.orig_duplex = tp->link_config.duplex;
2716                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2717                 }
2718
2719                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2720                         tp->link_config.speed = SPEED_10;
2721                         tp->link_config.duplex = DUPLEX_HALF;
2722                         tp->link_config.autoneg = AUTONEG_ENABLE;
2723                         tg3_setup_phy(tp, 0);
2724                 }
2725         }
2726
2727         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2728                 u32 val;
2729
2730                 val = tr32(GRC_VCPU_EXT_CTRL);
2731                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2732         } else if (!tg3_flag(tp, ENABLE_ASF)) {
2733                 int i;
2734                 u32 val;
2735
2736                 for (i = 0; i < 200; i++) {
2737                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2738                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2739                                 break;
2740                         msleep(1);
2741                 }
2742         }
2743         if (tg3_flag(tp, WOL_CAP))
2744                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2745                                                      WOL_DRV_STATE_SHUTDOWN |
2746                                                      WOL_DRV_WOL |
2747                                                      WOL_SET_MAGIC_PKT);
2748
2749         if (device_should_wake) {
2750                 u32 mac_mode;
2751
2752                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2753                         if (do_low_power &&
2754                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2755                                 tg3_phy_auxctl_write(tp,
2756                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2757                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
2758                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2759                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2760                                 udelay(40);
2761                         }
2762
2763                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2764                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2765                         else
2766                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2767
2768                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2769                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2770                             ASIC_REV_5700) {
2771                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2772                                              SPEED_100 : SPEED_10;
2773                                 if (tg3_5700_link_polarity(tp, speed))
2774                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2775                                 else
2776                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2777                         }
2778                 } else {
2779                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2780                 }
2781
2782                 if (!tg3_flag(tp, 5750_PLUS))
2783                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2784
2785                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2786                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2787                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2788                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2789
2790                 if (tg3_flag(tp, ENABLE_APE))
2791                         mac_mode |= MAC_MODE_APE_TX_EN |
2792                                     MAC_MODE_APE_RX_EN |
2793                                     MAC_MODE_TDE_ENABLE;
2794
2795                 tw32_f(MAC_MODE, mac_mode);
2796                 udelay(100);
2797
2798                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2799                 udelay(10);
2800         }
2801
2802         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2803             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2804              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2805                 u32 base_val;
2806
2807                 base_val = tp->pci_clock_ctrl;
2808                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2809                              CLOCK_CTRL_TXCLK_DISABLE);
2810
2811                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2812                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2813         } else if (tg3_flag(tp, 5780_CLASS) ||
2814                    tg3_flag(tp, CPMU_PRESENT) ||
2815                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2816                 /* do nothing */
2817         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2818                 u32 newbits1, newbits2;
2819
2820                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2821                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2822                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2823                                     CLOCK_CTRL_TXCLK_DISABLE |
2824                                     CLOCK_CTRL_ALTCLK);
2825                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2826                 } else if (tg3_flag(tp, 5705_PLUS)) {
2827                         newbits1 = CLOCK_CTRL_625_CORE;
2828                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2829                 } else {
2830                         newbits1 = CLOCK_CTRL_ALTCLK;
2831                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2832                 }
2833
2834                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2835                             40);
2836
2837                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2838                             40);
2839
2840                 if (!tg3_flag(tp, 5705_PLUS)) {
2841                         u32 newbits3;
2842
2843                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2844                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2845                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2846                                             CLOCK_CTRL_TXCLK_DISABLE |
2847                                             CLOCK_CTRL_44MHZ_CORE);
2848                         } else {
2849                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2850                         }
2851
2852                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2853                                     tp->pci_clock_ctrl | newbits3, 40);
2854                 }
2855         }
2856
2857         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
2858                 tg3_power_down_phy(tp, do_low_power);
2859
2860         tg3_frob_aux_power(tp);
2861
2862         /* Workaround for unstable PLL clock */
2863         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2864             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2865                 u32 val = tr32(0x7d00);
2866
2867                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2868                 tw32(0x7d00, val);
2869                 if (!tg3_flag(tp, ENABLE_ASF)) {
2870                         int err;
2871
2872                         err = tg3_nvram_lock(tp);
2873                         tg3_halt_cpu(tp, RX_CPU_BASE);
2874                         if (!err)
2875                                 tg3_nvram_unlock(tp);
2876                 }
2877         }
2878
2879         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2880
2881         return 0;
2882 }
2883
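/* Final power-down path: tg3_power_down_prepare() quiesces the chip
 * and, when WoL is enabled, arms the magic-packet logic (see
 * MAC_MODE_MAGIC_PKT_ENABLE above); PME is then enabled according to
 * the WOL_ENABLE flag before the device is placed in D3hot.
 */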
2884 static void tg3_power_down(struct tg3 *tp)
2885 {
2886         tg3_power_down_prepare(tp);
2887
2888         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2889         pci_set_power_state(tp->pdev, PCI_D3hot);
2890 }
2891
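/* Decode the speed/duplex field of the Broadcom auxiliary status
 * register into ethtool SPEED_xxx / DUPLEX_xxx values.  FET-style PHYs
 * do not use the SPDMASK encoding and are handled in the default case
 * via the separate MII_TG3_AUX_STAT_100 and _FULL bits; anything else
 * unrecognized decodes to SPEED_INVALID / DUPLEX_INVALID.
 */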
2892 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2893 {
2894         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2895         case MII_TG3_AUX_STAT_10HALF:
2896                 *speed = SPEED_10;
2897                 *duplex = DUPLEX_HALF;
2898                 break;
2899
2900         case MII_TG3_AUX_STAT_10FULL:
2901                 *speed = SPEED_10;
2902                 *duplex = DUPLEX_FULL;
2903                 break;
2904
2905         case MII_TG3_AUX_STAT_100HALF:
2906                 *speed = SPEED_100;
2907                 *duplex = DUPLEX_HALF;
2908                 break;
2909
2910         case MII_TG3_AUX_STAT_100FULL:
2911                 *speed = SPEED_100;
2912                 *duplex = DUPLEX_FULL;
2913                 break;
2914
2915         case MII_TG3_AUX_STAT_1000HALF:
2916                 *speed = SPEED_1000;
2917                 *duplex = DUPLEX_HALF;
2918                 break;
2919
2920         case MII_TG3_AUX_STAT_1000FULL:
2921                 *speed = SPEED_1000;
2922                 *duplex = DUPLEX_FULL;
2923                 break;
2924
2925         default:
2926                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2927                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2928                                  SPEED_10;
2929                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2930                                   DUPLEX_HALF;
2931                         break;
2932                 }
2933                 *speed = SPEED_INVALID;
2934                 *duplex = DUPLEX_INVALID;
2935                 break;
2936         }
2937 }
2938
2939 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
2940 {
2941         int err = 0;
2942         u32 val, new_adv;
2943
2944         new_adv = ADVERTISE_CSMA;
2945         if (advertise & ADVERTISED_10baseT_Half)
2946                 new_adv |= ADVERTISE_10HALF;
2947         if (advertise & ADVERTISED_10baseT_Full)
2948                 new_adv |= ADVERTISE_10FULL;
2949         if (advertise & ADVERTISED_100baseT_Half)
2950                 new_adv |= ADVERTISE_100HALF;
2951         if (advertise & ADVERTISED_100baseT_Full)
2952                 new_adv |= ADVERTISE_100FULL;
2953
2954         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
2955
2956         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
2957         if (err)
2958                 goto done;
2959
2960         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2961                 goto done;
2962
2963         new_adv = 0;
2964         if (advertise & ADVERTISED_1000baseT_Half)
2965                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2966         if (advertise & ADVERTISED_1000baseT_Full)
2967                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2968
2969         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2970             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2971                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2972                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2973
2974         err = tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2975         if (err)
2976                 goto done;
2977
2978         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2979                 goto done;
2980
2981         tw32(TG3_CPMU_EEE_MODE,
2982              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2983
2984         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2985         if (!err) {
2986                 u32 err2;
2987
2988                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2989                 case ASIC_REV_5717:
2990                 case ASIC_REV_57765:
2991                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2992                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
2993                                                  MII_TG3_DSP_CH34TP2_HIBW01);
2994                         /* Fall through */
2995                 case ASIC_REV_5719:
2996                         val = MII_TG3_DSP_TAP26_ALNOKO |
2997                               MII_TG3_DSP_TAP26_RMRXSTO |
2998                               MII_TG3_DSP_TAP26_OPCSINPT;
2999                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3000                 }
3001
3002                 val = 0;
3003                 /* Advertise 100BASE-TX EEE ability */
3004                 if (advertise & ADVERTISED_100baseT_Full)
3005                         val |= MDIO_AN_EEE_ADV_100TX;
3006                 /* Advertise 1000BASE-T EEE ability */
3007                 if (advertise & ADVERTISED_1000baseT_Full)
3008                         val |= MDIO_AN_EEE_ADV_1000T;
3009                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3010
3011                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3012                 if (!err)
3013                         err = err2;
3014         }
3015
3016 done:
3017         return err;
3018 }
3019
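/* Build an advertisement mask from tp->link_config and hand it to
 * tg3_phy_autoneg_cfg() above.  As an illustrative example (not an
 * actual call site in this driver), advertising only 100 Mb/s full
 * duplex with symmetric pause would look like:
 *
 *      tg3_phy_autoneg_cfg(tp, ADVERTISED_100baseT_Full,
 *                          FLOW_CTRL_TX | FLOW_CTRL_RX);
 *
 * When autonegotiation is disabled and a specific speed is requested,
 * BMCR is instead programmed directly at the bottom of the function.
 */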
3020 static void tg3_phy_copper_begin(struct tg3 *tp)
3021 {
3022         u32 new_adv;
3023         int i;
3024
3025         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3026                 new_adv = ADVERTISED_10baseT_Half |
3027                           ADVERTISED_10baseT_Full;
3028                 if (tg3_flag(tp, WOL_SPEED_100MB))
3029                         new_adv |= ADVERTISED_100baseT_Half |
3030                                    ADVERTISED_100baseT_Full;
3031
3032                 tg3_phy_autoneg_cfg(tp, new_adv,
3033                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3034         } else if (tp->link_config.speed == SPEED_INVALID) {
3035                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3036                         tp->link_config.advertising &=
3037                                 ~(ADVERTISED_1000baseT_Half |
3038                                   ADVERTISED_1000baseT_Full);
3039
3040                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3041                                     tp->link_config.flowctrl);
3042         } else {
3043                 /* Asking for a specific link mode. */
3044                 if (tp->link_config.speed == SPEED_1000) {
3045                         if (tp->link_config.duplex == DUPLEX_FULL)
3046                                 new_adv = ADVERTISED_1000baseT_Full;
3047                         else
3048                                 new_adv = ADVERTISED_1000baseT_Half;
3049                 } else if (tp->link_config.speed == SPEED_100) {
3050                         if (tp->link_config.duplex == DUPLEX_FULL)
3051                                 new_adv = ADVERTISED_100baseT_Full;
3052                         else
3053                                 new_adv = ADVERTISED_100baseT_Half;
3054                 } else {
3055                         if (tp->link_config.duplex == DUPLEX_FULL)
3056                                 new_adv = ADVERTISED_10baseT_Full;
3057                         else
3058                                 new_adv = ADVERTISED_10baseT_Half;
3059                 }
3060
3061                 tg3_phy_autoneg_cfg(tp, new_adv,
3062                                     tp->link_config.flowctrl);
3063         }
3064
3065         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3066             tp->link_config.speed != SPEED_INVALID) {
3067                 u32 bmcr, orig_bmcr;
3068
3069                 tp->link_config.active_speed = tp->link_config.speed;
3070                 tp->link_config.active_duplex = tp->link_config.duplex;
3071
3072                 bmcr = 0;
3073                 switch (tp->link_config.speed) {
3074                 default:
3075                 case SPEED_10:
3076                         break;
3077
3078                 case SPEED_100:
3079                         bmcr |= BMCR_SPEED100;
3080                         break;
3081
3082                 case SPEED_1000:
3083                         bmcr |= TG3_BMCR_SPEED1000;
3084                         break;
3085                 }
3086
3087                 if (tp->link_config.duplex == DUPLEX_FULL)
3088                         bmcr |= BMCR_FULLDPLX;
3089
3090                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3091                     (bmcr != orig_bmcr)) {
3092                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3093                         for (i = 0; i < 1500; i++) {
3094                                 u32 tmp;
3095
3096                                 udelay(10);
3097                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3098                                     tg3_readphy(tp, MII_BMSR, &tmp))
3099                                         continue;
3100                                 if (!(tmp & BMSR_LSTATUS)) {
3101                                         udelay(40);
3102                                         break;
3103                                 }
3104                         }
3105                         tg3_writephy(tp, MII_BMCR, bmcr);
3106                         udelay(40);
3107                 }
3108         } else {
3109                 tg3_writephy(tp, MII_BMCR,
3110                              BMCR_ANENABLE | BMCR_ANRESTART);
3111         }
3112 }
3113
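/* One-time DSP fixup for BCM5401 PHYs.  Note the error-accumulation
 * idiom: the result of each write is OR-ed into err, so the function
 * returns nonzero if any single write failed without aborting the
 * sequence early.
 */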
3114 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3115 {
3116         int err;
3117
3118         /* Turn off tap power management and set the
3119          * extended packet length bit. */
3120         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3121
3122         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3123         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3124         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3125         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3126         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3127
3128         udelay(40);
3129
3130         return err;
3131 }
3132
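/* Return 1 if the PHY advertisement registers already cover every mode
 * requested in @mask, 0 otherwise (including on any MDIO read failure).
 * tg3_setup_copper_phy() below uses this to decide whether the current
 * autonegotiation result can be trusted or a renegotiation is needed.
 */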
3133 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3134 {
3135         u32 adv_reg, all_mask = 0;
3136
3137         if (mask & ADVERTISED_10baseT_Half)
3138                 all_mask |= ADVERTISE_10HALF;
3139         if (mask & ADVERTISED_10baseT_Full)
3140                 all_mask |= ADVERTISE_10FULL;
3141         if (mask & ADVERTISED_100baseT_Half)
3142                 all_mask |= ADVERTISE_100HALF;
3143         if (mask & ADVERTISED_100baseT_Full)
3144                 all_mask |= ADVERTISE_100FULL;
3145
3146         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3147                 return 0;
3148
3149         if ((adv_reg & all_mask) != all_mask)
3150                 return 0;
3151         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3152                 u32 tg3_ctrl;
3153
3154                 all_mask = 0;
3155                 if (mask & ADVERTISED_1000baseT_Half)
3156                         all_mask |= ADVERTISE_1000HALF;
3157                 if (mask & ADVERTISED_1000baseT_Full)
3158                         all_mask |= ADVERTISE_1000FULL;
3159
3160                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3161                         return 0;
3162
3163                 if ((tg3_ctrl & all_mask) != all_mask)
3164                         return 0;
3165         }
3166         return 1;
3167 }
3168
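/* Verify that the advertised pause bits match what
 * tp->link_config.flowctrl requests.  Returns 0 only for a full-duplex
 * link with a stale pause advertisement (forcing the caller to treat
 * the link as down and renegotiate); for a half-duplex link a mismatch
 * is silently repaired in MII_ADVERTISE for the next negotiation and
 * 1 is returned.
 */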
3169 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3170 {
3171         u32 curadv, reqadv;
3172
3173         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3174                 return 1;
3175
3176         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3177         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3178
3179         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3180                 if (curadv != reqadv)
3181                         return 0;
3182
3183                 if (tg3_flag(tp, PAUSE_AUTONEG))
3184                         tg3_readphy(tp, MII_LPA, rmtadv);
3185         } else {
3186                 /* Reprogram the advertisement register, even if it
3187                  * does not affect the current link.  If the link
3188                  * gets renegotiated in the future, we can save an
3189                  * additional renegotiation cycle by advertising
3190                  * it correctly in the first place.
3191                  */
3192                 if (curadv != reqadv) {
3193                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3194                                      ADVERTISE_PAUSE_ASYM);
3195                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3196                 }
3197         }
3198
3199         return 1;
3200 }
3201
3202 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3203 {
3204         int current_link_up;
3205         u32 bmsr, val;
3206         u32 lcl_adv, rmt_adv;
3207         u16 current_speed;
3208         u8 current_duplex;
3209         int i, err;
3210
3211         tw32(MAC_EVENT, 0);
3212
3213         tw32_f(MAC_STATUS,
3214              (MAC_STATUS_SYNC_CHANGED |
3215               MAC_STATUS_CFG_CHANGED |
3216               MAC_STATUS_MI_COMPLETION |
3217               MAC_STATUS_LNKSTATE_CHANGED));
3218         udelay(40);
3219
3220         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3221                 tw32_f(MAC_MI_MODE,
3222                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3223                 udelay(80);
3224         }
3225
3226         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3227
3228         /* Some third-party PHYs need to be reset on link going
3229          * down.
3230          */
3231         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3232              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3233              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3234             netif_carrier_ok(tp->dev)) {
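                /* BMSR_LSTATUS is latched low per the MII spec: a past
                 * link drop reads as 0 until the register is read once.
                 * Reading BMSR twice therefore yields the current state.
                 */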
3235                 tg3_readphy(tp, MII_BMSR, &bmsr);
3236                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3237                     !(bmsr & BMSR_LSTATUS))
3238                         force_reset = 1;
3239         }
3240         if (force_reset)
3241                 tg3_phy_reset(tp);
3242
3243         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3244                 tg3_readphy(tp, MII_BMSR, &bmsr);
3245                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3246                     !tg3_flag(tp, INIT_COMPLETE))
3247                         bmsr = 0;
3248
3249                 if (!(bmsr & BMSR_LSTATUS)) {
3250                         err = tg3_init_5401phy_dsp(tp);
3251                         if (err)
3252                                 return err;
3253
3254                         tg3_readphy(tp, MII_BMSR, &bmsr);
3255                         for (i = 0; i < 1000; i++) {
3256                                 udelay(10);
3257                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3258                                     (bmsr & BMSR_LSTATUS)) {
3259                                         udelay(40);
3260                                         break;
3261                                 }
3262                         }
3263
3264                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3265                             TG3_PHY_REV_BCM5401_B0 &&
3266                             !(bmsr & BMSR_LSTATUS) &&
3267                             tp->link_config.active_speed == SPEED_1000) {
3268                                 err = tg3_phy_reset(tp);
3269                                 if (!err)
3270                                         err = tg3_init_5401phy_dsp(tp);
3271                                 if (err)
3272                                         return err;
3273                         }
3274                 }
3275         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3276                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3277                 /* 5701 {A0,B0} CRC bug workaround */
3278                 tg3_writephy(tp, 0x15, 0x0a75);
3279                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3280                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3281                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3282         }
3283
3284         /* Clear pending interrupts... */
3285         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3286         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3287
3288         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3289                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3290         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3291                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3292
3293         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3294             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3295                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3296                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3297                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3298                 else
3299                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3300         }
3301
3302         current_link_up = 0;
3303         current_speed = SPEED_INVALID;
3304         current_duplex = DUPLEX_INVALID;
3305
3306         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3307                 err = tg3_phy_auxctl_read(tp,
3308                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3309                                           &val);
3310                 if (!err && !(val & (1 << 10))) {
3311                         tg3_phy_auxctl_write(tp,
3312                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3313                                              val | (1 << 10));
3314                         goto relink;
3315                 }
3316         }
3317
3318         bmsr = 0;
3319         for (i = 0; i < 100; i++) {
3320                 tg3_readphy(tp, MII_BMSR, &bmsr);
3321                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3322                     (bmsr & BMSR_LSTATUS))
3323                         break;
3324                 udelay(40);
3325         }
3326
3327         if (bmsr & BMSR_LSTATUS) {
3328                 u32 aux_stat, bmcr;
3329
3330                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3331                 for (i = 0; i < 2000; i++) {
3332                         udelay(10);
3333                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3334                             aux_stat)
3335                                 break;
3336                 }
3337
3338                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3339                                              &current_speed,
3340                                              &current_duplex);
3341
3342                 bmcr = 0;
3343                 for (i = 0; i < 200; i++) {
3344                         tg3_readphy(tp, MII_BMCR, &bmcr);
3345                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3346                                 continue;
3347                         if (bmcr && bmcr != 0x7fff)
3348                                 break;
3349                         udelay(10);
3350                 }
3351
3352                 lcl_adv = 0;
3353                 rmt_adv = 0;
3354
3355                 tp->link_config.active_speed = current_speed;
3356                 tp->link_config.active_duplex = current_duplex;
3357
3358                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3359                         if ((bmcr & BMCR_ANENABLE) &&
3360                             tg3_copper_is_advertising_all(tp,
3361                                                 tp->link_config.advertising)) {
3362                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3363                                                                   &rmt_adv))
3364                                         current_link_up = 1;
3365                         }
3366                 } else {
3367                         if (!(bmcr & BMCR_ANENABLE) &&
3368                             tp->link_config.speed == current_speed &&
3369                             tp->link_config.duplex == current_duplex &&
3370                             tp->link_config.flowctrl ==
3371                             tp->link_config.active_flowctrl) {
3372                                 current_link_up = 1;
3373                         }
3374                 }
3375
3376                 if (current_link_up == 1 &&
3377                     tp->link_config.active_duplex == DUPLEX_FULL)
3378                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3379         }
3380
3381 relink:
3382         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3383                 tg3_phy_copper_begin(tp);
3384
3385                 tg3_readphy(tp, MII_BMSR, &bmsr);
3386                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3387                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3388                         current_link_up = 1;
3389         }
3390
3391         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3392         if (current_link_up == 1) {
3393                 if (tp->link_config.active_speed == SPEED_100 ||
3394                     tp->link_config.active_speed == SPEED_10)
3395                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3396                 else
3397                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3398         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3399                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3400         else
3401                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3402
3403         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3404         if (tp->link_config.active_duplex == DUPLEX_HALF)
3405                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3406
3407         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3408                 if (current_link_up == 1 &&
3409                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3410                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3411                 else
3412                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3413         }
3414
3415         /* ??? Without this setting Netgear GA302T PHY does not
3416          * ??? send/receive packets...
3417          */
3418         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3419             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3420                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3421                 tw32_f(MAC_MI_MODE, tp->mi_mode);
3422                 udelay(80);
3423         }
3424
3425         tw32_f(MAC_MODE, tp->mac_mode);
3426         udelay(40);
3427
3428         tg3_phy_eee_adjust(tp, current_link_up);
3429
3430         if (tg3_flag(tp, USE_LINKCHG_REG)) {
3431                 /* Polled via timer. */
3432                 tw32_f(MAC_EVENT, 0);
3433         } else {
3434                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3435         }
3436         udelay(40);
3437
3438         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3439             current_link_up == 1 &&
3440             tp->link_config.active_speed == SPEED_1000 &&
3441             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3442                 udelay(120);
3443                 tw32_f(MAC_STATUS,
3444                      (MAC_STATUS_SYNC_CHANGED |
3445                       MAC_STATUS_CFG_CHANGED));
3446                 udelay(40);
3447                 tg3_write_mem(tp,
3448                               NIC_SRAM_FIRMWARE_MBOX,
3449                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3450         }
3451
3452         /* Prevent send BD corruption. */
3453         if (tg3_flag(tp, CLKREQ_BUG)) {
3454                 u16 oldlnkctl, newlnkctl;
3455
3456                 pci_read_config_word(tp->pdev,
3457                                      tp->pcie_cap + PCI_EXP_LNKCTL,
3458                                      &oldlnkctl);
3459                 if (tp->link_config.active_speed == SPEED_100 ||
3460                     tp->link_config.active_speed == SPEED_10)
3461                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3462                 else
3463                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3464                 if (newlnkctl != oldlnkctl)
3465                         pci_write_config_word(tp->pdev,
3466                                               tp->pcie_cap + PCI_EXP_LNKCTL,
3467                                               newlnkctl);
3468         }
3469
3470         if (current_link_up != netif_carrier_ok(tp->dev)) {
3471                 if (current_link_up)
3472                         netif_carrier_on(tp->dev);
3473                 else
3474                         netif_carrier_off(tp->dev);
3475                 tg3_link_report(tp);
3476         }
3477
3478         return 0;
3479 }
3480
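/* State block for the software 1000BASE-X autonegotiation engine used
 * when the SG-DIG hardware autoneg block is unavailable (see
 * tg3_setup_fiber_by_hand() below).  The MR_* flags mirror the mr_*
 * management variables of IEEE 802.3 Clause 37.
 */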
3481 struct tg3_fiber_aneginfo {
3482         int state;
3483 #define ANEG_STATE_UNKNOWN              0
3484 #define ANEG_STATE_AN_ENABLE            1
3485 #define ANEG_STATE_RESTART_INIT         2
3486 #define ANEG_STATE_RESTART              3
3487 #define ANEG_STATE_DISABLE_LINK_OK      4
3488 #define ANEG_STATE_ABILITY_DETECT_INIT  5
3489 #define ANEG_STATE_ABILITY_DETECT       6
3490 #define ANEG_STATE_ACK_DETECT_INIT      7
3491 #define ANEG_STATE_ACK_DETECT           8
3492 #define ANEG_STATE_COMPLETE_ACK_INIT    9
3493 #define ANEG_STATE_COMPLETE_ACK         10
3494 #define ANEG_STATE_IDLE_DETECT_INIT     11
3495 #define ANEG_STATE_IDLE_DETECT          12
3496 #define ANEG_STATE_LINK_OK              13
3497 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
3498 #define ANEG_STATE_NEXT_PAGE_WAIT       15
3499
3500         u32 flags;
3501 #define MR_AN_ENABLE            0x00000001
3502 #define MR_RESTART_AN           0x00000002
3503 #define MR_AN_COMPLETE          0x00000004
3504 #define MR_PAGE_RX              0x00000008
3505 #define MR_NP_LOADED            0x00000010
3506 #define MR_TOGGLE_TX            0x00000020
3507 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
3508 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
3509 #define MR_LP_ADV_SYM_PAUSE     0x00000100
3510 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
3511 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3512 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3513 #define MR_LP_ADV_NEXT_PAGE     0x00001000
3514 #define MR_TOGGLE_RX            0x00002000
3515 #define MR_NP_RX                0x00004000
3516
3517 #define MR_LINK_OK              0x80000000
3518
3519         unsigned long link_time, cur_time;
3520
3521         u32 ability_match_cfg;
3522         int ability_match_count;
3523
3524         char ability_match, idle_match, ack_match;
3525
3526         u32 txconfig, rxconfig;
3527 #define ANEG_CFG_NP             0x00000080
3528 #define ANEG_CFG_ACK            0x00000040
3529 #define ANEG_CFG_RF2            0x00000020
3530 #define ANEG_CFG_RF1            0x00000010
3531 #define ANEG_CFG_PS2            0x00000001
3532 #define ANEG_CFG_PS1            0x00008000
3533 #define ANEG_CFG_HD             0x00004000
3534 #define ANEG_CFG_FD             0x00002000
3535 #define ANEG_CFG_INVAL          0x00001f06
3536
3537 };
3538 #define ANEG_OK         0
3539 #define ANEG_DONE       1
3540 #define ANEG_TIMER_ENAB 2
3541 #define ANEG_FAILED     -1
3542
3543 #define ANEG_STATE_SETTLE_TIME  10000
3544
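/* Advance the Clause 37 state machine by one tick.  The caller,
 * fiber_autoneg(), invokes this roughly once per microsecond, so
 * ANEG_STATE_SETTLE_TIME (10000 ticks) corresponds to about 10 ms of
 * settle time between state transitions.
 */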
3545 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3546                                    struct tg3_fiber_aneginfo *ap)
3547 {
3548         u16 flowctrl;
3549         unsigned long delta;
3550         u32 rx_cfg_reg;
3551         int ret;
3552
3553         if (ap->state == ANEG_STATE_UNKNOWN) {
3554                 ap->rxconfig = 0;
3555                 ap->link_time = 0;
3556                 ap->cur_time = 0;
3557                 ap->ability_match_cfg = 0;
3558                 ap->ability_match_count = 0;
3559                 ap->ability_match = 0;
3560                 ap->idle_match = 0;
3561                 ap->ack_match = 0;
3562         }
3563         ap->cur_time++;
3564
3565         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3566                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3567
3568                 if (rx_cfg_reg != ap->ability_match_cfg) {
3569                         ap->ability_match_cfg = rx_cfg_reg;
3570                         ap->ability_match = 0;
3571                         ap->ability_match_count = 0;
3572                 } else {
3573                         if (++ap->ability_match_count > 1) {
3574                                 ap->ability_match = 1;
3575                                 ap->ability_match_cfg = rx_cfg_reg;
3576                         }
3577                 }
3578                 if (rx_cfg_reg & ANEG_CFG_ACK)
3579                         ap->ack_match = 1;
3580                 else
3581                         ap->ack_match = 0;
3582
3583                 ap->idle_match = 0;
3584         } else {
3585                 ap->idle_match = 1;
3586                 ap->ability_match_cfg = 0;
3587                 ap->ability_match_count = 0;
3588                 ap->ability_match = 0;
3589                 ap->ack_match = 0;
3590
3591                 rx_cfg_reg = 0;
3592         }
3593
3594         ap->rxconfig = rx_cfg_reg;
3595         ret = ANEG_OK;
3596
3597         switch (ap->state) {
3598         case ANEG_STATE_UNKNOWN:
3599                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3600                         ap->state = ANEG_STATE_AN_ENABLE;
3601
3602                 /* fallthru */
3603         case ANEG_STATE_AN_ENABLE:
3604                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3605                 if (ap->flags & MR_AN_ENABLE) {
3606                         ap->link_time = 0;
3607                         ap->cur_time = 0;
3608                         ap->ability_match_cfg = 0;
3609                         ap->ability_match_count = 0;
3610                         ap->ability_match = 0;
3611                         ap->idle_match = 0;
3612                         ap->ack_match = 0;
3613
3614                         ap->state = ANEG_STATE_RESTART_INIT;
3615                 } else {
3616                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3617                 }
3618                 break;
3619
3620         case ANEG_STATE_RESTART_INIT:
3621                 ap->link_time = ap->cur_time;
3622                 ap->flags &= ~(MR_NP_LOADED);
3623                 ap->txconfig = 0;
3624                 tw32(MAC_TX_AUTO_NEG, 0);
3625                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3626                 tw32_f(MAC_MODE, tp->mac_mode);
3627                 udelay(40);
3628
3629                 ret = ANEG_TIMER_ENAB;
3630                 ap->state = ANEG_STATE_RESTART;
3631
3632                 /* fallthru */
3633         case ANEG_STATE_RESTART:
3634                 delta = ap->cur_time - ap->link_time;
3635                 if (delta > ANEG_STATE_SETTLE_TIME)
3636                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3637                 else
3638                         ret = ANEG_TIMER_ENAB;
3639                 break;
3640
3641         case ANEG_STATE_DISABLE_LINK_OK:
3642                 ret = ANEG_DONE;
3643                 break;
3644
3645         case ANEG_STATE_ABILITY_DETECT_INIT:
3646                 ap->flags &= ~(MR_TOGGLE_TX);
3647                 ap->txconfig = ANEG_CFG_FD;
3648                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3649                 if (flowctrl & ADVERTISE_1000XPAUSE)
3650                         ap->txconfig |= ANEG_CFG_PS1;
3651                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3652                         ap->txconfig |= ANEG_CFG_PS2;
3653                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3654                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3655                 tw32_f(MAC_MODE, tp->mac_mode);
3656                 udelay(40);
3657
3658                 ap->state = ANEG_STATE_ABILITY_DETECT;
3659                 break;
3660
3661         case ANEG_STATE_ABILITY_DETECT:
3662                 if (ap->ability_match != 0 && ap->rxconfig != 0)
3663                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3664                 break;
3665
3666         case ANEG_STATE_ACK_DETECT_INIT:
3667                 ap->txconfig |= ANEG_CFG_ACK;
3668                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3669                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3670                 tw32_f(MAC_MODE, tp->mac_mode);
3671                 udelay(40);
3672
3673                 ap->state = ANEG_STATE_ACK_DETECT;
3674
3675                 /* fallthru */
3676         case ANEG_STATE_ACK_DETECT:
3677                 if (ap->ack_match != 0) {
3678                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3679                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3680                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3681                         } else {
3682                                 ap->state = ANEG_STATE_AN_ENABLE;
3683                         }
3684                 } else if (ap->ability_match != 0 &&
3685                            ap->rxconfig == 0) {
3686                         ap->state = ANEG_STATE_AN_ENABLE;
3687                 }
3688                 break;
3689
3690         case ANEG_STATE_COMPLETE_ACK_INIT:
3691                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3692                         ret = ANEG_FAILED;
3693                         break;
3694                 }
3695                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3696                                MR_LP_ADV_HALF_DUPLEX |
3697                                MR_LP_ADV_SYM_PAUSE |
3698                                MR_LP_ADV_ASYM_PAUSE |
3699                                MR_LP_ADV_REMOTE_FAULT1 |
3700                                MR_LP_ADV_REMOTE_FAULT2 |
3701                                MR_LP_ADV_NEXT_PAGE |
3702                                MR_TOGGLE_RX |
3703                                MR_NP_RX);
3704                 if (ap->rxconfig & ANEG_CFG_FD)
3705                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3706                 if (ap->rxconfig & ANEG_CFG_HD)
3707                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3708                 if (ap->rxconfig & ANEG_CFG_PS1)
3709                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3710                 if (ap->rxconfig & ANEG_CFG_PS2)
3711                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3712                 if (ap->rxconfig & ANEG_CFG_RF1)
3713                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3714                 if (ap->rxconfig & ANEG_CFG_RF2)
3715                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3716                 if (ap->rxconfig & ANEG_CFG_NP)
3717                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3718
3719                 ap->link_time = ap->cur_time;
3720
3721                 ap->flags ^= (MR_TOGGLE_TX);
3722                 if (ap->rxconfig & 0x0008)
3723                         ap->flags |= MR_TOGGLE_RX;
3724                 if (ap->rxconfig & ANEG_CFG_NP)
3725                         ap->flags |= MR_NP_RX;
3726                 ap->flags |= MR_PAGE_RX;
3727
3728                 ap->state = ANEG_STATE_COMPLETE_ACK;
3729                 ret = ANEG_TIMER_ENAB;
3730                 break;
3731
3732         case ANEG_STATE_COMPLETE_ACK:
3733                 if (ap->ability_match != 0 &&
3734                     ap->rxconfig == 0) {
3735                         ap->state = ANEG_STATE_AN_ENABLE;
3736                         break;
3737                 }
3738                 delta = ap->cur_time - ap->link_time;
3739                 if (delta > ANEG_STATE_SETTLE_TIME) {
3740                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3741                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3742                         } else {
3743                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3744                                     !(ap->flags & MR_NP_RX)) {
3745                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3746                                 } else {
3747                                         ret = ANEG_FAILED;
3748                                 }
3749                         }
3750                 }
3751                 break;
3752
3753         case ANEG_STATE_IDLE_DETECT_INIT:
3754                 ap->link_time = ap->cur_time;
3755                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3756                 tw32_f(MAC_MODE, tp->mac_mode);
3757                 udelay(40);
3758
3759                 ap->state = ANEG_STATE_IDLE_DETECT;
3760                 ret = ANEG_TIMER_ENAB;
3761                 break;
3762
3763         case ANEG_STATE_IDLE_DETECT:
3764                 if (ap->ability_match != 0 &&
3765                     ap->rxconfig == 0) {
3766                         ap->state = ANEG_STATE_AN_ENABLE;
3767                         break;
3768                 }
3769                 delta = ap->cur_time - ap->link_time;
3770                 if (delta > ANEG_STATE_SETTLE_TIME) {
3771                         /* XXX another gem from the Broadcom driver :( */
3772                         ap->state = ANEG_STATE_LINK_OK;
3773                 }
3774                 break;
3775
3776         case ANEG_STATE_LINK_OK:
3777                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3778                 ret = ANEG_DONE;
3779                 break;
3780
3781         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3782                 /* ??? unimplemented */
3783                 break;
3784
3785         case ANEG_STATE_NEXT_PAGE_WAIT:
3786                 /* ??? unimplemented */
3787                 break;
3788
3789         default:
3790                 ret = ANEG_FAILED;
3791                 break;
3792         }
3793
3794         return ret;
3795 }
3796
3797 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3798 {
3799         int res = 0;
3800         struct tg3_fiber_aneginfo aninfo;
3801         int status = ANEG_FAILED;
3802         unsigned int tick;
3803         u32 tmp;
3804
3805         tw32_f(MAC_TX_AUTO_NEG, 0);
3806
3807         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3808         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3809         udelay(40);
3810
3811         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3812         udelay(40);
3813
3814         memset(&aninfo, 0, sizeof(aninfo));
3815         aninfo.flags |= MR_AN_ENABLE;
3816         aninfo.state = ANEG_STATE_UNKNOWN;
3817         aninfo.cur_time = 0;
3818         tick = 0;
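        /* Poll the state machine with a ~1 us period; 195000 ticks
         * bounds the whole software autoneg at roughly 195 ms.
         */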
3819         while (++tick < 195000) {
3820                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3821                 if (status == ANEG_DONE || status == ANEG_FAILED)
3822                         break;
3823
3824                 udelay(1);
3825         }
3826
3827         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3828         tw32_f(MAC_MODE, tp->mac_mode);
3829         udelay(40);
3830
3831         *txflags = aninfo.txconfig;
3832         *rxflags = aninfo.flags;
3833
3834         if (status == ANEG_DONE &&
3835             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3836                              MR_LP_ADV_FULL_DUPLEX)))
3837                 res = 1;
3838
3839         return res;
3840 }
3841
3842 static void tg3_init_bcm8002(struct tg3 *tp)
3843 {
3844         u32 mac_status = tr32(MAC_STATUS);
3845         int i;
3846
3847         /* Reset when initializing for the first time or when we have a link. */
3848         if (tg3_flag(tp, INIT_COMPLETE) &&
3849             !(mac_status & MAC_STATUS_PCS_SYNCED))
3850                 return;
3851
3852         /* Set PLL lock range. */
3853         tg3_writephy(tp, 0x16, 0x8007);
3854
3855         /* SW reset */
3856         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3857
3858         /* Wait for reset to complete. */
3859         /* XXX schedule_timeout() ... */
3860         for (i = 0; i < 500; i++)
3861                 udelay(10);
3862
3863         /* Config mode; select PMA/Ch 1 regs. */
3864         tg3_writephy(tp, 0x10, 0x8411);
3865
3866         /* Enable auto-lock and comdet, select txclk for tx. */
3867         tg3_writephy(tp, 0x11, 0x0a10);
3868
3869         tg3_writephy(tp, 0x18, 0x00a0);
3870         tg3_writephy(tp, 0x16, 0x41ff);
3871
3872         /* Assert and deassert POR. */
3873         tg3_writephy(tp, 0x13, 0x0400);
3874         udelay(40);
3875         tg3_writephy(tp, 0x13, 0x0000);
3876
3877         tg3_writephy(tp, 0x11, 0x0a50);
3878         udelay(40);
3879         tg3_writephy(tp, 0x11, 0x0a10);
3880
3881         /* Wait for signal to stabilize */
3882         /* XXX schedule_timeout() ... */
3883         for (i = 0; i < 15000; i++)
3884                 udelay(10);
3885
3886         /* Deselect the channel register so we can read the PHYID
3887          * later.
3888          */
3889         tg3_writephy(tp, 0x10, 0x8011);
3890 }
3891
3892 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3893 {
3894         u16 flowctrl;
3895         u32 sg_dig_ctrl, sg_dig_status;
3896         u32 serdes_cfg, expected_sg_dig_ctrl;
3897         int workaround, port_a;
3898         int current_link_up;
3899
3900         serdes_cfg = 0;
3901         expected_sg_dig_ctrl = 0;
3902         workaround = 0;
3903         port_a = 1;
3904         current_link_up = 0;
3905
3906         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3907             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3908                 workaround = 1;
3909                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3910                         port_a = 0;
3911
3912                 /* Preserve bits 0-11, 13, 14 (signal pre-emphasis)
3913                  * and bits 20-23 (voltage regulator). */
3914                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3915         }
3916
3917         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3918
3919         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3920                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3921                         if (workaround) {
3922                                 u32 val = serdes_cfg;
3923
3924                                 if (port_a)
3925                                         val |= 0xc010000;
3926                                 else
3927                                         val |= 0x4010000;
3928                                 tw32_f(MAC_SERDES_CFG, val);
3929                         }
3930
3931                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3932                 }
3933                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3934                         tg3_setup_flow_control(tp, 0, 0);
3935                         current_link_up = 1;
3936                 }
3937                 goto out;
3938         }
3939
3940         /* Want auto-negotiation. */
3941         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3942
3943         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3944         if (flowctrl & ADVERTISE_1000XPAUSE)
3945                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3946         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3947                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3948
3949         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3950                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3951                     tp->serdes_counter &&
3952                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3953                                     MAC_STATUS_RCVD_CFG)) ==
3954                      MAC_STATUS_PCS_SYNCED)) {
3955                         tp->serdes_counter--;
3956                         current_link_up = 1;
3957                         goto out;
3958                 }
3959 restart_autoneg:
3960                 if (workaround)
3961                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3962                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3963                 udelay(5);
3964                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3965
3966                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3967                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3968         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3969                                  MAC_STATUS_SIGNAL_DET)) {
3970                 sg_dig_status = tr32(SG_DIG_STATUS);
3971                 mac_status = tr32(MAC_STATUS);
3972
3973                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3974                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3975                         u32 local_adv = 0, remote_adv = 0;
3976
3977                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3978                                 local_adv |= ADVERTISE_1000XPAUSE;
3979                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3980                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3981
3982                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3983                                 remote_adv |= LPA_1000XPAUSE;
3984                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3985                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3986
3987                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3988                         current_link_up = 1;
3989                         tp->serdes_counter = 0;
3990                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3991                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3992                         if (tp->serdes_counter)
3993                                 tp->serdes_counter--;
3994                         else {
3995                                 if (workaround) {
3996                                         u32 val = serdes_cfg;
3997
3998                                         if (port_a)
3999                                                 val |= 0xc010000;
4000                                         else
4001                                                 val |= 0x4010000;
4002
4003                                         tw32_f(MAC_SERDES_CFG, val);
4004                                 }
4005
4006                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4007                                 udelay(40);
4008
4009                                 /* Link parallel detection: link is up only
4010                                  * if we have PCS_SYNC and are not receiving
4011                                  * config code words. */
4012                                 mac_status = tr32(MAC_STATUS);
4013                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4014                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4015                                         tg3_setup_flow_control(tp, 0, 0);
4016                                         current_link_up = 1;
4017                                         tp->phy_flags |=
4018                                                 TG3_PHYFLG_PARALLEL_DETECT;
4019                                         tp->serdes_counter =
4020                                                 SERDES_PARALLEL_DET_TIMEOUT;
4021                                 } else
4022                                         goto restart_autoneg;
4023                         }
4024                 }
4025         } else {
4026                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4027                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4028         }
4029
4030 out:
4031         return current_link_up;
4032 }
4033
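/* Fiber link setup without the SG-DIG block: run the Clause 37 state
 * machine in software via fiber_autoneg() when autonegotiation is
 * enabled, otherwise force a 1000 Mb/s full-duplex link by sending
 * config words briefly and then dropping back to normal mode.
 */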
4034 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4035 {
4036         int current_link_up = 0;
4037
4038         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4039                 goto out;
4040
4041         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4042                 u32 txflags, rxflags;
4043                 int i;
4044
4045                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4046                         u32 local_adv = 0, remote_adv = 0;
4047
4048                         if (txflags & ANEG_CFG_PS1)
4049                                 local_adv |= ADVERTISE_1000XPAUSE;
4050                         if (txflags & ANEG_CFG_PS2)
4051                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4052
4053                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4054                                 remote_adv |= LPA_1000XPAUSE;
4055                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4056                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4057
4058                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4059
4060                         current_link_up = 1;
4061                 }
4062                 for (i = 0; i < 30; i++) {
4063                         udelay(20);
4064                         tw32_f(MAC_STATUS,
4065                                (MAC_STATUS_SYNC_CHANGED |
4066                                 MAC_STATUS_CFG_CHANGED));
4067                         udelay(40);
4068                         if ((tr32(MAC_STATUS) &
4069                              (MAC_STATUS_SYNC_CHANGED |
4070                               MAC_STATUS_CFG_CHANGED)) == 0)
4071                                 break;
4072                 }
4073
4074                 mac_status = tr32(MAC_STATUS);
4075                 if (current_link_up == 0 &&
4076                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4077                     !(mac_status & MAC_STATUS_RCVD_CFG))
4078                         current_link_up = 1;
4079         } else {
4080                 tg3_setup_flow_control(tp, 0, 0);
4081
4082                 /* Forcing 1000FD link up. */
4083                 current_link_up = 1;
4084
4085                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4086                 udelay(40);
4087
4088                 tw32_f(MAC_MODE, tp->mac_mode);
4089                 udelay(40);
4090         }
4091
4092 out:
4093         return current_link_up;
4094 }
4095
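/* Top-level link setup for TBI (fiber) ports.  Fast path: if hardware
 * autoneg is off, the carrier is already up and init is complete, a
 * MAC_STATUS showing PCS sync and signal detect with no config words
 * or changes pending lets us return without renegotiating.
 */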
4096 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4097 {
4098         u32 orig_pause_cfg;
4099         u16 orig_active_speed;
4100         u8 orig_active_duplex;
4101         u32 mac_status;
4102         int current_link_up;
4103         int i;
4104
4105         orig_pause_cfg = tp->link_config.active_flowctrl;
4106         orig_active_speed = tp->link_config.active_speed;
4107         orig_active_duplex = tp->link_config.active_duplex;
4108
4109         if (!tg3_flag(tp, HW_AUTONEG) &&
4110             netif_carrier_ok(tp->dev) &&
4111             tg3_flag(tp, INIT_COMPLETE)) {
4112                 mac_status = tr32(MAC_STATUS);
4113                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4114                                MAC_STATUS_SIGNAL_DET |
4115                                MAC_STATUS_CFG_CHANGED |
4116                                MAC_STATUS_RCVD_CFG);
4117                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4118                                    MAC_STATUS_SIGNAL_DET)) {
4119                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4120                                             MAC_STATUS_CFG_CHANGED));
4121                         return 0;
4122                 }
4123         }
4124
4125         tw32_f(MAC_TX_AUTO_NEG, 0);
4126
4127         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4128         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4129         tw32_f(MAC_MODE, tp->mac_mode);
4130         udelay(40);
4131
4132         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4133                 tg3_init_bcm8002(tp);
4134
4135         /* Enable link change event even when serdes polling.  */
4136         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4137         udelay(40);
4138
4139         current_link_up = 0;
4140         mac_status = tr32(MAC_STATUS);
4141
4142         if (tg3_flag(tp, HW_AUTONEG))
4143                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4144         else
4145                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4146
4147         tp->napi[0].hw_status->status =
4148                 (SD_STATUS_UPDATED |
4149                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4150
4151         for (i = 0; i < 100; i++) {
4152                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4153                                     MAC_STATUS_CFG_CHANGED));
4154                 udelay(5);
4155                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4156                                          MAC_STATUS_CFG_CHANGED |
4157                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4158                         break;
4159         }
4160
4161         mac_status = tr32(MAC_STATUS);
4162         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4163                 current_link_up = 0;
4164                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4165                     tp->serdes_counter == 0) {
4166                         tw32_f(MAC_MODE, (tp->mac_mode |
4167                                           MAC_MODE_SEND_CONFIGS));
4168                         udelay(1);
4169                         tw32_f(MAC_MODE, tp->mac_mode);
4170                 }
4171         }
4172
4173         if (current_link_up == 1) {
4174                 tp->link_config.active_speed = SPEED_1000;
4175                 tp->link_config.active_duplex = DUPLEX_FULL;
4176                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4177                                     LED_CTRL_LNKLED_OVERRIDE |
4178                                     LED_CTRL_1000MBPS_ON));
4179         } else {
4180                 tp->link_config.active_speed = SPEED_INVALID;
4181                 tp->link_config.active_duplex = DUPLEX_INVALID;
4182                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4183                                     LED_CTRL_LNKLED_OVERRIDE |
4184                                     LED_CTRL_TRAFFIC_OVERRIDE));
4185         }
4186
4187         if (current_link_up != netif_carrier_ok(tp->dev)) {
4188                 if (current_link_up)
4189                         netif_carrier_on(tp->dev);
4190                 else
4191                         netif_carrier_off(tp->dev);
4192                 tg3_link_report(tp);
4193         } else {
4194                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4195                 if (orig_pause_cfg != now_pause_cfg ||
4196                     orig_active_speed != tp->link_config.active_speed ||
4197                     orig_active_duplex != tp->link_config.active_duplex)
4198                         tg3_link_report(tp);
4199         }
4200
4201         return 0;
4202 }
4203
4204 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4205 {
4206         int current_link_up, err = 0;
4207         u32 bmsr, bmcr;
4208         u16 current_speed;
4209         u8 current_duplex;
4210         u32 local_adv, remote_adv;
4211
4212         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4213         tw32_f(MAC_MODE, tp->mac_mode);
4214         udelay(40);
4215
4216         tw32(MAC_EVENT, 0);
4217
4218         tw32_f(MAC_STATUS,
4219              (MAC_STATUS_SYNC_CHANGED |
4220               MAC_STATUS_CFG_CHANGED |
4221               MAC_STATUS_MI_COMPLETION |
4222               MAC_STATUS_LNKSTATE_CHANGED));
4223         udelay(40);
4224
4225         if (force_reset)
4226                 tg3_phy_reset(tp);
4227
4228         current_link_up = 0;
4229         current_speed = SPEED_INVALID;
4230         current_duplex = DUPLEX_INVALID;
4231
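	/* The BMSR latches link-down events; read it twice so the
	 * second read reflects the current link state.
	 */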
4232         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4233         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4234         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4235                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4236                         bmsr |= BMSR_LSTATUS;
4237                 else
4238                         bmsr &= ~BMSR_LSTATUS;
4239         }
4240
4241         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4242
4243         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4244             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4245                 /* do nothing, just check for link up at the end */
4246         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4247                 u32 adv, new_adv;
4248
4249                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4250                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4251                                   ADVERTISE_1000XPAUSE |
4252                                   ADVERTISE_1000XPSE_ASYM |
4253                                   ADVERTISE_SLCT);
4254
4255                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4256
4257                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4258                         new_adv |= ADVERTISE_1000XHALF;
4259                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4260                         new_adv |= ADVERTISE_1000XFULL;
4261
4262                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4263                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4264                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4265                         tg3_writephy(tp, MII_BMCR, bmcr);
4266
4267                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4268                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4269                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4270
4271                         return err;
4272                 }
4273         } else {
4274                 u32 new_bmcr;
4275
4276                 bmcr &= ~BMCR_SPEED1000;
4277                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4278
4279                 if (tp->link_config.duplex == DUPLEX_FULL)
4280                         new_bmcr |= BMCR_FULLDPLX;
4281
4282                 if (new_bmcr != bmcr) {
4283                         /* BMCR_SPEED1000 is a reserved bit that needs
4284                          * to be set on write.
4285                          */
4286                         new_bmcr |= BMCR_SPEED1000;
4287
4288                         /* Force a linkdown */
4289                         if (netif_carrier_ok(tp->dev)) {
4290                                 u32 adv;
4291
4292                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4293                                 adv &= ~(ADVERTISE_1000XFULL |
4294                                          ADVERTISE_1000XHALF |
4295                                          ADVERTISE_SLCT);
4296                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4297                                 tg3_writephy(tp, MII_BMCR, bmcr |
4298                                                            BMCR_ANRESTART |
4299                                                            BMCR_ANENABLE);
4300                                 udelay(10);
4301                                 netif_carrier_off(tp->dev);
4302                         }
4303                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4304                         bmcr = new_bmcr;
4305                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4306                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4307                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4308                             ASIC_REV_5714) {
4309                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4310                                         bmsr |= BMSR_LSTATUS;
4311                                 else
4312                                         bmsr &= ~BMSR_LSTATUS;
4313                         }
4314                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4315                 }
4316         }
4317
4318         if (bmsr & BMSR_LSTATUS) {
4319                 current_speed = SPEED_1000;
4320                 current_link_up = 1;
4321                 if (bmcr & BMCR_FULLDPLX)
4322                         current_duplex = DUPLEX_FULL;
4323                 else
4324                         current_duplex = DUPLEX_HALF;
4325
4326                 local_adv = 0;
4327                 remote_adv = 0;
4328
4329                 if (bmcr & BMCR_ANENABLE) {
4330                         u32 common;
4331
4332                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4333                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4334                         common = local_adv & remote_adv;
4335                         if (common & (ADVERTISE_1000XHALF |
4336                                       ADVERTISE_1000XFULL)) {
4337                                 if (common & ADVERTISE_1000XFULL)
4338                                         current_duplex = DUPLEX_FULL;
4339                                 else
4340                                         current_duplex = DUPLEX_HALF;
4341                         } else if (!tg3_flag(tp, 5780_CLASS)) {
4342                                 /* Link is up via parallel detect */
4343                         } else {
4344                                 current_link_up = 0;
4345                         }
4346                 }
4347         }
4348
4349         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4350                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4351
4352         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4353         if (tp->link_config.active_duplex == DUPLEX_HALF)
4354                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4355
4356         tw32_f(MAC_MODE, tp->mac_mode);
4357         udelay(40);
4358
4359         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4360
4361         tp->link_config.active_speed = current_speed;
4362         tp->link_config.active_duplex = current_duplex;
4363
4364         if (current_link_up != netif_carrier_ok(tp->dev)) {
4365                 if (current_link_up)
4366                         netif_carrier_on(tp->dev);
4367                 else {
4368                         netif_carrier_off(tp->dev);
4369                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4370                 }
4371                 tg3_link_report(tp);
4372         }
4373         return err;
4374 }
4375
4376 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4377 {
4378         if (tp->serdes_counter) {
4379                 /* Give autoneg time to complete. */
4380                 tp->serdes_counter--;
4381                 return;
4382         }
4383
4384         if (!netif_carrier_ok(tp->dev) &&
4385             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4386                 u32 bmcr;
4387
4388                 tg3_readphy(tp, MII_BMCR, &bmcr);
4389                 if (bmcr & BMCR_ANENABLE) {
4390                         u32 phy1, phy2;
4391
4392                         /* Select shadow register 0x1f */
4393                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4394                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4395
4396                         /* Select expansion interrupt status register */
4397                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4398                                          MII_TG3_DSP_EXP1_INT_STAT);
4399                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4400                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4401
4402                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4403                                 /* We have signal detect and are not
4404                                  * receiving config code words; the link
4405                                  * is up via parallel detection.
4406                                  */
4407
4408                                 bmcr &= ~BMCR_ANENABLE;
4409                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4410                                 tg3_writephy(tp, MII_BMCR, bmcr);
4411                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4412                         }
4413                 }
4414         } else if (netif_carrier_ok(tp->dev) &&
4415                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4416                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4417                 u32 phy2;
4418
4419                 /* Select expansion interrupt status register */
4420                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4421                                  MII_TG3_DSP_EXP1_INT_STAT);
4422                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4423                 if (phy2 & 0x20) {
4424                         u32 bmcr;
4425
4426                         /* Config code words received, turn on autoneg. */
4427                         tg3_readphy(tp, MII_BMCR, &bmcr);
4428                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4429
4430                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4431
4432                 }
4433         }
4434 }
4435
4436 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4437 {
4438         u32 val;
4439         int err;
4440
4441         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4442                 err = tg3_setup_fiber_phy(tp, force_reset);
4443         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4444                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4445         else
4446                 err = tg3_setup_copper_phy(tp, force_reset);
4447
4448         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4449                 u32 scale;
4450
4451                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4452                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4453                         scale = 65;
4454                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4455                         scale = 6;
4456                 else
4457                         scale = 12;
4458
4459                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4460                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4461                 tw32(GRC_MISC_CFG, val);
4462         }
4463
4464         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4465               (6 << TX_LENGTHS_IPG_SHIFT);
4466         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4467                 val |= tr32(MAC_TX_LENGTHS) &
4468                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
4469                         TX_LENGTHS_CNT_DWN_VAL_MSK);
4470
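	/* Half-duplex gigabit needs a much larger slot time, presumably
	 * to cover 802.3 carrier extension, so program the maximum value
	 * (0xff); all other speed/duplex combinations use the default
	 * of 32.
	 */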
4471         if (tp->link_config.active_speed == SPEED_1000 &&
4472             tp->link_config.active_duplex == DUPLEX_HALF)
4473                 tw32(MAC_TX_LENGTHS, val |
4474                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4475         else
4476                 tw32(MAC_TX_LENGTHS, val |
4477                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4478
4479         if (!tg3_flag(tp, 5705_PLUS)) {
4480                 if (netif_carrier_ok(tp->dev)) {
4481                         tw32(HOSTCC_STAT_COAL_TICKS,
4482                              tp->coal.stats_block_coalesce_usecs);
4483                 } else {
4484                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
4485                 }
4486         }
4487
4488         if (tg3_flag(tp, ASPM_WORKAROUND)) {
4489                 val = tr32(PCIE_PWR_MGMT_THRESH);
4490                 if (!netif_carrier_ok(tp->dev))
4491                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4492                               tp->pwrmgmt_thresh;
4493                 else
4494                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4495                 tw32(PCIE_PWR_MGMT_THRESH, val);
4496         }
4497
4498         return err;
4499 }
4500
4501 static inline int tg3_irq_sync(struct tg3 *tp)
4502 {
4503         return tp->irq_sync;
4504 }
4505
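/* Copy 'len' bytes of registers, starting at register offset 'off', into
 * the dump buffer at the matching byte offset so the buffer mirrors the
 * chip's register layout.
 */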
4506 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4507 {
4508         int i;
4509
4510         dst = (u32 *)((u8 *)dst + off);
4511         for (i = 0; i < len; i += sizeof(u32))
4512                 *dst++ = tr32(off + i);
4513 }
4514
4515 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4516 {
4517         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4518         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4519         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4520         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4521         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4522         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4523         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4524         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4525         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4526         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4527         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4528         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4529         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4530         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4531         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4532         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4533         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4534         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4535         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4536
4537         if (tg3_flag(tp, SUPPORT_MSIX))
4538                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4539
4540         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4541         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4542         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4543         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4544         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4545         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4546         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4547         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4548
4549         if (!tg3_flag(tp, 5705_PLUS)) {
4550                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4551                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4552                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4553         }
4554
4555         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4556         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4557         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4558         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4559         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4560
4561         if (tg3_flag(tp, NVRAM))
4562                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4563 }
4564
4565 static void tg3_dump_state(struct tg3 *tp)
4566 {
4567         int i;
4568         u32 *regs;
4569
4570         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4571         if (!regs) {
4572                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4573                 return;
4574         }
4575
4576         if (tg3_flag(tp, PCI_EXPRESS)) {
4577                 /* Read up to but not including private PCI registers */
4578                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4579                         regs[i / sizeof(u32)] = tr32(i);
4580         } else
4581                 tg3_dump_legacy_regs(tp, regs);
4582
4583         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4584                 if (!regs[i + 0] && !regs[i + 1] &&
4585                     !regs[i + 2] && !regs[i + 3])
4586                         continue;
4587
4588                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4589                            i * 4,
4590                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4591         }
4592
4593         kfree(regs);
4594
4595         for (i = 0; i < tp->irq_cnt; i++) {
4596                 struct tg3_napi *tnapi = &tp->napi[i];
4597
4598                 /* SW status block */
4599                 netdev_err(tp->dev,
4600                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4601                            i,
4602                            tnapi->hw_status->status,
4603                            tnapi->hw_status->status_tag,
4604                            tnapi->hw_status->rx_jumbo_consumer,
4605                            tnapi->hw_status->rx_consumer,
4606                            tnapi->hw_status->rx_mini_consumer,
4607                            tnapi->hw_status->idx[0].rx_producer,
4608                            tnapi->hw_status->idx[0].tx_consumer);
4609
4610                 netdev_err(tp->dev,
4611                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4612                            i,
4613                            tnapi->last_tag, tnapi->last_irq_tag,
4614                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4615                            tnapi->rx_rcb_ptr,
4616                            tnapi->prodring.rx_std_prod_idx,
4617                            tnapi->prodring.rx_std_cons_idx,
4618                            tnapi->prodring.rx_jmb_prod_idx,
4619                            tnapi->prodring.rx_jmb_cons_idx);
4620         }
4621 }
4622
4623 /* This is called whenever we suspect that the system chipset is re-
4624  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4625  * is bogus tx completions. We try to recover by setting the
4626  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4627  * in the workqueue.
4628  */
4629 static void tg3_tx_recover(struct tg3 *tp)
4630 {
4631         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4632                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4633
4634         netdev_warn(tp->dev,
4635                     "The system may be re-ordering memory-mapped I/O "
4636                     "cycles to the network device, attempting to recover. "
4637                     "Please report the problem to the driver maintainer "
4638                     "and include system chipset information.\n");
4639
4640         spin_lock(&tp->lock);
4641         tg3_flag_set(tp, TX_RECOVERY_PENDING);
4642         spin_unlock(&tp->lock);
4643 }
4644
4645 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4646 {
4647         /* Tell compiler to fetch tx indices from memory. */
4648         barrier();
4649         return tnapi->tx_pending -
4650                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4651 }
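/* Worked example (a sketch): with the 512-entry TX ring, tx_prod = 5 and
 * tx_cons = 510 give (5 - 510) & 511 = 7 descriptors still in flight, so
 * tg3_tx_avail() returns tx_pending - 7.
 */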
4652
4653 /* Tigon3 never reports partial packet sends.  So we do not
4654  * need special logic to handle SKBs that have not had all
4655  * of their frags sent yet, like SunGEM does.
4656  */
4657 static void tg3_tx(struct tg3_napi *tnapi)
4658 {
4659         struct tg3 *tp = tnapi->tp;
4660         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4661         u32 sw_idx = tnapi->tx_cons;
4662         struct netdev_queue *txq;
4663         int index = tnapi - tp->napi;
4664
4665         if (tg3_flag(tp, ENABLE_TSS))
4666                 index--;
4667
4668         txq = netdev_get_tx_queue(tp->dev, index);
4669
4670         while (sw_idx != hw_idx) {
4671                 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4672                 struct sk_buff *skb = ri->skb;
4673                 int i, tx_bug = 0;
4674
4675                 if (unlikely(skb == NULL)) {
4676                         tg3_tx_recover(tp);
4677                         return;
4678                 }
4679
4680                 pci_unmap_single(tp->pdev,
4681                                  dma_unmap_addr(ri, mapping),
4682                                  skb_headlen(skb),
4683                                  PCI_DMA_TODEVICE);
4684
4685                 ri->skb = NULL;
4686
4687                 sw_idx = NEXT_TX(sw_idx);
4688
4689                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4690                         ri = &tnapi->tx_buffers[sw_idx];
4691                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4692                                 tx_bug = 1;
4693
4694                         pci_unmap_page(tp->pdev,
4695                                        dma_unmap_addr(ri, mapping),
4696                                        skb_shinfo(skb)->frags[i].size,
4697                                        PCI_DMA_TODEVICE);
4698                         sw_idx = NEXT_TX(sw_idx);
4699                 }
4700
4701                 dev_kfree_skb(skb);
4702
4703                 if (unlikely(tx_bug)) {
4704                         tg3_tx_recover(tp);
4705                         return;
4706                 }
4707         }
4708
4709         tnapi->tx_cons = sw_idx;
4710
4711         /* Need to make the tx_cons update visible to tg3_start_xmit()
4712          * before checking for netif_queue_stopped().  Without the
4713          * memory barrier, there is a small possibility that tg3_start_xmit()
4714          * will miss it and cause the queue to be stopped forever.
4715          */
4716         smp_mb();
4717
4718         if (unlikely(netif_tx_queue_stopped(txq) &&
4719                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4720                 __netif_tx_lock(txq, smp_processor_id());
4721                 if (netif_tx_queue_stopped(txq) &&
4722                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4723                         netif_tx_wake_queue(txq);
4724                 __netif_tx_unlock(txq);
4725         }
4726 }
4727
4728 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4729 {
4730         if (!ri->skb)
4731                 return;
4732
4733         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4734                          map_sz, PCI_DMA_FROMDEVICE);
4735         dev_kfree_skb_any(ri->skb);
4736         ri->skb = NULL;
4737 }
4738
4739 /* Returns size of skb allocated or < 0 on error.
4740  *
4741  * We only need to fill in the address because the other members
4742  * of the RX descriptor are invariant, see tg3_init_rings.
4743  *
4744  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4745  * posting buffers we only dirty the first cache line of the RX
4746  * descriptor (containing the address).  Whereas for the RX status
4747  * buffers the cpu only reads the last cacheline of the RX descriptor
4748  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4749  */
4750 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4751                             u32 opaque_key, u32 dest_idx_unmasked)
4752 {
4753         struct tg3_rx_buffer_desc *desc;
4754         struct ring_info *map;
4755         struct sk_buff *skb;
4756         dma_addr_t mapping;
4757         int skb_size, dest_idx;
4758
4759         switch (opaque_key) {
4760         case RXD_OPAQUE_RING_STD:
4761                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4762                 desc = &tpr->rx_std[dest_idx];
4763                 map = &tpr->rx_std_buffers[dest_idx];
4764                 skb_size = tp->rx_pkt_map_sz;
4765                 break;
4766
4767         case RXD_OPAQUE_RING_JUMBO:
4768                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4769                 desc = &tpr->rx_jmb[dest_idx].std;
4770                 map = &tpr->rx_jmb_buffers[dest_idx];
4771                 skb_size = TG3_RX_JMB_MAP_SZ;
4772                 break;
4773
4774         default:
4775                 return -EINVAL;
4776         }
4777
4778         /* Do not overwrite any of the map or rp information
4779          * until we are sure we can commit to a new buffer.
4780          *
4781          * Callers depend upon this behavior and assume that
4782          * we leave everything unchanged if we fail.
4783          */
4784         skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4785         if (skb == NULL)
4786                 return -ENOMEM;
4787
4788         skb_reserve(skb, tp->rx_offset);
4789
4790         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4791                                  PCI_DMA_FROMDEVICE);
4792         if (pci_dma_mapping_error(tp->pdev, mapping)) {
4793                 dev_kfree_skb(skb);
4794                 return -EIO;
4795         }
4796
4797         map->skb = skb;
4798         dma_unmap_addr_set(map, mapping, mapping);
4799
4800         desc->addr_hi = ((u64)mapping >> 32);
4801         desc->addr_lo = ((u64)mapping & 0xffffffff);
4802
4803         return skb_size;
4804 }
4805
4806 /* We only need to move over in the address because the other
4807  * members of the RX descriptor are invariant.  See notes above
4808  * tg3_alloc_rx_skb for full details.
4809  */
4810 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4811                            struct tg3_rx_prodring_set *dpr,
4812                            u32 opaque_key, int src_idx,
4813                            u32 dest_idx_unmasked)
4814 {
4815         struct tg3 *tp = tnapi->tp;
4816         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4817         struct ring_info *src_map, *dest_map;
4818         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4819         int dest_idx;
4820
4821         switch (opaque_key) {
4822         case RXD_OPAQUE_RING_STD:
4823                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4824                 dest_desc = &dpr->rx_std[dest_idx];
4825                 dest_map = &dpr->rx_std_buffers[dest_idx];
4826                 src_desc = &spr->rx_std[src_idx];
4827                 src_map = &spr->rx_std_buffers[src_idx];
4828                 break;
4829
4830         case RXD_OPAQUE_RING_JUMBO:
4831                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4832                 dest_desc = &dpr->rx_jmb[dest_idx].std;
4833                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4834                 src_desc = &spr->rx_jmb[src_idx].std;
4835                 src_map = &spr->rx_jmb_buffers[src_idx];
4836                 break;
4837
4838         default:
4839                 return;
4840         }
4841
4842         dest_map->skb = src_map->skb;
4843         dma_unmap_addr_set(dest_map, mapping,
4844                            dma_unmap_addr(src_map, mapping));
4845         dest_desc->addr_hi = src_desc->addr_hi;
4846         dest_desc->addr_lo = src_desc->addr_lo;
4847
4848         /* Ensure that the update to the skb happens after the physical
4849          * addresses have been transferred to the new BD location.
4850          */
4851         smp_wmb();
4852
4853         src_map->skb = NULL;
4854 }
4855
4856 /* The RX ring scheme is composed of multiple rings which post fresh
4857  * buffers to the chip, and one special ring the chip uses to report
4858  * status back to the host.
4859  *
4860  * The special ring reports the status of received packets to the
4861  * host.  The chip does not write into the original descriptor the
4862  * RX buffer was obtained from.  The chip simply takes the original
4863  * descriptor as provided by the host, updates the status and length
4864  * field, then writes this into the next status ring entry.
4865  *
4866  * Each ring the host uses to post buffers to the chip is described
4867  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
4868  * it is first placed into on-chip RAM.  Once the packet's length
4869  * is known, the chip walks down the TG3_BDINFO entries to select a
4870  * ring.  Each TG3_BDINFO specifies a MAXLEN field, and the first
4871  * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
4872  *
4873  * The "separate ring for rx status" scheme may sound odd, but it makes
4874  * sense from a cache coherency perspective.  If only the host writes
4875  * to the buffer post rings, and only the chip writes to the rx status
4876  * rings, then cache lines never move beyond shared-modified state.
4877  * If both the host and chip were to write into the same ring, cache line
4878  * eviction could occur since both entities want it in an exclusive state.
4879  */
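/* For example (a sketch; exact MAXLEN values depend on the MTU
 * configuration): with a standard ring MAXLEN near 1536 bytes and a
 * jumbo ring MAXLEN near 9 KB, a 300-byte frame is placed in the
 * standard ring while a 4000-byte frame falls through to the jumbo ring.
 */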
4880 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4881 {
4882         struct tg3 *tp = tnapi->tp;
4883         u32 work_mask, rx_std_posted = 0;
4884         u32 std_prod_idx, jmb_prod_idx;
4885         u32 sw_idx = tnapi->rx_rcb_ptr;
4886         u16 hw_idx;
4887         int received;
4888         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4889
4890         hw_idx = *(tnapi->rx_rcb_prod_idx);
4891         /*
4892          * We need to order the read of hw_idx and the read of
4893          * the opaque cookie.
4894          */
4895         rmb();
4896         work_mask = 0;
4897         received = 0;
4898         std_prod_idx = tpr->rx_std_prod_idx;
4899         jmb_prod_idx = tpr->rx_jmb_prod_idx;
4900         while (sw_idx != hw_idx && budget > 0) {
4901                 struct ring_info *ri;
4902                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4903                 unsigned int len;
4904                 struct sk_buff *skb;
4905                 dma_addr_t dma_addr;
4906                 u32 opaque_key, desc_idx, *post_ptr;
4907
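		/* The opaque cookie encodes which producer ring this buffer
		 * came from and its index within that ring.
		 */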
4908                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4909                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4910                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4911                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4912                         dma_addr = dma_unmap_addr(ri, mapping);
4913                         skb = ri->skb;
4914                         post_ptr = &std_prod_idx;
4915                         rx_std_posted++;
4916                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4917                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4918                         dma_addr = dma_unmap_addr(ri, mapping);
4919                         skb = ri->skb;
4920                         post_ptr = &jmb_prod_idx;
4921                 } else
4922                         goto next_pkt_nopost;
4923
4924                 work_mask |= opaque_key;
4925
4926                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4927                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4928                 drop_it:
4929                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4930                                        desc_idx, *post_ptr);
4931                 drop_it_no_recycle:
4932                         /* Other statistics are kept track of by the card. */
4933                         tp->rx_dropped++;
4934                         goto next_pkt;
4935                 }
4936
4937                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4938                       ETH_FCS_LEN;
4939
4940                 if (len > TG3_RX_COPY_THRESH(tp)) {
4941                         int skb_size;
4942
4943                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4944                                                     *post_ptr);
4945                         if (skb_size < 0)
4946                                 goto drop_it;
4947
4948                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
4949                                          PCI_DMA_FROMDEVICE);
4950
4951                         /* Ensure that the update to the skb happens
4952                          * after the usage of the old DMA mapping.
4953                          */
4954                         smp_wmb();
4955
4956                         ri->skb = NULL;
4957
4958                         skb_put(skb, len);
4959                 } else {
4960                         struct sk_buff *copy_skb;
4961
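			/* Small packet: copy it into a fresh skb and recycle
			 * the original DMA buffer, which is cheaper than
			 * mapping a new one.
			 */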
4962                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4963                                        desc_idx, *post_ptr);
4964
4965                         copy_skb = netdev_alloc_skb(tp->dev, len +
4966                                                     TG3_RAW_IP_ALIGN);
4967                         if (copy_skb == NULL)
4968                                 goto drop_it_no_recycle;
4969
4970                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4971                         skb_put(copy_skb, len);
4972                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4973                         skb_copy_from_linear_data(skb, copy_skb->data, len);
4974                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4975
4976                         /* We'll reuse the original ring buffer. */
4977                         skb = copy_skb;
4978                 }
4979
4980                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4981                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4982                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4983                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
4984                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4985                 else
4986                         skb_checksum_none_assert(skb);
4987
4988                 skb->protocol = eth_type_trans(skb, tp->dev);
4989
4990                 if (len > (tp->dev->mtu + ETH_HLEN) &&
4991                     skb->protocol != htons(ETH_P_8021Q)) {
4992                         dev_kfree_skb(skb);
4993                         goto drop_it_no_recycle;
4994                 }
4995
4996                 if (desc->type_flags & RXD_FLAG_VLAN &&
4997                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4998                         __vlan_hwaccel_put_tag(skb,
4999                                                desc->err_vlan & RXD_VLAN_MASK);
5000
5001                 napi_gro_receive(&tnapi->napi, skb);
5002
5003                 received++;
5004                 budget--;
5005
5006 next_pkt:
5007                 (*post_ptr)++;
5008
5009                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5010                         tpr->rx_std_prod_idx = std_prod_idx &
5011                                                tp->rx_std_ring_mask;
5012                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5013                                      tpr->rx_std_prod_idx);
5014                         work_mask &= ~RXD_OPAQUE_RING_STD;
5015                         rx_std_posted = 0;
5016                 }
5017 next_pkt_nopost:
5018                 sw_idx++;
5019                 sw_idx &= tp->rx_ret_ring_mask;
5020
5021                 /* Refresh hw_idx to see if there is new work */
5022                 if (sw_idx == hw_idx) {
5023                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5024                         rmb();
5025                 }
5026         }
5027
5028         /* ACK the status ring. */
5029         tnapi->rx_rcb_ptr = sw_idx;
5030         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5031
5032         /* Refill RX ring(s). */
5033         if (!tg3_flag(tp, ENABLE_RSS)) {
5034                 if (work_mask & RXD_OPAQUE_RING_STD) {
5035                         tpr->rx_std_prod_idx = std_prod_idx &
5036                                                tp->rx_std_ring_mask;
5037                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5038                                      tpr->rx_std_prod_idx);
5039                 }
5040                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5041                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5042                                                tp->rx_jmb_ring_mask;
5043                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5044                                      tpr->rx_jmb_prod_idx);
5045                 }
5046                 mmiowb();
5047         } else if (work_mask) {
5048                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5049                  * updated before the producer indices can be updated.
5050                  */
5051                 smp_wmb();
5052
5053                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5054                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5055
5056                 if (tnapi != &tp->napi[1])
5057                         napi_schedule(&tp->napi[1].napi);
5058         }
5059
5060         return received;
5061 }
5062
5063 static void tg3_poll_link(struct tg3 *tp)
5064 {
5065         /* handle link change and other phy events */
5066         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5067                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5068
5069                 if (sblk->status & SD_STATUS_LINK_CHG) {
5070                         sblk->status = SD_STATUS_UPDATED |
5071                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5072                         spin_lock(&tp->lock);
5073                         if (tg3_flag(tp, USE_PHYLIB)) {
5074                                 tw32_f(MAC_STATUS,
5075                                      (MAC_STATUS_SYNC_CHANGED |
5076                                       MAC_STATUS_CFG_CHANGED |
5077                                       MAC_STATUS_MI_COMPLETION |
5078                                       MAC_STATUS_LNKSTATE_CHANGED));
5079                                 udelay(40);
5080                         } else
5081                                 tg3_setup_phy(tp, 0);
5082                         spin_unlock(&tp->lock);
5083                 }
5084         }
5085 }
5086
5087 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5088                                 struct tg3_rx_prodring_set *dpr,
5089                                 struct tg3_rx_prodring_set *spr)
5090 {
5091         u32 si, di, cpycnt, src_prod_idx;
5092         int i, err = 0;
5093
5094         while (1) {
5095                 src_prod_idx = spr->rx_std_prod_idx;
5096
5097                 /* Make sure updates to the rx_std_buffers[] entries and the
5098                  * standard producer index are seen in the correct order.
5099                  */
5100                 smp_rmb();
5101
5102                 if (spr->rx_std_cons_idx == src_prod_idx)
5103                         break;
5104
5105                 if (spr->rx_std_cons_idx < src_prod_idx)
5106                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5107                 else
5108                         cpycnt = tp->rx_std_ring_mask + 1 -
5109                                  spr->rx_std_cons_idx;
5110
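		/* Wraparound sketch: with a 512-entry ring (mask 511),
		 * cons_idx = 510 and prod_idx = 4 copies two entries up to
		 * the end of the ring on this pass; the next loop iteration
		 * copies the remaining four starting at index 0.
		 */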
5111                 cpycnt = min(cpycnt,
5112                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5113
5114                 si = spr->rx_std_cons_idx;
5115                 di = dpr->rx_std_prod_idx;
5116
5117                 for (i = di; i < di + cpycnt; i++) {
5118                         if (dpr->rx_std_buffers[i].skb) {
5119                                 cpycnt = i - di;
5120                                 err = -ENOSPC;
5121                                 break;
5122                         }
5123                 }
5124
5125                 if (!cpycnt)
5126                         break;
5127
5128                 /* Ensure that updates to the rx_std_buffers ring and the
5129                  * shadowed hardware producer ring from tg3_recycle_skb() are
5130                  * ordered correctly WRT the skb check above.
5131                  */
5132                 smp_rmb();
5133
5134                 memcpy(&dpr->rx_std_buffers[di],
5135                        &spr->rx_std_buffers[si],
5136                        cpycnt * sizeof(struct ring_info));
5137
5138                 for (i = 0; i < cpycnt; i++, di++, si++) {
5139                         struct tg3_rx_buffer_desc *sbd, *dbd;
5140                         sbd = &spr->rx_std[si];
5141                         dbd = &dpr->rx_std[di];
5142                         dbd->addr_hi = sbd->addr_hi;
5143                         dbd->addr_lo = sbd->addr_lo;
5144                 }
5145
5146                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5147                                        tp->rx_std_ring_mask;
5148                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5149                                        tp->rx_std_ring_mask;
5150         }
5151
5152         while (1) {
5153                 src_prod_idx = spr->rx_jmb_prod_idx;
5154
5155                 /* Make sure updates to the rx_jmb_buffers[] entries and
5156                  * the jumbo producer index are seen in the correct order.
5157                  */
5158                 smp_rmb();
5159
5160                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5161                         break;
5162
5163                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5164                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5165                 else
5166                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5167                                  spr->rx_jmb_cons_idx;
5168
5169                 cpycnt = min(cpycnt,
5170                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5171
5172                 si = spr->rx_jmb_cons_idx;
5173                 di = dpr->rx_jmb_prod_idx;
5174
5175                 for (i = di; i < di + cpycnt; i++) {
5176                         if (dpr->rx_jmb_buffers[i].skb) {
5177                                 cpycnt = i - di;
5178                                 err = -ENOSPC;
5179                                 break;
5180                         }
5181                 }
5182
5183                 if (!cpycnt)
5184                         break;
5185
5186                 /* Ensure that updates to the rx_jmb_buffers ring and the
5187                  * shadowed hardware producer ring from tg3_recycle_skb() are
5188                  * ordered correctly WRT the skb check above.
5189                  */
5190                 smp_rmb();
5191
5192                 memcpy(&dpr->rx_jmb_buffers[di],
5193                        &spr->rx_jmb_buffers[si],
5194                        cpycnt * sizeof(struct ring_info));
5195
5196                 for (i = 0; i < cpycnt; i++, di++, si++) {
5197                         struct tg3_rx_buffer_desc *sbd, *dbd;
5198                         sbd = &spr->rx_jmb[si].std;
5199                         dbd = &dpr->rx_jmb[di].std;
5200                         dbd->addr_hi = sbd->addr_hi;
5201                         dbd->addr_lo = sbd->addr_lo;
5202                 }
5203
5204                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5205                                        tp->rx_jmb_ring_mask;
5206                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5207                                        tp->rx_jmb_ring_mask;
5208         }
5209
5210         return err;
5211 }
5212
5213 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5214 {
5215         struct tg3 *tp = tnapi->tp;
5216
5217         /* run TX completion thread */
5218         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5219                 tg3_tx(tnapi);
5220                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5221                         return work_done;
5222         }
5223
5224         if (!tnapi->rx_rcb_prod_idx)
5225                 return work_done;
5226
5227         /* run RX thread, within the bounds set by NAPI.
5228          * All RX "locking" is done by ensuring outside
5229          * code synchronizes with tg3->napi.poll()
5230          */
5231         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5232                 work_done += tg3_rx(tnapi, budget - work_done);
5233
5234         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5235                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5236                 int i, err = 0;
5237                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5238                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5239
5240                 for (i = 1; i < tp->irq_cnt; i++)
5241                         err |= tg3_rx_prodring_xfer(tp, dpr,
5242                                                     &tp->napi[i].prodring);
5243
5244                 wmb();
5245
5246                 if (std_prod_idx != dpr->rx_std_prod_idx)
5247                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5248                                      dpr->rx_std_prod_idx);
5249
5250                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5251                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5252                                      dpr->rx_jmb_prod_idx);
5253
5254                 mmiowb();
5255
5256                 if (err)
5257                         tw32_f(HOSTCC_MODE, tp->coal_now);
5258         }
5259
5260         return work_done;
5261 }
5262
5263 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5264 {
5265         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5266         struct tg3 *tp = tnapi->tp;
5267         int work_done = 0;
5268         struct tg3_hw_status *sblk = tnapi->hw_status;
5269
5270         while (1) {
5271                 work_done = tg3_poll_work(tnapi, work_done, budget);
5272
5273                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5274                         goto tx_recovery;
5275
5276                 if (unlikely(work_done >= budget))
5277                         break;
5278
5279                 /* tnapi->last_tag is written to the interrupt mailbox
5280                  * below to tell the hw how much work has been processed,
5281                  * so we must read it before checking for more work.
5282                  */
5283                 tnapi->last_tag = sblk->status_tag;
5284                 tnapi->last_irq_tag = tnapi->last_tag;
5285                 rmb();
5286
5287                 /* check for RX/TX work to do */
5288                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5289                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5290                         napi_complete(napi);
5291                         /* Reenable interrupts. */
5292                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5293                         mmiowb();
5294                         break;
5295                 }
5296         }
5297
5298         return work_done;
5299
5300 tx_recovery:
5301         /* work_done is guaranteed to be less than budget. */
5302         napi_complete(napi);
5303         schedule_work(&tp->reset_task);
5304         return work_done;
5305 }
5306
5307 static void tg3_process_error(struct tg3 *tp)
5308 {
5309         u32 val;
5310         bool real_error = false;
5311
5312         if (tg3_flag(tp, ERROR_PROCESSED))
5313                 return;
5314
5315         /* Check Flow Attention register */
5316         val = tr32(HOSTCC_FLOW_ATTN);
5317         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5318                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5319                 real_error = true;
5320         }
5321
5322         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5323                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5324                 real_error = true;
5325         }
5326
5327         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5328                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5329                 real_error = true;
5330         }
5331
5332         if (!real_error)
5333                 return;
5334
5335         tg3_dump_state(tp);
5336
5337         tg3_flag_set(tp, ERROR_PROCESSED);
5338         schedule_work(&tp->reset_task);
5339 }
5340
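/* Main NAPI poll callback: handle link events, then TX completions and
 * RX work until the budget is exhausted; once no work remains, complete
 * NAPI and re-enable chip interrupts.
 */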
5341 static int tg3_poll(struct napi_struct *napi, int budget)
5342 {
5343         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5344         struct tg3 *tp = tnapi->tp;
5345         int work_done = 0;
5346         struct tg3_hw_status *sblk = tnapi->hw_status;
5347
5348         while (1) {
5349                 if (sblk->status & SD_STATUS_ERROR)
5350                         tg3_process_error(tp);
5351
5352                 tg3_poll_link(tp);
5353
5354                 work_done = tg3_poll_work(tnapi, work_done, budget);
5355
5356                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5357                         goto tx_recovery;
5358
5359                 if (unlikely(work_done >= budget))
5360                         break;
5361
5362                 if (tg3_flag(tp, TAGGED_STATUS)) {
5363                         /* tnapi->last_tag is used in tg3_int_reenable() below
5364                          * to tell the hw how much work has been processed,
5365                          * so we must read it before checking for more work.
5366                          */
5367                         tnapi->last_tag = sblk->status_tag;
5368                         tnapi->last_irq_tag = tnapi->last_tag;
5369                         rmb();
5370                 } else
5371                         sblk->status &= ~SD_STATUS_UPDATED;
5372
5373                 if (likely(!tg3_has_work(tnapi))) {
5374                         napi_complete(napi);
5375                         tg3_int_reenable(tnapi);
5376                         break;
5377                 }
5378         }
5379
5380         return work_done;
5381
5382 tx_recovery:
5383         /* work_done is guaranteed to be less than budget. */
5384         napi_complete(napi);
5385         schedule_work(&tp->reset_task);
5386         return work_done;
5387 }
5388
5389 static void tg3_napi_disable(struct tg3 *tp)
5390 {
5391         int i;
5392
5393         for (i = tp->irq_cnt - 1; i >= 0; i--)
5394                 napi_disable(&tp->napi[i].napi);
5395 }
5396
5397 static void tg3_napi_enable(struct tg3 *tp)
5398 {
5399         int i;
5400
5401         for (i = 0; i < tp->irq_cnt; i++)
5402                 napi_enable(&tp->napi[i].napi);
5403 }
5404
5405 static void tg3_napi_init(struct tg3 *tp)
5406 {
5407         int i;
5408
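	/* The first vector services link events and uses tg3_poll(); any
	 * additional MSI-X vectors are rx/tx-only and use tg3_poll_msix().
	 * 64 is the conventional NAPI poll weight.
	 */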
5409         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5410         for (i = 1; i < tp->irq_cnt; i++)
5411                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5412 }
5413
5414 static void tg3_napi_fini(struct tg3 *tp)
5415 {
5416         int i;
5417
5418         for (i = 0; i < tp->irq_cnt; i++)
5419                 netif_napi_del(&tp->napi[i].napi);
5420 }
5421
5422 static inline void tg3_netif_stop(struct tg3 *tp)
5423 {
5424         tp->dev->trans_start = jiffies; /* prevent tx timeout */
5425         tg3_napi_disable(tp);
5426         netif_tx_disable(tp->dev);
5427 }
5428
5429 static inline void tg3_netif_start(struct tg3 *tp)
5430 {
5431         /* NOTE: unconditional netif_tx_wake_all_queues is only
5432          * appropriate so long as all callers are assured to
5433          * have free tx slots (such as after tg3_init_hw)
5434          */
5435         netif_tx_wake_all_queues(tp->dev);
5436
5437         tg3_napi_enable(tp);
5438         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5439         tg3_enable_ints(tp);
5440 }
5441
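/* Mark the device as quiescing so the interrupt handlers bail out early
 * (see tg3_irq_sync()), then wait for any handler that is already
 * running to finish.
 */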
5442 static void tg3_irq_quiesce(struct tg3 *tp)
5443 {
5444         int i;
5445
5446         BUG_ON(tp->irq_sync);
5447
5448         tp->irq_sync = 1;
5449         smp_mb();
5450
5451         for (i = 0; i < tp->irq_cnt; i++)
5452                 synchronize_irq(tp->napi[i].irq_vec);
5453 }
5454
5455 /* Fully shut down all tg3 driver activity elsewhere in the system.
5456  * If irq_sync is non-zero, the IRQ handlers must be quiesced as well.
5457  * Most of the time this is not necessary, except when shutting down
5458  * the device.
5459  */
5460 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5461 {
5462         spin_lock_bh(&tp->lock);
5463         if (irq_sync)
5464                 tg3_irq_quiesce(tp);
5465 }
5466
5467 static inline void tg3_full_unlock(struct tg3 *tp)
5468 {
5469         spin_unlock_bh(&tp->lock);
5470 }
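/* Typical usage (a sketch): configuration paths take the full lock,
 * quiescing IRQs when the hardware is about to be reset, e.g.:
 *
 *	tg3_full_lock(tp, 1);
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *	err = tg3_restart_hw(tp, 1);
 *	tg3_full_unlock(tp);
 */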
5471
5472 /* One-shot MSI handler - Chip automatically disables interrupt
5473  * after sending the MSI, so the driver doesn't have to do it.
5474  */
5475 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5476 {
5477         struct tg3_napi *tnapi = dev_id;
5478         struct tg3 *tp = tnapi->tp;
5479
5480         prefetch(tnapi->hw_status);
5481         if (tnapi->rx_rcb)
5482                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5483
5484         if (likely(!tg3_irq_sync(tp)))
5485                 napi_schedule(&tnapi->napi);
5486
5487         return IRQ_HANDLED;
5488 }
5489
5490 /* MSI ISR - No need to check for interrupt sharing and no need to
5491  * flush status block and interrupt mailbox. PCI ordering rules
5492  * guarantee that MSI will arrive after the status block.
5493  */
5494 static irqreturn_t tg3_msi(int irq, void *dev_id)
5495 {
5496         struct tg3_napi *tnapi = dev_id;
5497         struct tg3 *tp = tnapi->tp;
5498
5499         prefetch(tnapi->hw_status);
5500         if (tnapi->rx_rcb)
5501                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5502         /*
5503          * Writing any value to intr-mbox-0 clears PCI INTA# and
5504          * chip-internal interrupt pending events.
5505          * Writing non-zero to intr-mbox-0 additionally tells the
5506          * NIC to stop sending us irqs, engaging "in-intr-handler"
5507          * event coalescing.
5508          */
5509         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5510         if (likely(!tg3_irq_sync(tp)))
5511                 napi_schedule(&tnapi->napi);
5512
5513         return IRQ_RETVAL(1);
5514 }
5515
5516 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5517 {
5518         struct tg3_napi *tnapi = dev_id;
5519         struct tg3 *tp = tnapi->tp;
5520         struct tg3_hw_status *sblk = tnapi->hw_status;
5521         unsigned int handled = 1;
5522
5523         /* In INTx mode, it is possible for the interrupt to arrive at
5524          * the CPU before the status block posted prior to it is visible.
5525          * Reading the PCI State register will confirm whether the
5526          * interrupt is ours and will flush the status block.
5527          */
5528         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5529                 if (tg3_flag(tp, CHIP_RESETTING) ||
5530                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5531                         handled = 0;
5532                         goto out;
5533                 }
5534         }
5535
5536         /*
5537          * Writing any value to intr-mbox-0 clears PCI INTA# and
5538          * chip-internal interrupt pending events.
5539          * Writing non-zero to intr-mbox-0 additionally tells the
5540          * NIC to stop sending us irqs, engaging "in-intr-handler"
5541          * event coalescing.
5542          *
5543          * Flush the mailbox to de-assert the IRQ immediately to prevent
5544          * spurious interrupts.  The flush impacts performance but
5545          * excessive spurious interrupts can be worse in some cases.
5546          */
5547         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5548         if (tg3_irq_sync(tp))
5549                 goto out;
5550         sblk->status &= ~SD_STATUS_UPDATED;
5551         if (likely(tg3_has_work(tnapi))) {
5552                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5553                 napi_schedule(&tnapi->napi);
5554         } else {
5555                 /* No work, shared interrupt perhaps?  Re-enable
5556                  * interrupts, and flush that PCI write.
5557                  */
5558                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5559                                0x00000000);
5560         }
5561 out:
5562         return IRQ_RETVAL(handled);
5563 }
5564
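/* Rough flow of the INTx handler above, for illustration:
 *   1. If SD_STATUS_UPDATED is clear, check TG3PCI_PCISTATE; a set
 *      PCISTATE_INT_NOT_ACTIVE bit means the line is being driven by
 *      some other device sharing the interrupt, so report unhandled.
 *   2. Ack through the interrupt mailbox, flushed so INTA# de-asserts
 *      before the handler returns.
 *   3. Clear SD_STATUS_UPDATED and schedule NAPI if there is work,
 *      otherwise write 0 back to the mailbox to re-enable interrupts.
 */
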
5565 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5566 {
5567         struct tg3_napi *tnapi = dev_id;
5568         struct tg3 *tp = tnapi->tp;
5569         struct tg3_hw_status *sblk = tnapi->hw_status;
5570         unsigned int handled = 1;
5571
5572         /* In INTx mode, it is possible for the interrupt to arrive at
5573          * the CPU before the previously posted status block has arrived.
5574          * Reading the PCI State register will confirm whether the
5575          * interrupt is ours and will flush the status block.
5576          */
5577         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5578                 if (tg3_flag(tp, CHIP_RESETTING) ||
5579                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5580                         handled = 0;
5581                         goto out;
5582                 }
5583         }
5584
5585         /*
5586          * Writing any value to intr-mbox-0 clears PCI INTA# and
5587          * chip-internal interrupt pending events.
5588          * Writing non-zero to intr-mbox-0 additionally tells the
5589          * NIC to stop sending us irqs, engaging "in-intr-handler"
5590          * event coalescing.
5591          *
5592          * Flush the mailbox to de-assert the IRQ immediately to prevent
5593          * spurious interrupts.  The flush impacts performance but
5594          * excessive spurious interrupts can be worse in some cases.
5595          */
5596         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5597
5598         /*
5599          * In a shared interrupt configuration, sometimes other devices'
5600          * interrupts will scream.  We record the current status tag here
5601          * so that the above check can report that the screaming interrupts
5602          * are unhandled.  Eventually they will be silenced.
5603          */
5604         tnapi->last_irq_tag = sblk->status_tag;
5605
5606         if (tg3_irq_sync(tp))
5607                 goto out;
5608
5609         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5610
5611         napi_schedule(&tnapi->napi);
5612
5613 out:
5614         return IRQ_RETVAL(handled);
5615 }
5616
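/* With tagged status, hardware advances sblk->status_tag each time it
 * posts a new status block.  Comparing the tag against last_irq_tag
 * (as above) lets the handler reject interrupts for which nothing new
 * was posted; that is how screaming interrupts from devices sharing
 * the line end up reported as unhandled and eventually silenced.
 */
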
5617 /* ISR for interrupt test */
5618 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5619 {
5620         struct tg3_napi *tnapi = dev_id;
5621         struct tg3 *tp = tnapi->tp;
5622         struct tg3_hw_status *sblk = tnapi->hw_status;
5623
5624         if ((sblk->status & SD_STATUS_UPDATED) ||
5625             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5626                 tg3_disable_ints(tp);
5627                 return IRQ_RETVAL(1);
5628         }
5629         return IRQ_RETVAL(0);
5630 }
5631
5632 static int tg3_init_hw(struct tg3 *, int);
5633 static int tg3_halt(struct tg3 *, int, int);
5634
5635 /* Restart hardware after configuration changes, self-test, etc.
5636  * Invoked with tp->lock held.
5637  */
5638 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5639         __releases(tp->lock)
5640         __acquires(tp->lock)
5641 {
5642         int err;
5643
5644         err = tg3_init_hw(tp, reset_phy);
5645         if (err) {
5646                 netdev_err(tp->dev,
5647                            "Failed to re-initialize device, aborting\n");
5648                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5649                 tg3_full_unlock(tp);
5650                 del_timer_sync(&tp->timer);
5651                 tp->irq_sync = 0;
5652                 tg3_napi_enable(tp);
5653                 dev_close(tp->dev);
5654                 tg3_full_lock(tp, 0);
5655         }
5656         return err;
5657 }
5658
5659 #ifdef CONFIG_NET_POLL_CONTROLLER
5660 static void tg3_poll_controller(struct net_device *dev)
5661 {
5662         int i;
5663         struct tg3 *tp = netdev_priv(dev);
5664
5665         if (tg3_irq_sync(tp))
5666                 return;
5667
5668         for (i = 0; i < tp->irq_cnt; i++)
5669                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5670 }
5671 #endif
5672
5673 static void tg3_reset_task(struct work_struct *work)
5674 {
5675         struct tg3 *tp = container_of(work, struct tg3, reset_task);
5676         int err;
5677         unsigned int restart_timer;
5678
5679         tg3_full_lock(tp, 0);
5680
5681         if (!netif_running(tp->dev)) {
5682                 tg3_full_unlock(tp);
5683                 return;
5684         }
5685
5686         tg3_full_unlock(tp);
5687
5688         tg3_phy_stop(tp);
5689
5690         tg3_netif_stop(tp);
5691
5692         tg3_full_lock(tp, 1);
5693
5694         restart_timer = tg3_flag(tp, RESTART_TIMER);
5695         tg3_flag_clear(tp, RESTART_TIMER);
5696
5697         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5698                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5699                 tp->write32_rx_mbox = tg3_write_flush_reg32;
5700                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5701                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5702         }
5703
5704         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5705         err = tg3_init_hw(tp, 1);
5706         if (err)
5707                 goto out;
5708
5709         tg3_netif_start(tp);
5710
5711         if (restart_timer)
5712                 mod_timer(&tp->timer, jiffies + 1);
5713
5714 out:
5715         tg3_full_unlock(tp);
5716
5717         if (!err)
5718                 tg3_phy_start(tp);
5719 }
5720
5721 static void tg3_tx_timeout(struct net_device *dev)
5722 {
5723         struct tg3 *tp = netdev_priv(dev);
5724
5725         if (netif_msg_tx_err(tp)) {
5726                 netdev_err(dev, "transmit timed out, resetting\n");
5727                 tg3_dump_state(tp);
5728         }
5729
5730         schedule_work(&tp->reset_task);
5731 }
5732
5733 /* Test for DMA buffers crossing a 4GB boundary: 4G, 8G, etc. */
5734 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5735 {
5736         u32 base = (u32) mapping & 0xffffffff;
5737
5738         return (base > 0xffffdcc0) && (base + len + 8 < base);
5739 }
5740
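/* Worked example for tg3_4g_overflow_test(): base = 0xffffff00 and
 * len = 0x200 give base + len + 8 = 0x100000108, which truncates to
 * 0x108 in 32 bits; that is less than base, so the buffer is flagged
 * as crossing the 4GB boundary.  The base > 0xffffdcc0 pre-check
 * looks to be a fast path that skips buffers starting too far below
 * the boundary for a maximum-sized frame to wrap.
 */
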
5741 /* Test for DMA addresses > 40-bit */
5742 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5743                                           int len)
5744 {
5745 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5746         if (tg3_flag(tp, 40BIT_DMA_BUG))
5747                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5748         return 0;
5749 #else
5750         return 0;
5751 #endif
5752 }
5753
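/* The #if guard above appears intended to confine the 40-bit check to
 * configurations where bus addresses can actually exceed 2^40; in all
 * other configurations the helper compiles away to a constant 0.
 */
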
5754 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5755                         dma_addr_t mapping, int len, u32 flags,
5756                         u32 mss_and_is_end)
5757 {
5758         struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5759         int is_end = (mss_and_is_end & 0x1);
5760         u32 mss = (mss_and_is_end >> 1);
5761         u32 vlan_tag = 0;
5762
5763         if (is_end)
5764                 flags |= TXD_FLAG_END;
5765         if (flags & TXD_FLAG_VLAN) {
5766                 vlan_tag = flags >> 16;
5767                 flags &= 0xffff;
5768         }
5769         vlan_tag |= (mss << TXD_MSS_SHIFT);
5770
5771         txd->addr_hi = ((u64) mapping >> 32);
5772         txd->addr_lo = ((u64) mapping & 0xffffffff);
5773         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5774         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5775 }
5776
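/* Caller-side packing, for illustration: the MSS and the end-of-packet
 * marker travel in one argument as (mss << 1) | is_end, e.g.
 *
 *      tg3_set_txd(tnapi, entry, mapping, len, base_flags,
 *                  (i == last) | (mss << 1));
 *
 * tg3_set_txd() splits them apart again and stores the MSS next to any
 * VLAN tag in the descriptor's vlan_tag word.
 */
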
5777 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5778                                 struct sk_buff *skb, int last)
5779 {
5780         int i;
5781         u32 entry = tnapi->tx_prod;
5782         struct ring_info *txb = &tnapi->tx_buffers[entry];
5783
5784         pci_unmap_single(tnapi->tp->pdev,
5785                          dma_unmap_addr(txb, mapping),
5786                          skb_headlen(skb),
5787                          PCI_DMA_TODEVICE);
5788         for (i = 0; i < last; i++) {
5789                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5790
5791                 entry = NEXT_TX(entry);
5792                 txb = &tnapi->tx_buffers[entry];
5793
5794                 pci_unmap_page(tnapi->tp->pdev,
5795                                dma_unmap_addr(txb, mapping),
5796                                frag->size, PCI_DMA_TODEVICE);
5797         }
5798 }
5799
5800 /* Work around the 4GB and 40-bit hardware DMA bugs. */
5801 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5802                                        struct sk_buff *skb,
5803                                        u32 base_flags, u32 mss)
5804 {
5805         struct tg3 *tp = tnapi->tp;
5806         struct sk_buff *new_skb;
5807         dma_addr_t new_addr = 0;
5808         u32 entry = tnapi->tx_prod;
5809         int ret = 0;
5810
5811         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5812                 new_skb = skb_copy(skb, GFP_ATOMIC);
5813         else {
5814                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5815
5816                 new_skb = skb_copy_expand(skb,
5817                                           skb_headroom(skb) + more_headroom,
5818                                           skb_tailroom(skb), GFP_ATOMIC);
5819         }
5820
5821         if (!new_skb) {
5822                 ret = -1;
5823         } else {
5824                 /* New SKB is guaranteed to be linear. */
5825                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5826                                           PCI_DMA_TODEVICE);
5827                 /* Make sure the mapping succeeded */
5828                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5829                         ret = -1;
5830                         dev_kfree_skb(new_skb);
5831
5832                 /* Make sure new skb does not cross any 4G boundaries.
5833                  * Drop the packet if it does.
5834                  */
5835                 } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
5836                            tg3_4g_overflow_test(new_addr, new_skb->len)) {
5837                         pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5838                                          PCI_DMA_TODEVICE);
5839                         ret = -1;
5840                         dev_kfree_skb(new_skb);
5841                 } else {
5842                         tnapi->tx_buffers[entry].skb = new_skb;
5843                         dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5844                                            mapping, new_addr);
5845
5846                         tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5847                                     base_flags, 1 | (mss << 1));
5848                 }
5849         }
5850
5851         dev_kfree_skb(skb);
5852
5853         return ret;
5854 }
5855
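/* The workaround above copies the offending skb into a new linear skb
 * (with extra headroom on the 5701 so data can be 4-byte aligned),
 * maps it afresh, re-runs the 4GB boundary test, and only then posts a
 * single descriptor for the whole packet.  The original skb is freed
 * whether or not the copy succeeds.
 */
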
5856 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5857
5858 /* Use GSO to work around a rare TSO bug that may be triggered when the
5859  * TSO header is greater than 80 bytes.
5860  */
5861 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5862 {
5863         struct sk_buff *segs, *nskb;
5864         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5865
5866         /* Estimate the number of fragments in the worst case */
5867         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5868                 netif_stop_queue(tp->dev);
5869
5870                 /* netif_tx_stop_queue() must be done before checking
5871                  * the tx index in tg3_tx_avail() below, because in
5872                  * tg3_tx(), we update tx index before checking for
5873                  * netif_tx_queue_stopped().
5874                  */
5875                 smp_mb();
5876                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5877                         return NETDEV_TX_BUSY;
5878
5879                 netif_wake_queue(tp->dev);
5880         }
5881
5882         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5883         if (IS_ERR(segs))
5884                 goto tg3_tso_bug_end;
5885
5886         do {
5887                 nskb = segs;
5888                 segs = segs->next;
5889                 nskb->next = NULL;
5890                 tg3_start_xmit(nskb, tp->dev);
5891         } while (segs);
5892
5893 tg3_tso_bug_end:
5894         dev_kfree_skb(skb);
5895
5896         return NETDEV_TX_OK;
5897 }
5898
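/* Back-of-the-envelope for frag_cnt_est above: each segment produced
 * by skb_gso_segment() needs only a few descriptors, so gso_segs * 3
 * is taken as the worst case.  A 64KB TSO packet with an MSS of 1448,
 * for example, yields roughly 45 segments and so reserves about 135
 * descriptor slots before segmentation is attempted.
 */
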
5899 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5900  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5901  */
5902 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5903 {
5904         struct tg3 *tp = netdev_priv(dev);
5905         u32 len, entry, base_flags, mss;
5906         int i = -1, would_hit_hwbug;
5907         dma_addr_t mapping;
5908         struct tg3_napi *tnapi;
5909         struct netdev_queue *txq;
5910         unsigned int last;
5911
5912         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5913         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5914         if (tg3_flag(tp, ENABLE_TSS))
5915                 tnapi++;
5916
5917         /* We are running in a BH-disabled context with netif_tx_lock
5918          * and TX reclaim runs via tp->napi.poll inside of a software
5919          * interrupt.  Furthermore, IRQ processing runs lockless so we have
5920          * no IRQ context deadlocks to worry about either.  Rejoice!
5921          */
5922         if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5923                 if (!netif_tx_queue_stopped(txq)) {
5924                         netif_tx_stop_queue(txq);
5925
5926                         /* This is a hard error, log it. */
5927                         netdev_err(dev,
5928                                    "BUG! Tx Ring full when queue awake!\n");
5929                 }
5930                 return NETDEV_TX_BUSY;
5931         }
5932
5933         entry = tnapi->tx_prod;
5934         base_flags = 0;
5935         if (skb->ip_summed == CHECKSUM_PARTIAL)
5936                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5937
5938         mss = skb_shinfo(skb)->gso_size;
5939         if (mss) {
5940                 struct iphdr *iph;
5941                 u32 tcp_opt_len, hdr_len;
5942
5943                 if (skb_header_cloned(skb) &&
5944                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5945                         dev_kfree_skb(skb);
5946                         goto out_unlock;
5947                 }
5948
5949                 iph = ip_hdr(skb);
5950                 tcp_opt_len = tcp_optlen(skb);
5951
5952                 if (skb_is_gso_v6(skb)) {
5953                         hdr_len = skb_headlen(skb) - ETH_HLEN;
5954                 } else {
5955                         u32 ip_tcp_len;
5956
5957                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5958                         hdr_len = ip_tcp_len + tcp_opt_len;
5959
5960                         iph->check = 0;
5961                         iph->tot_len = htons(mss + hdr_len);
5962                 }
5963
5964                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5965                     tg3_flag(tp, TSO_BUG))
5966                         return tg3_tso_bug(tp, skb);
5967
5968                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5969                                TXD_FLAG_CPU_POST_DMA);
5970
5971                 if (tg3_flag(tp, HW_TSO_1) ||
5972                     tg3_flag(tp, HW_TSO_2) ||
5973                     tg3_flag(tp, HW_TSO_3)) {
5974                         tcp_hdr(skb)->check = 0;
5975                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5976                 } else
5977                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5978                                                                  iph->daddr, 0,
5979                                                                  IPPROTO_TCP,
5980                                                                  0);
5981
5982                 if (tg3_flag(tp, HW_TSO_3)) {
5983                         mss |= (hdr_len & 0xc) << 12;
5984                         if (hdr_len & 0x10)
5985                                 base_flags |= 0x00000010;
5986                         base_flags |= (hdr_len & 0x3e0) << 5;
5987                 } else if (tg3_flag(tp, HW_TSO_2))
5988                         mss |= hdr_len << 9;
5989                 else if (tg3_flag(tp, HW_TSO_1) ||
5990                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5991                         if (tcp_opt_len || iph->ihl > 5) {
5992                                 int tsflags;
5993
5994                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5995                                 mss |= (tsflags << 11);
5996                         }
5997                 } else {
5998                         if (tcp_opt_len || iph->ihl > 5) {
5999                                 int tsflags;
6000
6001                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6002                                 base_flags |= tsflags << 12;
6003                         }
6004                 }
6005         }
6006
6007         if (vlan_tx_tag_present(skb))
6008                 base_flags |= (TXD_FLAG_VLAN |
6009                                (vlan_tx_tag_get(skb) << 16));
6010
6011         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6012             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6013                 base_flags |= TXD_FLAG_JMB_PKT;
6014
6015         len = skb_headlen(skb);
6016
6017         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6018         if (pci_dma_mapping_error(tp->pdev, mapping)) {
6019                 dev_kfree_skb(skb);
6020                 goto out_unlock;
6021         }
6022
6023         tnapi->tx_buffers[entry].skb = skb;
6024         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6025
6026         would_hit_hwbug = 0;
6027
6028         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6029                 would_hit_hwbug = 1;
6030
6031         if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6032             tg3_4g_overflow_test(mapping, len))
6033                 would_hit_hwbug = 1;
6034
6035         if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6036             tg3_40bit_overflow_test(tp, mapping, len))
6037                 would_hit_hwbug = 1;
6038
6039         if (tg3_flag(tp, 5701_DMA_BUG))
6040                 would_hit_hwbug = 1;
6041
6042         tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6043                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6044
6045         entry = NEXT_TX(entry);
6046
6047         /* Now loop through additional data fragments, and queue them. */
6048         if (skb_shinfo(skb)->nr_frags > 0) {
6049                 last = skb_shinfo(skb)->nr_frags - 1;
6050                 for (i = 0; i <= last; i++) {
6051                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6052
6053                         len = frag->size;
6054                         mapping = pci_map_page(tp->pdev,
6055                                                frag->page,
6056                                                frag->page_offset,
6057                                                len, PCI_DMA_TODEVICE);
6058
6059                         tnapi->tx_buffers[entry].skb = NULL;
6060                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6061                                            mapping);
6062                         if (pci_dma_mapping_error(tp->pdev, mapping))
6063                                 goto dma_error;
6064
6065                         if (tg3_flag(tp, SHORT_DMA_BUG) &&
6066                             len <= 8)
6067                                 would_hit_hwbug = 1;
6068
6069                         if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6070                             tg3_4g_overflow_test(mapping, len))
6071                                 would_hit_hwbug = 1;
6072
6073                         if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6074                             tg3_40bit_overflow_test(tp, mapping, len))
6075                                 would_hit_hwbug = 1;
6076
6077                         if (tg3_flag(tp, HW_TSO_1) ||
6078                             tg3_flag(tp, HW_TSO_2) ||
6079                             tg3_flag(tp, HW_TSO_3))
6080                                 tg3_set_txd(tnapi, entry, mapping, len,
6081                                             base_flags, (i == last)|(mss << 1));
6082                         else
6083                                 tg3_set_txd(tnapi, entry, mapping, len,
6084                                             base_flags, (i == last));
6085
6086                         entry = NEXT_TX(entry);
6087                 }
6088         }
6089
6090         if (would_hit_hwbug) {
6091                 tg3_skb_error_unmap(tnapi, skb, i);
6092
6093                 /* If the workaround fails due to memory/mapping
6094                  * failure, silently drop this packet.
6095                  */
6096                 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6097                         goto out_unlock;
6098
6099                 entry = NEXT_TX(tnapi->tx_prod);
6100         }
6101
6102         /* Packets are ready, update the Tx producer idx locally and on the card. */
6103         tw32_tx_mbox(tnapi->prodmbox, entry);
6104
6105         tnapi->tx_prod = entry;
6106         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6107                 netif_tx_stop_queue(txq);
6108
6109                 /* netif_tx_stop_queue() must be done before checking
6110                  * the tx index in tg3_tx_avail() below, because in
6111                  * tg3_tx(), we update tx index before checking for
6112                  * netif_tx_queue_stopped().
6113                  */
6114                 smp_mb();
6115                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6116                         netif_tx_wake_queue(txq);
6117         }
6118
6119 out_unlock:
6120         mmiowb();
6121
6122         return NETDEV_TX_OK;
6123
6124 dma_error:
6125         tg3_skb_error_unmap(tnapi, skb, i);
6126         dev_kfree_skb(skb);
6127         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6128         return NETDEV_TX_OK;
6129 }
6130
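/* Queue-flow note for tg3_start_xmit(): the queue is stopped once
 * fewer than MAX_SKB_FRAGS + 1 descriptors remain, i.e. when even a
 * maximally fragmented skb might no longer fit.  The smp_mb() pairs
 * with the consumer-side update in tg3_tx() so a wakeup cannot be
 * lost between stopping the queue and re-checking availability.
 */
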
6131 static void tg3_set_loopback(struct net_device *dev, u32 features)
6132 {
6133         struct tg3 *tp = netdev_priv(dev);
6134
6135         if (features & NETIF_F_LOOPBACK) {
6136                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6137                         return;
6138
6139                 /*
6140                  * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6141                  * loopback mode if Half-Duplex mode was negotiated earlier.
6142                  */
6143                 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6144
6145                 /* Enable internal MAC loopback mode */
6146                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6147                 spin_lock_bh(&tp->lock);
6148                 tw32(MAC_MODE, tp->mac_mode);
6149                 netif_carrier_on(tp->dev);
6150                 spin_unlock_bh(&tp->lock);
6151                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6152         } else {
6153                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6154                         return;
6155
6156                 /* Disable internal MAC loopback mode */
6157                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6158                 spin_lock_bh(&tp->lock);
6159                 tw32(MAC_MODE, tp->mac_mode);
6160                 /* Force link status check */
6161                 tg3_setup_phy(tp, 1);
6162                 spin_unlock_bh(&tp->lock);
6163                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6164         }
6165 }
6166
6167 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6168 {
6169         struct tg3 *tp = netdev_priv(dev);
6170
6171         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6172                 features &= ~NETIF_F_ALL_TSO;
6173
6174         return features;
6175 }
6176
6177 static int tg3_set_features(struct net_device *dev, u32 features)
6178 {
6179         u32 changed = dev->features ^ features;
6180
6181         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6182                 tg3_set_loopback(dev, features);
6183
6184         return 0;
6185 }
6186
6187 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6188                                int new_mtu)
6189 {
6190         dev->mtu = new_mtu;
6191
6192         if (new_mtu > ETH_DATA_LEN) {
6193                 if (tg3_flag(tp, 5780_CLASS)) {
6194                         netdev_update_features(dev);
6195                         tg3_flag_clear(tp, TSO_CAPABLE);
6196                 } else {
6197                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
6198                 }
6199         } else {
6200                 if (tg3_flag(tp, 5780_CLASS)) {
6201                         tg3_flag_set(tp, TSO_CAPABLE);
6202                         netdev_update_features(dev);
6203                 }
6204                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6205         }
6206 }
6207
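/* MTU policy sketch for tg3_set_mtu() above: an MTU above ETH_DATA_LEN
 * normally enables the jumbo producer ring; 5780-class chips instead
 * keep the standard ring with larger buffers and must give up TSO,
 * which is why netdev_update_features() brackets the flag changes.
 */
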
6208 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6209 {
6210         struct tg3 *tp = netdev_priv(dev);
6211         int err;
6212
6213         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6214                 return -EINVAL;
6215
6216         if (!netif_running(dev)) {
6217                 /* We'll just catch it later when the
6218                  * device is brought up.
6219                  */
6220                 tg3_set_mtu(dev, tp, new_mtu);
6221                 return 0;
6222         }
6223
6224         tg3_phy_stop(tp);
6225
6226         tg3_netif_stop(tp);
6227
6228         tg3_full_lock(tp, 1);
6229
6230         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6231
6232         tg3_set_mtu(dev, tp, new_mtu);
6233
6234         err = tg3_restart_hw(tp, 0);
6235
6236         if (!err)
6237                 tg3_netif_start(tp);
6238
6239         tg3_full_unlock(tp);
6240
6241         if (!err)
6242                 tg3_phy_start(tp);
6243
6244         return err;
6245 }
6246
6247 static void tg3_rx_prodring_free(struct tg3 *tp,
6248                                  struct tg3_rx_prodring_set *tpr)
6249 {
6250         int i;
6251
6252         if (tpr != &tp->napi[0].prodring) {
6253                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6254                      i = (i + 1) & tp->rx_std_ring_mask)
6255                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6256                                         tp->rx_pkt_map_sz);
6257
6258                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6259                         for (i = tpr->rx_jmb_cons_idx;
6260                              i != tpr->rx_jmb_prod_idx;
6261                              i = (i + 1) & tp->rx_jmb_ring_mask) {
6262                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6263                                                 TG3_RX_JMB_MAP_SZ);
6264                         }
6265                 }
6266
6267                 return;
6268         }
6269
6270         for (i = 0; i <= tp->rx_std_ring_mask; i++)
6271                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6272                                 tp->rx_pkt_map_sz);
6273
6274         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6275                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6276                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6277                                         TG3_RX_JMB_MAP_SZ);
6278         }
6279 }
6280
6281 /* Initialize rx rings for packet processing.
6282  *
6283  * The chip has been shut down and the driver detached from
6284  * the networking stack, so no interrupts or new tx packets will
6285  * end up in the driver.  tp->{tx,}lock are held and thus
6286  * we may not sleep.
6287  */
6288 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6289                                  struct tg3_rx_prodring_set *tpr)
6290 {
6291         u32 i, rx_pkt_dma_sz;
6292
6293         tpr->rx_std_cons_idx = 0;
6294         tpr->rx_std_prod_idx = 0;
6295         tpr->rx_jmb_cons_idx = 0;
6296         tpr->rx_jmb_prod_idx = 0;
6297
6298         if (tpr != &tp->napi[0].prodring) {
6299                 memset(&tpr->rx_std_buffers[0], 0,
6300                        TG3_RX_STD_BUFF_RING_SIZE(tp));
6301                 if (tpr->rx_jmb_buffers)
6302                         memset(&tpr->rx_jmb_buffers[0], 0,
6303                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
6304                 goto done;
6305         }
6306
6307         /* Zero out all descriptors. */
6308         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6309
6310         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6311         if (tg3_flag(tp, 5780_CLASS) &&
6312             tp->dev->mtu > ETH_DATA_LEN)
6313                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6314         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6315
6316         /* Initialize invariants of the rings; we only set this
6317          * stuff once.  This works because the card does not
6318          * write into the rx buffer posting rings.
6319          */
6320         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6321                 struct tg3_rx_buffer_desc *rxd;
6322
6323                 rxd = &tpr->rx_std[i];
6324                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6325                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6326                 rxd->opaque = (RXD_OPAQUE_RING_STD |
6327                                (i << RXD_OPAQUE_INDEX_SHIFT));
6328         }
6329
6330         /* Now allocate fresh SKBs for each rx ring. */
6331         for (i = 0; i < tp->rx_pending; i++) {
6332                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6333                         netdev_warn(tp->dev,
6334                                     "Using a smaller RX standard ring. Only "
6335                                     "%d out of %d buffers were allocated "
6336                                     "successfully\n", i, tp->rx_pending);
6337                         if (i == 0)
6338                                 goto initfail;
6339                         tp->rx_pending = i;
6340                         break;
6341                 }
6342         }
6343
6344         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6345                 goto done;
6346
6347         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6348
6349         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6350                 goto done;
6351
6352         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6353                 struct tg3_rx_buffer_desc *rxd;
6354
6355                 rxd = &tpr->rx_jmb[i].std;
6356                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6357                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6358                                   RXD_FLAG_JUMBO;
6359                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6360                        (i << RXD_OPAQUE_INDEX_SHIFT));
6361         }
6362
6363         for (i = 0; i < tp->rx_jumbo_pending; i++) {
6364                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6365                         netdev_warn(tp->dev,
6366                                     "Using a smaller RX jumbo ring. Only %d "
6367                                     "out of %d buffers were allocated "
6368                                     "successfully\n", i, tp->rx_jumbo_pending);
6369                         if (i == 0)
6370                                 goto initfail;
6371                         tp->rx_jumbo_pending = i;
6372                         break;
6373                 }
6374         }
6375
6376 done:
6377         return 0;
6378
6379 initfail:
6380         tg3_rx_prodring_free(tp, tpr);
6381         return -ENOMEM;
6382 }
6383
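/* Ring population above is deliberately forgiving: if fewer than
 * rx_pending (or rx_jumbo_pending) skbs can be allocated, the ring is
 * simply shrunk to what was obtained; only a completely empty ring is
 * treated as fatal and unwound through tg3_rx_prodring_free().
 */
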
6384 static void tg3_rx_prodring_fini(struct tg3 *tp,
6385                                  struct tg3_rx_prodring_set *tpr)
6386 {
6387         kfree(tpr->rx_std_buffers);
6388         tpr->rx_std_buffers = NULL;
6389         kfree(tpr->rx_jmb_buffers);
6390         tpr->rx_jmb_buffers = NULL;
6391         if (tpr->rx_std) {
6392                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6393                                   tpr->rx_std, tpr->rx_std_mapping);
6394                 tpr->rx_std = NULL;
6395         }
6396         if (tpr->rx_jmb) {
6397                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6398                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
6399                 tpr->rx_jmb = NULL;
6400         }
6401 }
6402
6403 static int tg3_rx_prodring_init(struct tg3 *tp,
6404                                 struct tg3_rx_prodring_set *tpr)
6405 {
6406         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6407                                       GFP_KERNEL);
6408         if (!tpr->rx_std_buffers)
6409                 return -ENOMEM;
6410
6411         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6412                                          TG3_RX_STD_RING_BYTES(tp),
6413                                          &tpr->rx_std_mapping,
6414                                          GFP_KERNEL);
6415         if (!tpr->rx_std)
6416                 goto err_out;
6417
6418         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6419                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6420                                               GFP_KERNEL);
6421                 if (!tpr->rx_jmb_buffers)
6422                         goto err_out;
6423
6424                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6425                                                  TG3_RX_JMB_RING_BYTES(tp),
6426                                                  &tpr->rx_jmb_mapping,
6427                                                  GFP_KERNEL);
6428                 if (!tpr->rx_jmb)
6429                         goto err_out;
6430         }
6431
6432         return 0;
6433
6434 err_out:
6435         tg3_rx_prodring_fini(tp, tpr);
6436         return -ENOMEM;
6437 }
6438
6439 /* Free up pending packets in all rx/tx rings.
6440  *
6441  * The chip has been shut down and the driver detached from
6442  * the networking stack, so no interrupts or new tx packets will
6443  * end up in the driver.  tp->{tx,}lock is not held and we are not
6444  * in an interrupt context and thus may sleep.
6445  */
6446 static void tg3_free_rings(struct tg3 *tp)
6447 {
6448         int i, j;
6449
6450         for (j = 0; j < tp->irq_cnt; j++) {
6451                 struct tg3_napi *tnapi = &tp->napi[j];
6452
6453                 tg3_rx_prodring_free(tp, &tnapi->prodring);
6454
6455                 if (!tnapi->tx_buffers)
6456                         continue;
6457
6458                 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6459                         struct ring_info *txp;
6460                         struct sk_buff *skb;
6461                         unsigned int k;
6462
6463                         txp = &tnapi->tx_buffers[i];
6464                         skb = txp->skb;
6465
6466                         if (skb == NULL) {
6467                                 i++;
6468                                 continue;
6469                         }
6470
6471                         pci_unmap_single(tp->pdev,
6472                                          dma_unmap_addr(txp, mapping),
6473                                          skb_headlen(skb),
6474                                          PCI_DMA_TODEVICE);
6475                         txp->skb = NULL;
6476
6477                         i++;
6478
6479                         for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6480                                 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6481                                 pci_unmap_page(tp->pdev,
6482                                                dma_unmap_addr(txp, mapping),
6483                                                skb_shinfo(skb)->frags[k].size,
6484                                                PCI_DMA_TODEVICE);
6485                                 i++;
6486                         }
6487
6488                         dev_kfree_skb_any(skb);
6489                 }
6490         }
6491 }
6492
6493 /* Initialize tx/rx rings for packet processing.
6494  *
6495  * The chip has been shut down and the driver detached from
6496  * the networking stack, so no interrupts or new tx packets will
6497  * end up in the driver.  tp->{tx,}lock are held and thus
6498  * we may not sleep.
6499  */
6500 static int tg3_init_rings(struct tg3 *tp)
6501 {
6502         int i;
6503
6504         /* Free up all the SKBs. */
6505         tg3_free_rings(tp);
6506
6507         for (i = 0; i < tp->irq_cnt; i++) {
6508                 struct tg3_napi *tnapi = &tp->napi[i];
6509
6510                 tnapi->last_tag = 0;
6511                 tnapi->last_irq_tag = 0;
6512                 tnapi->hw_status->status = 0;
6513                 tnapi->hw_status->status_tag = 0;
6514                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6515
6516                 tnapi->tx_prod = 0;
6517                 tnapi->tx_cons = 0;
6518                 if (tnapi->tx_ring)
6519                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6520
6521                 tnapi->rx_rcb_ptr = 0;
6522                 if (tnapi->rx_rcb)
6523                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6524
6525                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6526                         tg3_free_rings(tp);
6527                         return -ENOMEM;
6528                 }
6529         }
6530
6531         return 0;
6532 }
6533
6534 /*
6535  * Must not be invoked with interrupt sources disabled and
6536  * the hardware shut down.
6537  */
6538 static void tg3_free_consistent(struct tg3 *tp)
6539 {
6540         int i;
6541
6542         for (i = 0; i < tp->irq_cnt; i++) {
6543                 struct tg3_napi *tnapi = &tp->napi[i];
6544
6545                 if (tnapi->tx_ring) {
6546                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6547                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
6548                         tnapi->tx_ring = NULL;
6549                 }
6550
6551                 kfree(tnapi->tx_buffers);
6552                 tnapi->tx_buffers = NULL;
6553
6554                 if (tnapi->rx_rcb) {
6555                         dma_free_coherent(&tp->pdev->dev,
6556                                           TG3_RX_RCB_RING_BYTES(tp),
6557                                           tnapi->rx_rcb,
6558                                           tnapi->rx_rcb_mapping);
6559                         tnapi->rx_rcb = NULL;
6560                 }
6561
6562                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6563
6564                 if (tnapi->hw_status) {
6565                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6566                                           tnapi->hw_status,
6567                                           tnapi->status_mapping);
6568                         tnapi->hw_status = NULL;
6569                 }
6570         }
6571
6572         if (tp->hw_stats) {
6573                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6574                                   tp->hw_stats, tp->stats_mapping);
6575                 tp->hw_stats = NULL;
6576         }
6577 }
6578
6579 /*
6580  * Must not be invoked with interrupt sources disabled and
6581  * the hardware shut down.  Can sleep.
6582  */
6583 static int tg3_alloc_consistent(struct tg3 *tp)
6584 {
6585         int i;
6586
6587         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6588                                           sizeof(struct tg3_hw_stats),
6589                                           &tp->stats_mapping,
6590                                           GFP_KERNEL);
6591         if (!tp->hw_stats)
6592                 goto err_out;
6593
6594         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6595
6596         for (i = 0; i < tp->irq_cnt; i++) {
6597                 struct tg3_napi *tnapi = &tp->napi[i];
6598                 struct tg3_hw_status *sblk;
6599
6600                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6601                                                       TG3_HW_STATUS_SIZE,
6602                                                       &tnapi->status_mapping,
6603                                                       GFP_KERNEL);
6604                 if (!tnapi->hw_status)
6605                         goto err_out;
6606
6607                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6608                 sblk = tnapi->hw_status;
6609
6610                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6611                         goto err_out;
6612
6613                 /* If multivector TSS is enabled, vector 0 does not handle
6614                  * tx interrupts.  Don't allocate any resources for it.
6615                  */
6616                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6617                     (i && tg3_flag(tp, ENABLE_TSS))) {
6618                         tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6619                                                     TG3_TX_RING_SIZE,
6620                                                     GFP_KERNEL);
6621                         if (!tnapi->tx_buffers)
6622                                 goto err_out;
6623
6624                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6625                                                             TG3_TX_RING_BYTES,
6626                                                         &tnapi->tx_desc_mapping,
6627                                                             GFP_KERNEL);
6628                         if (!tnapi->tx_ring)
6629                                 goto err_out;
6630                 }
6631
6632                 /*
6633                  * When RSS is enabled, the status block format changes
6634                  * slightly.  The "rx_jumbo_consumer", "reserved",
6635                  * and "rx_mini_consumer" members get mapped to the
6636                  * other three rx return ring producer indexes.
6637                  */
6638                 switch (i) {
6639                 default:
6640                         if (tg3_flag(tp, ENABLE_RSS)) {
6641                                 tnapi->rx_rcb_prod_idx = NULL;
6642                                 break;
6643                         }
6644                         /* Fall through */
6645                 case 1:
6646                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6647                         break;
6648                 case 2:
6649                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6650                         break;
6651                 case 3:
6652                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
6653                         break;
6654                 case 4:
6655                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6656                         break;
6657                 }
6658
6659                 /*
6660                  * If multivector RSS is enabled, vector 0 does not handle
6661                  * rx or tx interrupts.  Don't allocate any resources for it.
6662                  */
6663                 if (!i && tg3_flag(tp, ENABLE_RSS))
6664                         continue;
6665
6666                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6667                                                    TG3_RX_RCB_RING_BYTES(tp),
6668                                                    &tnapi->rx_rcb_mapping,
6669                                                    GFP_KERNEL);
6670                 if (!tnapi->rx_rcb)
6671                         goto err_out;
6672
6673                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6674         }
6675
6676         return 0;
6677
6678 err_out:
6679         tg3_free_consistent(tp);
6680         return -ENOMEM;
6681 }
6682
6683 #define MAX_WAIT_CNT 1000
6684
6685 /* To stop a block, clear the enable bit and poll till it
6686  * clears.  tp->lock is held.
6687  */
6688 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6689 {
6690         unsigned int i;
6691         u32 val;
6692
6693         if (tg3_flag(tp, 5705_PLUS)) {
6694                 switch (ofs) {
6695                 case RCVLSC_MODE:
6696                 case DMAC_MODE:
6697                 case MBFREE_MODE:
6698                 case BUFMGR_MODE:
6699                 case MEMARB_MODE:
6700                         /* We can't enable/disable these bits of the
6701                          * 5705/5750, just say success.
6702                          */
6703                         return 0;
6704
6705                 default:
6706                         break;
6707                 }
6708         }
6709
6710         val = tr32(ofs);
6711         val &= ~enable_bit;
6712         tw32_f(ofs, val);
6713
6714         for (i = 0; i < MAX_WAIT_CNT; i++) {
6715                 udelay(100);
6716                 val = tr32(ofs);
6717                 if ((val & enable_bit) == 0)
6718                         break;
6719         }
6720
6721         if (i == MAX_WAIT_CNT && !silent) {
6722                 dev_err(&tp->pdev->dev,
6723                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6724                         ofs, enable_bit);
6725                 return -ENODEV;
6726         }
6727
6728         return 0;
6729 }
6730
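/* Timing example for tg3_stop_block(): MAX_WAIT_CNT polls of 100us
 * give each block up to ~100ms to quiesce after its enable bit is
 * cleared, e.g. tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE,
 * silent) for the read DMA engine.
 */
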
6731 /* tp->lock is held. */
6732 static int tg3_abort_hw(struct tg3 *tp, int silent)
6733 {
6734         int i, err;
6735
6736         tg3_disable_ints(tp);
6737
6738         tp->rx_mode &= ~RX_MODE_ENABLE;
6739         tw32_f(MAC_RX_MODE, tp->rx_mode);
6740         udelay(10);
6741
6742         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6743         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6744         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6745         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6746         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6747         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6748
6749         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6750         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6751         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6752         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6753         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6754         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6755         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6756
6757         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6758         tw32_f(MAC_MODE, tp->mac_mode);
6759         udelay(40);
6760
6761         tp->tx_mode &= ~TX_MODE_ENABLE;
6762         tw32_f(MAC_TX_MODE, tp->tx_mode);
6763
6764         for (i = 0; i < MAX_WAIT_CNT; i++) {
6765                 udelay(100);
6766                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6767                         break;
6768         }
6769         if (i >= MAX_WAIT_CNT) {
6770                 dev_err(&tp->pdev->dev,
6771                         "%s timed out, TX_MODE_ENABLE will not clear "
6772                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6773                 err |= -ENODEV;
6774         }
6775
6776         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6777         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6778         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6779
6780         tw32(FTQ_RESET, 0xffffffff);
6781         tw32(FTQ_RESET, 0x00000000);
6782
6783         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6784         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6785
6786         for (i = 0; i < tp->irq_cnt; i++) {
6787                 struct tg3_napi *tnapi = &tp->napi[i];
6788                 if (tnapi->hw_status)
6789                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6790         }
6791         if (tp->hw_stats)
6792                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6793
6794         return err;
6795 }
6796
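/* Shutdown ordering in tg3_abort_hw() above: the receive blocks are
 * stopped first, then the send blocks and DMA engines, then the MAC
 * transmitter, and finally host coalescing, the buffer manager and
 * the memory arbiter, so that no block is disabled while an earlier
 * stage can still feed it work.
 */
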
6797 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6798 {
6799         int i;
6800         u32 apedata;
6801
6802         /* NCSI does not support APE events */
6803         if (tg3_flag(tp, APE_HAS_NCSI))
6804                 return;
6805
6806         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6807         if (apedata != APE_SEG_SIG_MAGIC)
6808                 return;
6809
6810         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6811         if (!(apedata & APE_FW_STATUS_READY))
6812                 return;
6813
6814         /* Wait for up to 1 millisecond for APE to service previous event. */
6815         for (i = 0; i < 10; i++) {
6816                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6817                         return;
6818
6819                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6820
6821                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6822                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6823                                         event | APE_EVENT_STATUS_EVENT_PENDING);
6824
6825                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6826
6827                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6828                         break;
6829
6830                 udelay(100);
6831         }
6832
6833         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6834                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6835 }
6836
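/* APE event handshake: after validating the shared-memory signature
 * and the firmware-ready bit, the loop above spends at most ~1ms (10
 * polls of 100us) waiting for a previous event to drain before
 * latching the new one and ringing APE_EVENT_1.
 */
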
6837 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6838 {
6839         u32 event;
6840         u32 apedata;
6841
6842         if (!tg3_flag(tp, ENABLE_APE))
6843                 return;
6844
6845         switch (kind) {
6846         case RESET_KIND_INIT:
6847                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6848                                 APE_HOST_SEG_SIG_MAGIC);
6849                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6850                                 APE_HOST_SEG_LEN_MAGIC);
6851                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6852                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6853                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6854                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6855                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6856                                 APE_HOST_BEHAV_NO_PHYLOCK);
6857                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6858                                     TG3_APE_HOST_DRVR_STATE_START);
6859
6860                 event = APE_EVENT_STATUS_STATE_START;
6861                 break;
6862         case RESET_KIND_SHUTDOWN:
6863                 /* With the interface we are currently using,
6864                  * APE does not track driver state.  Wiping
6865                  * out the HOST SEGMENT SIGNATURE forces
6866                  * the APE to assume OS absent status.
6867                  */
6868                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6869
6870                 if (device_may_wakeup(&tp->pdev->dev) &&
6871                     tg3_flag(tp, WOL_ENABLE)) {
6872                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6873                                             TG3_APE_HOST_WOL_SPEED_AUTO);
6874                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6875                 } else
6876                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6877
6878                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6879
6880                 event = APE_EVENT_STATUS_STATE_UNLOAD;
6881                 break;
6882         case RESET_KIND_SUSPEND:
6883                 event = APE_EVENT_STATUS_STATE_SUSPEND;
6884                 break;
6885         default:
6886                 return;
6887         }
6888
6889         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6890
6891         tg3_ape_send_event(tp, event);
6892 }
6893
6894 /* tp->lock is held. */
6895 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6896 {
6897         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6898                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6899
6900         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6901                 switch (kind) {
6902                 case RESET_KIND_INIT:
6903                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6904                                       DRV_STATE_START);
6905                         break;
6906
6907                 case RESET_KIND_SHUTDOWN:
6908                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6909                                       DRV_STATE_UNLOAD);
6910                         break;
6911
6912                 case RESET_KIND_SUSPEND:
6913                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6914                                       DRV_STATE_SUSPEND);
6915                         break;
6916
6917                 default:
6918                         break;
6919                 }
6920         }
6921
6922         if (kind == RESET_KIND_INIT ||
6923             kind == RESET_KIND_SUSPEND)
6924                 tg3_ape_driver_state_change(tp, kind);
6925 }
6926
6927 /* tp->lock is held. */
6928 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6929 {
6930         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6931                 switch (kind) {
6932                 case RESET_KIND_INIT:
6933                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6934                                       DRV_STATE_START_DONE);
6935                         break;
6936
6937                 case RESET_KIND_SHUTDOWN:
6938                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6939                                       DRV_STATE_UNLOAD_DONE);
6940                         break;
6941
6942                 default:
6943                         break;
6944                 }
6945         }
6946
6947         if (kind == RESET_KIND_SHUTDOWN)
6948                 tg3_ape_driver_state_change(tp, kind);
6949 }
6950
6951 /* tp->lock is held. */
6952 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6953 {
6954         if (tg3_flag(tp, ENABLE_ASF)) {
6955                 switch (kind) {
6956                 case RESET_KIND_INIT:
6957                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6958                                       DRV_STATE_START);
6959                         break;
6960
6961                 case RESET_KIND_SHUTDOWN:
6962                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6963                                       DRV_STATE_UNLOAD);
6964                         break;
6965
6966                 case RESET_KIND_SUSPEND:
6967                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6968                                       DRV_STATE_SUSPEND);
6969                         break;
6970
6971                 default:
6972                         break;
6973                 }
6974         }
6975 }
6976
6977 static int tg3_poll_fw(struct tg3 *tp)
6978 {
6979         int i;
6980         u32 val;
6981
6982         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6983                 /* Wait up to 20ms for init done. */
6984                 for (i = 0; i < 200; i++) {
6985                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6986                                 return 0;
6987                         udelay(100);
6988                 }
6989                 return -ENODEV;
6990         }
6991
6992         /* Wait for firmware initialization to complete. */
6993         for (i = 0; i < 100000; i++) {
6994                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6995                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6996                         break;
6997                 udelay(10);
6998         }
6999
7000         /* Chip might not be fitted with firmware.  Some Sun onboard
7001          * parts are configured like that.  So don't signal the timeout
7002          * of the above loop as an error, but do report the lack of
7003          * running firmware once.
7004          */
7005         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7006                 tg3_flag_set(tp, NO_FWARE_REPORTED);
7007
7008                 netdev_info(tp->dev, "No firmware running\n");
7009         }
7010
7011         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7012                 /* The 57765 A0 needs a little more
7013                  * time to do some important work.
7014                  */
7015                 mdelay(10);
7016         }
7017
7018         return 0;
7019 }
7020
7021 /* Save PCI command register before chip reset */
7022 static void tg3_save_pci_state(struct tg3 *tp)
7023 {
7024         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7025 }
7026
7027 /* Restore PCI state after chip reset */
7028 static void tg3_restore_pci_state(struct tg3 *tp)
7029 {
7030         u32 val;
7031
7032         /* Re-enable indirect register accesses. */
7033         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7034                                tp->misc_host_ctrl);
7035
7036         /* Set MAX PCI retry to zero. */
7037         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7038         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7039             tg3_flag(tp, PCIX_MODE))
7040                 val |= PCISTATE_RETRY_SAME_DMA;
7041         /* Allow reads and writes to the APE register and memory space. */
7042         if (tg3_flag(tp, ENABLE_APE))
7043                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7044                        PCISTATE_ALLOW_APE_SHMEM_WR |
7045                        PCISTATE_ALLOW_APE_PSPACE_WR;
7046         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7047
7048         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7049
7050         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7051                 if (tg3_flag(tp, PCI_EXPRESS))
7052                         pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7053                 else {
7054                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7055                                               tp->pci_cacheline_sz);
7056                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7057                                               tp->pci_lat_timer);
7058                 }
7059         }
7060
7061         /* Make sure PCI-X relaxed ordering bit is clear. */
7062         if (tg3_flag(tp, PCIX_MODE)) {
7063                 u16 pcix_cmd;
7064
7065                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7066                                      &pcix_cmd);
7067                 pcix_cmd &= ~PCI_X_CMD_ERO;
7068                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7069                                       pcix_cmd);
7070         }
7071
7072         if (tg3_flag(tp, 5780_CLASS)) {
7073
7074                 /* Chip reset on 5780 will reset the MSI enable bit,
7075                  * so we need to restore it.
7076                  */
7077                 if (tg3_flag(tp, USING_MSI)) {
7078                         u16 ctrl;
7079
7080                         pci_read_config_word(tp->pdev,
7081                                              tp->msi_cap + PCI_MSI_FLAGS,
7082                                              &ctrl);
7083                         pci_write_config_word(tp->pdev,
7084                                               tp->msi_cap + PCI_MSI_FLAGS,
7085                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7086                         val = tr32(MSGINT_MODE);
7087                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7088                 }
7089         }
7090 }
7091
7092 static void tg3_stop_fw(struct tg3 *);
7093
7094 /* tp->lock is held. */
7095 static int tg3_chip_reset(struct tg3 *tp)
7096 {
7097         u32 val;
7098         void (*write_op)(struct tg3 *, u32, u32);
7099         int i, err;
7100
7101         tg3_nvram_lock(tp);
7102
7103         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7104
7105         /* No matching tg3_nvram_unlock() after this because
7106          * chip reset below will undo the nvram lock.
7107          */
7108         tp->nvram_lock_cnt = 0;
7109
7110         /* GRC_MISC_CFG core clock reset will clear the memory
7111          * enable bit in PCI register 4 and the MSI enable bit
7112          * on some chips, so we save relevant registers here.
7113          */
7114         tg3_save_pci_state(tp);
7115
7116         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7117             tg3_flag(tp, 5755_PLUS))
7118                 tw32(GRC_FASTBOOT_PC, 0);
7119
7120         /*
7121          * We must avoid the readl() that normally takes place.
7122          * It locks up machines, causes machine checks and other
7123          * fun things.  So, temporarily disable the 5701
7124          * hardware workaround, while we do the reset.
7125          */
7126         write_op = tp->write32;
7127         if (write_op == tg3_write_flush_reg32)
7128                 tp->write32 = tg3_write32;
7129
7130         /* Prevent the irq handler from reading or writing PCI registers
7131          * during chip reset when the memory enable bit in the PCI command
7132          * register may be cleared.  The chip does not generate interrupts
7133          * at this time, but the irq handler may still be called due to irq
7134          * sharing or irqpoll.
7135          */
7136         tg3_flag_set(tp, CHIP_RESETTING);
7137         for (i = 0; i < tp->irq_cnt; i++) {
7138                 struct tg3_napi *tnapi = &tp->napi[i];
7139                 if (tnapi->hw_status) {
7140                         tnapi->hw_status->status = 0;
7141                         tnapi->hw_status->status_tag = 0;
7142                 }
7143                 tnapi->last_tag = 0;
7144                 tnapi->last_irq_tag = 0;
7145         }
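             /* Publish the cleared status blocks and tags to all CPUs
              * before waiting out any handlers still running below.
              */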
7146         smp_mb();
7147
7148         for (i = 0; i < tp->irq_cnt; i++)
7149                 synchronize_irq(tp->napi[i].irq_vec);
7150
7151         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7152                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7153                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7154         }
7155
7156         /* do the reset */
7157         val = GRC_MISC_CFG_CORECLK_RESET;
7158
7159         if (tg3_flag(tp, PCI_EXPRESS)) {
7160                 /* Force PCIe 1.0a mode */
7161                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7162                     !tg3_flag(tp, 57765_PLUS) &&
7163                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7164                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7165                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7166
7167                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7168                         tw32(GRC_MISC_CFG, (1 << 29));
7169                         val |= (1 << 29);
7170                 }
7171         }
7172
7173         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7174                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7175                 tw32(GRC_VCPU_EXT_CTRL,
7176                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7177         }
7178
7179         /* Manage GPHY power for all CPMU-absent PCIe devices. */
7180         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7181                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7182
7183         tw32(GRC_MISC_CFG, val);
7184
7185         /* restore 5701 hardware bug workaround write method */
7186         tp->write32 = write_op;
7187
7188         /* Unfortunately, we have to delay before the PCI read back.
7189          * Some 575X chips will not even respond to a PCI cfg access
7190          * when the reset command is given to the chip.
7191          *
7192          * How do these hardware designers expect things to work
7193          * properly if the PCI write is posted for a long period
7194          * of time?  It is always necessary to have some method by
7195          * which a register read back can occur to push the write
7196          * out which does the reset.
7197          *
7198          * For most tg3 variants the trick below works.
7199          * Ho hum...
7200          */
7201         udelay(120);
7202
7203         /* Flush PCI posted writes.  The normal MMIO registers
7204          * are inaccessible at this time so this is the only
7205          * way to do this reliably (actually, this is no longer
7206          * the case, see above).  I tried to use indirect
7207          * register read/write but this upset some 5701 variants.
7208          */
7209         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7210
7211         udelay(120);
7212
7213         if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
7214                 u16 val16;
7215
7216                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7217                         int i;
7218                         u32 cfg_val;
7219
7220                         /* Wait for link training to complete.  */
7221                         for (i = 0; i < 5000; i++)
7222                                 udelay(100);
7223
7224                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7225                         pci_write_config_dword(tp->pdev, 0xc4,
7226                                                cfg_val | (1 << 15));
7227                 }
7228
7229                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7230                 pci_read_config_word(tp->pdev,
7231                                      tp->pcie_cap + PCI_EXP_DEVCTL,
7232                                      &val16);
7233                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7234                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7235                 /*
7236                  * Older PCIe devices only support the 128 byte
7237                  * MPS setting.  Enforce the restriction.
7238                  */
7239                 if (!tg3_flag(tp, CPMU_PRESENT))
7240                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7241                 pci_write_config_word(tp->pdev,
7242                                       tp->pcie_cap + PCI_EXP_DEVCTL,
7243                                       val16);
7244
7245                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7246
7247                 /* Clear error status */
7248                 pci_write_config_word(tp->pdev,
7249                                       tp->pcie_cap + PCI_EXP_DEVSTA,
7250                                       PCI_EXP_DEVSTA_CED |
7251                                       PCI_EXP_DEVSTA_NFED |
7252                                       PCI_EXP_DEVSTA_FED |
7253                                       PCI_EXP_DEVSTA_URD);
7254         }
7255
7256         tg3_restore_pci_state(tp);
7257
7258         tg3_flag_clear(tp, CHIP_RESETTING);
7259         tg3_flag_clear(tp, ERROR_PROCESSED);
7260
7261         val = 0;
7262         if (tg3_flag(tp, 5780_CLASS))
7263                 val = tr32(MEMARB_MODE);
7264         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7265
7266         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7267                 tg3_stop_fw(tp);
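                     /* Write to an undocumented register (it has no
                      * symbolic name in this driver); apparently needed
                      * on 5750 A3 once the firmware has been stopped.
                      */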
7268                 tw32(0x5000, 0x400);
7269         }
7270
7271         tw32(GRC_MODE, tp->grc_mode);
7272
7273         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7274                 val = tr32(0xc4);
7275
7276                 tw32(0xc4, val | (1 << 15));
7277         }
7278
7279         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7280             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7281                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7282                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7283                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7284                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7285         }
7286
7287         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7288                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7289                 val = tp->mac_mode;
7290         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7291                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7292                 val = tp->mac_mode;
7293         } else
7294                 val = 0;
7295
7296         tw32_f(MAC_MODE, val);
7297         udelay(40);
7298
7299         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7300
7301         err = tg3_poll_fw(tp);
7302         if (err)
7303                 return err;
7304
7305         tg3_mdio_start(tp);
7306
7307         if (tg3_flag(tp, PCI_EXPRESS) &&
7308             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7309             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7310             !tg3_flag(tp, 57765_PLUS)) {
7311                 val = tr32(0x7c00);
7312
7313                 tw32(0x7c00, val | (1 << 25));
7314         }
7315
7316         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7317                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7318                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7319         }
7320
7321         /* Reprobe ASF enable state.  */
7322         tg3_flag_clear(tp, ENABLE_ASF);
7323         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7324         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7325         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7326                 u32 nic_cfg;
7327
7328                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7329                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7330                         tg3_flag_set(tp, ENABLE_ASF);
7331                         tp->last_event_jiffies = jiffies;
7332                         if (tg3_flag(tp, 5750_PLUS))
7333                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7334                 }
7335         }
7336
7337         return 0;
7338 }
7339
7340 /* tp->lock is held. */
7341 static void tg3_stop_fw(struct tg3 *tp)
7342 {
7343         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7344                 /* Wait for RX cpu to ACK the previous event. */
7345                 tg3_wait_for_event_ack(tp);
7346
7347                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7348
7349                 tg3_generate_fw_event(tp);
7350
7351                 /* Wait for RX cpu to ACK this event. */
7352                 tg3_wait_for_event_ack(tp);
7353         }
7354 }
7355
7356 /* tp->lock is held. */
7357 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7358 {
7359         int err;
7360
7361         tg3_stop_fw(tp);
7362
7363         tg3_write_sig_pre_reset(tp, kind);
7364
7365         tg3_abort_hw(tp, silent);
7366         err = tg3_chip_reset(tp);
7367
7368         __tg3_set_mac_addr(tp, 0);
7369
7370         tg3_write_sig_legacy(tp, kind);
7371         tg3_write_sig_post_reset(tp, kind);
7372
7373         if (err)
7374                 return err;
7375
7376         return 0;
7377 }
7378
7379 #define RX_CPU_SCRATCH_BASE     0x30000
7380 #define RX_CPU_SCRATCH_SIZE     0x04000
7381 #define TX_CPU_SCRATCH_BASE     0x34000
7382 #define TX_CPU_SCRATCH_SIZE     0x04000
7383
7384 /* tp->lock is held. */
7385 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7386 {
7387         int i;
7388
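             /* 5705 and newer designs have no separate TX CPU, so asking
              * to halt one there is a driver bug.
              */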
7389         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7390
7391         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7392                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7393
7394                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7395                 return 0;
7396         }
7397         if (offset == RX_CPU_BASE) {
7398                 for (i = 0; i < 10000; i++) {
7399                         tw32(offset + CPU_STATE, 0xffffffff);
7400                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7401                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7402                                 break;
7403                 }
7404
7405                 tw32(offset + CPU_STATE, 0xffffffff);
7406                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
7407                 udelay(10);
7408         } else {
7409                 for (i = 0; i < 10000; i++) {
7410                         tw32(offset + CPU_STATE, 0xffffffff);
7411                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7412                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7413                                 break;
7414                 }
7415         }
7416
7417         if (i >= 10000) {
7418                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7419                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7420                 return -ENODEV;
7421         }
7422
7423         /* Clear firmware's nvram arbitration. */
7424         if (tg3_flag(tp, NVRAM))
7425                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7426         return 0;
7427 }
7428
7429 struct fw_info {
7430         unsigned int fw_base;
7431         unsigned int fw_len;
7432         const __be32 *fw_data;
7433 };
7434
7435 /* tp->lock is held. */
7436 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7437                                  int cpu_scratch_size, struct fw_info *info)
7438 {
7439         int err, lock_err, i;
7440         void (*write_op)(struct tg3 *, u32, u32);
7441
7442         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7443                 netdev_err(tp->dev,
7444                            "%s: Trying to load TX cpu firmware on a 5705-class chip, which has no TX cpu\n",
7445                            __func__);
7446                 return -EINVAL;
7447         }
7448
7449         if (tg3_flag(tp, 5705_PLUS))
7450                 write_op = tg3_write_mem;
7451         else
7452                 write_op = tg3_write_indirect_reg32;
7453
7454         /* The bootcode may still be loading at this point.
7455          * Take the nvram lock before halting the cpu.
7456          */
7457         lock_err = tg3_nvram_lock(tp);
7458         err = tg3_halt_cpu(tp, cpu_base);
7459         if (!lock_err)
7460                 tg3_nvram_unlock(tp);
7461         if (err)
7462                 goto out;
7463
7464         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7465                 write_op(tp, cpu_scratch_base + i, 0);
7466         tw32(cpu_base + CPU_STATE, 0xffffffff);
7467         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
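             /* Copy the image into scratch SRAM at the firmware's link
              * offset; the low 16 bits of fw_base appear to select that
              * offset, and CPU_PC is later pointed at fw_base itself.
              */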
7468         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7469                 write_op(tp, (cpu_scratch_base +
7470                               (info->fw_base & 0xffff) +
7471                               (i * sizeof(u32))),
7472                               be32_to_cpu(info->fw_data[i]));
7473
7474         err = 0;
7475
7476 out:
7477         return err;
7478 }
7479
7480 /* tp->lock is held. */
7481 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7482 {
7483         struct fw_info info;
7484         const __be32 *fw_data;
7485         int err, i;
7486
7487         fw_data = (void *)tp->fw->data;
7488
7489         /* The firmware blob starts with a 12-byte header (version,
7490            start address, length), followed by the image itself.  The
7491            complete length is used here:
7492            length = end_address_of_bss - start_address_of_text.
7493            The image is loaded contiguously from the start address. */
7494
7495         info.fw_base = be32_to_cpu(fw_data[1]);
7496         info.fw_len = tp->fw->size - 12;
7497         info.fw_data = &fw_data[3];
7498
7499         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7500                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7501                                     &info);
7502         if (err)
7503                 return err;
7504
7505         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7506                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7507                                     &info);
7508         if (err)
7509                 return err;
7510
7511         /* Now start up only the RX cpu. */
7512         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7513         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7514
7515         for (i = 0; i < 5; i++) {
7516                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7517                         break;
7518                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7519                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
7520                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7521                 udelay(1000);
7522         }
7523         if (i >= 5) {
7524                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
7525                            "should be %08x\n", __func__,
7526                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7527                 return -ENODEV;
7528         }
7529         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7530         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
7531
7532         return 0;
7533 }
7534
7535 /* tp->lock is held. */
7536 static int tg3_load_tso_firmware(struct tg3 *tp)
7537 {
7538         struct fw_info info;
7539         const __be32 *fw_data;
7540         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7541         int err, i;
7542
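             /* Chips that implement TSO fully in hardware need no
              * firmware download.
              */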
7543         if (tg3_flag(tp, HW_TSO_1) ||
7544             tg3_flag(tp, HW_TSO_2) ||
7545             tg3_flag(tp, HW_TSO_3))
7546                 return 0;
7547
7548         fw_data = (void *)tp->fw->data;
7549
7550         /* The firmware blob starts with a 12-byte header (version,
7551            start address, length), followed by the image itself.  The
7552            complete length is used here:
7553            length = end_address_of_bss - start_address_of_text.
7554            The image is loaded contiguously from the start address. */
7555
7556         info.fw_base = be32_to_cpu(fw_data[1]);
7557         cpu_scratch_size = tp->fw_len;
7558         info.fw_len = tp->fw->size - 12;
7559         info.fw_data = &fw_data[3];
7560
7561         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7562                 cpu_base = RX_CPU_BASE;
7563                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7564         } else {
7565                 cpu_base = TX_CPU_BASE;
7566                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7567                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7568         }
7569
7570         err = tg3_load_firmware_cpu(tp, cpu_base,
7571                                     cpu_scratch_base, cpu_scratch_size,
7572                                     &info);
7573         if (err)
7574                 return err;
7575
7576         /* Now start up the cpu. */
7577         tw32(cpu_base + CPU_STATE, 0xffffffff);
7578         tw32_f(cpu_base + CPU_PC, info.fw_base);
7579
7580         for (i = 0; i < 5; i++) {
7581                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7582                         break;
7583                 tw32(cpu_base + CPU_STATE, 0xffffffff);
7584                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7585                 tw32_f(cpu_base + CPU_PC, info.fw_base);
7586                 udelay(1000);
7587         }
7588         if (i >= 5) {
7589                 netdev_err(tp->dev,
7590                            "%s failed to set CPU PC: is %08x, should be %08x\n",
7591                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7592                 return -ENODEV;
7593         }
7594         tw32(cpu_base + CPU_STATE, 0xffffffff);
7595         tw32_f(cpu_base + CPU_MODE,  0x00000000);
7596         return 0;
7597 }
7598
7599
7600 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7601 {
7602         struct tg3 *tp = netdev_priv(dev);
7603         struct sockaddr *addr = p;
7604         int err = 0, skip_mac_1 = 0;
7605
7606         if (!is_valid_ether_addr(addr->sa_data))
7607                 return -EINVAL;
7608
7609         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7610
7611         if (!netif_running(dev))
7612                 return 0;
7613
7614         if (tg3_flag(tp, ENABLE_ASF)) {
7615                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7616
7617                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7618                 addr0_low = tr32(MAC_ADDR_0_LOW);
7619                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7620                 addr1_low = tr32(MAC_ADDR_1_LOW);
7621
7622                 /* Skip MAC addr 1 if ASF is using it. */
7623                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7624                     !(addr1_high == 0 && addr1_low == 0))
7625                         skip_mac_1 = 1;
7626         }
7627         spin_lock_bh(&tp->lock);
7628         __tg3_set_mac_addr(tp, skip_mac_1);
7629         spin_unlock_bh(&tp->lock);
7630
7631         return err;
7632 }
7633
7634 /* tp->lock is held. */
7635 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7636                            dma_addr_t mapping, u32 maxlen_flags,
7637                            u32 nic_addr)
7638 {
7639         tg3_write_mem(tp,
7640                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7641                       ((u64) mapping >> 32));
7642         tg3_write_mem(tp,
7643                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7644                       ((u64) mapping & 0xffffffff));
7645         tg3_write_mem(tp,
7646                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7647                        maxlen_flags);
7648
7649         if (!tg3_flag(tp, 5705_PLUS))
7650                 tg3_write_mem(tp,
7651                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7652                               nic_addr);
7653 }
7654
7655 static void __tg3_set_rx_mode(struct net_device *);
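
     /* Push the ethtool coalescing parameters into the host coalescing
      * engine.  When TSS/RSS is enabled, the default TX/RX registers are
      * zeroed and the per-vector (VEC1 and up) registers programmed in
      * the loops below are used instead.
      */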
7656 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7657 {
7658         int i;
7659
7660         if (!tg3_flag(tp, ENABLE_TSS)) {
7661                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7662                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7663                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7664         } else {
7665                 tw32(HOSTCC_TXCOL_TICKS, 0);
7666                 tw32(HOSTCC_TXMAX_FRAMES, 0);
7667                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7668         }
7669
7670         if (!tg3_flag(tp, ENABLE_RSS)) {
7671                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7672                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7673                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7674         } else {
7675                 tw32(HOSTCC_RXCOL_TICKS, 0);
7676                 tw32(HOSTCC_RXMAX_FRAMES, 0);
7677                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7678         }
7679
7680         if (!tg3_flag(tp, 5705_PLUS)) {
7681                 u32 val = ec->stats_block_coalesce_usecs;
7682
7683                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7684                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7685
7686                 if (!netif_carrier_ok(tp->dev))
7687                         val = 0;
7688
7689                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7690         }
7691
7692         for (i = 0; i < tp->irq_cnt - 1; i++) {
7693                 u32 reg;
7694
7695                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7696                 tw32(reg, ec->rx_coalesce_usecs);
7697                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7698                 tw32(reg, ec->rx_max_coalesced_frames);
7699                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7700                 tw32(reg, ec->rx_max_coalesced_frames_irq);
7701
7702                 if (tg3_flag(tp, ENABLE_TSS)) {
7703                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7704                         tw32(reg, ec->tx_coalesce_usecs);
7705                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7706                         tw32(reg, ec->tx_max_coalesced_frames);
7707                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7708                         tw32(reg, ec->tx_max_coalesced_frames_irq);
7709                 }
7710         }
7711
7712         for (; i < tp->irq_max - 1; i++) {
7713                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7714                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7715                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7716
7717                 if (tg3_flag(tp, ENABLE_TSS)) {
7718                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7719                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7720                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7721                 }
7722         }
7723 }
7724
7725 /* tp->lock is held. */
7726 static void tg3_rings_reset(struct tg3 *tp)
7727 {
7728         int i;
7729         u32 stblk, txrcb, rxrcb, limit;
7730         struct tg3_napi *tnapi = &tp->napi[0];
7731
7732         /* Disable all transmit rings but the first. */
7733         if (!tg3_flag(tp, 5705_PLUS))
7734                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7735         else if (tg3_flag(tp, 5717_PLUS))
7736                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7737         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7738                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7739         else
7740                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7741
7742         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7743              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7744                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7745                               BDINFO_FLAGS_DISABLED);
7746
7747
7748         /* Disable all receive return rings but the first. */
7749         if (tg3_flag(tp, 5717_PLUS))
7750                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7751         else if (!tg3_flag(tp, 5705_PLUS))
7752                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7753         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7754                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7755                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7756         else
7757                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7758
7759         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7760              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7761                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7762                               BDINFO_FLAGS_DISABLED);
7763
7764         /* Disable interrupts */
7765         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7766
7767         /* Zero mailbox registers. */
7768         if (tg3_flag(tp, SUPPORT_MSIX)) {
7769                 for (i = 1; i < tp->irq_max; i++) {
7770                         tp->napi[i].tx_prod = 0;
7771                         tp->napi[i].tx_cons = 0;
7772                         if (tg3_flag(tp, ENABLE_TSS))
7773                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
7774                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
7775                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7776                 }
7777                 if (!tg3_flag(tp, ENABLE_TSS))
7778                         tw32_mailbox(tp->napi[0].prodmbox, 0);
7779         } else {
7780                 tp->napi[0].tx_prod = 0;
7781                 tp->napi[0].tx_cons = 0;
7782                 tw32_mailbox(tp->napi[0].prodmbox, 0);
7783                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7784         }
7785
7786         /* Make sure the NIC-based send BD rings are disabled. */
7787         if (!tg3_flag(tp, 5705_PLUS)) {
7788                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7789                 for (i = 0; i < 16; i++)
7790                         tw32_tx_mbox(mbox + i * 8, 0);
7791         }
7792
7793         txrcb = NIC_SRAM_SEND_RCB;
7794         rxrcb = NIC_SRAM_RCV_RET_RCB;
7795
7796         /* Clear status block in ram. */
7797         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7798
7799         /* Set status block DMA address */
7800         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7801              ((u64) tnapi->status_mapping >> 32));
7802         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7803              ((u64) tnapi->status_mapping & 0xffffffff));
7804
7805         if (tnapi->tx_ring) {
7806                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7807                                (TG3_TX_RING_SIZE <<
7808                                 BDINFO_FLAGS_MAXLEN_SHIFT),
7809                                NIC_SRAM_TX_BUFFER_DESC);
7810                 txrcb += TG3_BDINFO_SIZE;
7811         }
7812
7813         if (tnapi->rx_rcb) {
7814                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7815                                (tp->rx_ret_ring_mask + 1) <<
7816                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7817                 rxrcb += TG3_BDINFO_SIZE;
7818         }
7819
7820         stblk = HOSTCC_STATBLCK_RING1;
7821
7822         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7823                 u64 mapping = (u64)tnapi->status_mapping;
7824                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7825                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7826
7827                 /* Clear status block in ram. */
7828                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7829
7830                 if (tnapi->tx_ring) {
7831                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7832                                        (TG3_TX_RING_SIZE <<
7833                                         BDINFO_FLAGS_MAXLEN_SHIFT),
7834                                        NIC_SRAM_TX_BUFFER_DESC);
7835                         txrcb += TG3_BDINFO_SIZE;
7836                 }
7837
7838                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7839                                ((tp->rx_ret_ring_mask + 1) <<
7840                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7841
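                     /* Each vector's status block address is programmed
                      * as a hi/lo register pair, 8 bytes apart.
                      */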
7842                 stblk += 8;
7843                 rxrcb += TG3_BDINFO_SIZE;
7844         }
7845 }
7846
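     /* Program how many RX buffer descriptors the chip may cache on-die
      * and the watermarks at which the standard and jumbo rings are
      * replenished.
      */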
7847 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7848 {
7849         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7850
7851         if (!tg3_flag(tp, 5750_PLUS) ||
7852             tg3_flag(tp, 5780_CLASS) ||
7853             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7854             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7855                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7856         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7857                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7858                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7859         else
7860                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7861
7862         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7863         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7864
7865         val = min(nic_rep_thresh, host_rep_thresh);
7866         tw32(RCVBDI_STD_THRESH, val);
7867
7868         if (tg3_flag(tp, 57765_PLUS))
7869                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
7870
7871         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7872                 return;
7873
7874         if (!tg3_flag(tp, 5705_PLUS))
7875                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
7876         else
7877                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
7878
7879         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
7880
7881         val = min(bdcache_maxcnt / 2, host_rep_thresh);
7882         tw32(RCVBDI_JUMBO_THRESH, val);
7883
7884         if (tg3_flag(tp, 57765_PLUS))
7885                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
7886 }
7887
7888 /* tp->lock is held. */
7889 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7890 {
7891         u32 val, rdmac_mode;
7892         int i, err, limit;
7893         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7894
7895         tg3_disable_ints(tp);
7896
7897         tg3_stop_fw(tp);
7898
7899         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7900
7901         if (tg3_flag(tp, INIT_COMPLETE))
7902                 tg3_abort_hw(tp, 1);
7903
7904         /* Enable MAC control of LPI */
7905         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7906                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7907                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7908                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
7909
7910                 tw32_f(TG3_CPMU_EEE_CTRL,
7911                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7912
7913                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7914                       TG3_CPMU_EEEMD_LPI_IN_TX |
7915                       TG3_CPMU_EEEMD_LPI_IN_RX |
7916                       TG3_CPMU_EEEMD_EEE_ENABLE;
7917
7918                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7919                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7920
7921                 if (tg3_flag(tp, ENABLE_APE))
7922                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7923
7924                 tw32_f(TG3_CPMU_EEE_MODE, val);
7925
7926                 tw32_f(TG3_CPMU_EEE_DBTMR1,
7927                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7928                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7929
7930                 tw32_f(TG3_CPMU_EEE_DBTMR2,
7931                        TG3_CPMU_DBTMR2_APE_TX_2047US |
7932                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7933         }
7934
7935         if (reset_phy)
7936                 tg3_phy_reset(tp);
7937
7938         err = tg3_chip_reset(tp);
7939         if (err)
7940                 return err;
7941
7942         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7943
7944         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7945                 val = tr32(TG3_CPMU_CTRL);
7946                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7947                 tw32(TG3_CPMU_CTRL, val);
7948
7949                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7950                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7951                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7952                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7953
7954                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7955                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7956                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7957                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7958
7959                 val = tr32(TG3_CPMU_HST_ACC);
7960                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7961                 val |= CPMU_HST_ACC_MACCLK_6_25;
7962                 tw32(TG3_CPMU_HST_ACC, val);
7963         }
7964
7965         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7966                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7967                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7968                        PCIE_PWR_MGMT_L1_THRESH_4MS;
7969                 tw32(PCIE_PWR_MGMT_THRESH, val);
7970
7971                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7972                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7973
7974                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7975
7976                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7977                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7978         }
7979
7980         if (tg3_flag(tp, L1PLLPD_EN)) {
7981                 u32 grc_mode = tr32(GRC_MODE);
7982
7983                 /* Access the lower 1K of PL PCIE block registers. */
7984                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7985                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7986
7987                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7988                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7989                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7990
7991                 tw32(GRC_MODE, grc_mode);
7992         }
7993
7994         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7995                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7996                         u32 grc_mode = tr32(GRC_MODE);
7997
7998                         /* Access the lower 1K of PL PCIE block registers. */
7999                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8000                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8001
8002                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8003                                    TG3_PCIE_PL_LO_PHYCTL5);
8004                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8005                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8006
8007                         tw32(GRC_MODE, grc_mode);
8008                 }
8009
8010                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8011                         u32 grc_mode = tr32(GRC_MODE);
8012
8013                         /* Access the lower 1K of DL PCIE block registers. */
8014                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8015                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8016
8017                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8018                                    TG3_PCIE_DL_LO_FTSMAX);
8019                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8020                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8021                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8022
8023                         tw32(GRC_MODE, grc_mode);
8024                 }
8025
8026                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8027                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8028                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8029                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8030         }
8031
8032         /* This works around an issue with Athlon chipsets on
8033          * B3 tigon3 silicon.  This bit has no effect on any
8034          * other revision.  But do not set this on PCI Express
8035          * chips and don't even touch the clocks if the CPMU is present.
8036          */
8037         if (!tg3_flag(tp, CPMU_PRESENT)) {
8038                 if (!tg3_flag(tp, PCI_EXPRESS))
8039                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8040                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8041         }
8042
8043         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8044             tg3_flag(tp, PCIX_MODE)) {
8045                 val = tr32(TG3PCI_PCISTATE);
8046                 val |= PCISTATE_RETRY_SAME_DMA;
8047                 tw32(TG3PCI_PCISTATE, val);
8048         }
8049
8050         if (tg3_flag(tp, ENABLE_APE)) {
8051                 /* Allow reads and writes to the
8052                  * APE register and memory space.
8053                  */
8054                 val = tr32(TG3PCI_PCISTATE);
8055                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8056                        PCISTATE_ALLOW_APE_SHMEM_WR |
8057                        PCISTATE_ALLOW_APE_PSPACE_WR;
8058                 tw32(TG3PCI_PCISTATE, val);
8059         }
8060
8061         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8062                 /* Enable some hw fixes.  */
8063                 val = tr32(TG3PCI_MSI_DATA);
8064                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8065                 tw32(TG3PCI_MSI_DATA, val);
8066         }
8067
8068         /* Descriptor ring init may make accesses to the
8069          * NIC SRAM area to set up the TX descriptors, so we
8070          * can only do this after the hardware has been
8071          * successfully reset.
8072          */
8073         err = tg3_init_rings(tp);
8074         if (err)
8075                 return err;
8076
8077         if (tg3_flag(tp, 57765_PLUS)) {
8078                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8079                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8080                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8081                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8082                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8083                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8084                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8085                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8086         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8087                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8088                 /* This value is determined by the probe-time DMA
8089                  * engine test, tg3_test_dma().
8090                  */
8091                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8092         }
8093
8094         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8095                           GRC_MODE_4X_NIC_SEND_RINGS |
8096                           GRC_MODE_NO_TX_PHDR_CSUM |
8097                           GRC_MODE_NO_RX_PHDR_CSUM);
8098         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8099
8100         /* Pseudo-header checksum is done by hardware logic and not
8101          * the offload processors, so make the chip do the pseudo-
8102          * header checksums on receive.  For transmit it is more
8103          * convenient to do the pseudo-header checksum in software
8104          * as Linux does that on transmit for us in all cases.
8105          */
8106         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8107
8108         tw32(GRC_MODE,
8109              tp->grc_mode |
8110              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8111
8112         /* Set up the timer prescaler.  The core clock is always 66 MHz. */
8113         val = tr32(GRC_MISC_CFG);
8114         val &= ~0xff;
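             /* A prescaler value of 65 presumably divides the 66 MHz
              * core clock by 66, giving a 1 us timer tick.
              */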
8115         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8116         tw32(GRC_MISC_CFG, val);
8117
8118         /* Initialize MBUF/DESC pool. */
8119         if (tg3_flag(tp, 5750_PLUS)) {
8120                 /* Do nothing.  */
8121         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8122                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8123                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8124                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8125                 else
8126                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8127                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8128                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8129         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8130                 int fw_len;
8131
8132                 fw_len = tp->fw_len;
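                     /* Round up to a 128-byte boundary; the mbuf pool is
                      * carved out of SRAM immediately after the firmware
                      * image.
                      */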
8133                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8134                 tw32(BUFMGR_MB_POOL_ADDR,
8135                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8136                 tw32(BUFMGR_MB_POOL_SIZE,
8137                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8138         }
8139
8140         if (tp->dev->mtu <= ETH_DATA_LEN) {
8141                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8142                      tp->bufmgr_config.mbuf_read_dma_low_water);
8143                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8144                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8145                 tw32(BUFMGR_MB_HIGH_WATER,
8146                      tp->bufmgr_config.mbuf_high_water);
8147         } else {
8148                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8149                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8150                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8151                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8152                 tw32(BUFMGR_MB_HIGH_WATER,
8153                      tp->bufmgr_config.mbuf_high_water_jumbo);
8154         }
8155         tw32(BUFMGR_DMA_LOW_WATER,
8156              tp->bufmgr_config.dma_low_water);
8157         tw32(BUFMGR_DMA_HIGH_WATER,
8158              tp->bufmgr_config.dma_high_water);
8159
8160         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8161         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8162                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8163         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8164             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8165             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8166                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8167         tw32(BUFMGR_MODE, val);
8168         for (i = 0; i < 2000; i++) {
8169                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8170                         break;
8171                 udelay(10);
8172         }
8173         if (i >= 2000) {
8174                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8175                 return -ENODEV;
8176         }
8177
8178         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8179                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8180
8181         tg3_setup_rxbd_thresholds(tp);
8182
8183         /* Initialize TG3_BDINFO's at:
8184          *  RCVDBDI_STD_BD:     standard eth size rx ring
8185          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8186          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8187          *
8188          * like so:
8189          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8190          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8191          *                              ring attribute flags
8192          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8193          *
8194          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8195          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8196          *
8197          * The size of each ring is fixed in the firmware, but the location is
8198          * configurable.
8199          */
8200         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8201              ((u64) tpr->rx_std_mapping >> 32));
8202         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8203              ((u64) tpr->rx_std_mapping & 0xffffffff));
8204         if (!tg3_flag(tp, 5717_PLUS))
8205                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8206                      NIC_SRAM_RX_BUFFER_DESC);
8207
8208         /* Disable the mini ring */
8209         if (!tg3_flag(tp, 5705_PLUS))
8210                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8211                      BDINFO_FLAGS_DISABLED);
8212
8213         /* Program the jumbo buffer descriptor ring control
8214          * blocks on those devices that have them.
8215          */
8216         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8217             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8218
8219                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8220                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8221                              ((u64) tpr->rx_jmb_mapping >> 32));
8222                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8223                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8224                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8225                               BDINFO_FLAGS_MAXLEN_SHIFT;
8226                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8227                              val | BDINFO_FLAGS_USE_EXT_RECV);
8228                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8229                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8230                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8231                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8232                 } else {
8233                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8234                              BDINFO_FLAGS_DISABLED);
8235                 }
8236
8237                 if (tg3_flag(tp, 57765_PLUS)) {
8238                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8239                                 val = TG3_RX_STD_MAX_SIZE_5700;
8240                         else
8241                                 val = TG3_RX_STD_MAX_SIZE_5717;
8242                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8243                         val |= (TG3_RX_STD_DMA_SZ << 2);
8244                 } else
8245                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8246         } else
8247                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8248
8249         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8250
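             /* Publish the initial RX producer indices; the chip may
              * begin fetching buffer descriptors once these mailboxes
              * are written.
              */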
8251         tpr->rx_std_prod_idx = tp->rx_pending;
8252         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8253
8254         tpr->rx_jmb_prod_idx =
8255                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8256         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8257
8258         tg3_rings_reset(tp);
8259
8260         /* Initialize MAC address and backoff seed. */
8261         __tg3_set_mac_addr(tp, 0);
8262
8263         /* MTU + ethernet header + FCS + optional VLAN tag */
8264         tw32(MAC_RX_MTU_SIZE,
8265              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8266
8267         /* The slot time is changed by tg3_setup_phy if we
8268          * run at gigabit with half duplex.
8269          */
8270         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8271               (6 << TX_LENGTHS_IPG_SHIFT) |
8272               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8273
8274         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8275                 val |= tr32(MAC_TX_LENGTHS) &
8276                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8277                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8278
8279         tw32(MAC_TX_LENGTHS, val);
8280
8281         /* Receive rules. */
8282         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8283         tw32(RCVLPC_CONFIG, 0x0181);
8284
8285         /* Calculate the RDMAC_MODE setting early; it is needed to
8286          * determine the RCVLPC_STATE_ENABLE mask.
8287          */
8288         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8289                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8290                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8291                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8292                       RDMAC_MODE_LNGREAD_ENAB);
8293
8294         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8295                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8296
8297         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8298             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8299             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8300                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8301                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8302                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8303
8304         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8305             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8306                 if (tg3_flag(tp, TSO_CAPABLE) &&
8307                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8308                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8309                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8310                            !tg3_flag(tp, IS_5788)) {
8311                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8312                 }
8313         }
8314
8315         if (tg3_flag(tp, PCI_EXPRESS))
8316                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8317
8318         if (tg3_flag(tp, HW_TSO_1) ||
8319             tg3_flag(tp, HW_TSO_2) ||
8320             tg3_flag(tp, HW_TSO_3))
8321                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8322
8323         if (tg3_flag(tp, 57765_PLUS) ||
8324             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8325             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8326                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8327
8328         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8329                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8330
8331         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8332             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8333             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8334             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8335             tg3_flag(tp, 57765_PLUS)) {
8336                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8337                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8338                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8339                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8340                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8341                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8342                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8343                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8344                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8345                 }
8346                 tw32(TG3_RDMA_RSRVCTRL_REG,
8347                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8348         }
8349
8350         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8351             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8352                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8353                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8354                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8355                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8356         }
8357
8358         /* Receive/send statistics. */
8359         if (tg3_flag(tp, 5750_PLUS)) {
8360                 val = tr32(RCVLPC_STATS_ENABLE);
8361                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8362                 tw32(RCVLPC_STATS_ENABLE, val);
8363         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8364                    tg3_flag(tp, TSO_CAPABLE)) {
8365                 val = tr32(RCVLPC_STATS_ENABLE);
8366                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8367                 tw32(RCVLPC_STATS_ENABLE, val);
8368         } else {
8369                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8370         }
8371         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8372         tw32(SNDDATAI_STATSENAB, 0xffffff);
8373         tw32(SNDDATAI_STATSCTRL,
8374              (SNDDATAI_SCTRL_ENABLE |
8375               SNDDATAI_SCTRL_FASTUPD));
8376
8377         /* Setup host coalescing engine. */
8378         tw32(HOSTCC_MODE, 0);
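             /* Poll up to ~20 ms (2000 x 10 us) for the enable bit to clear. */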
8379         for (i = 0; i < 2000; i++) {
8380                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8381                         break;
8382                 udelay(10);
8383         }
8384
8385         __tg3_set_coalesce(tp, &tp->coal);
8386
8387         if (!tg3_flag(tp, 5705_PLUS)) {
8388                 /* Status/statistics block address.  See tg3_timer,
8389                  * the tg3_periodic_fetch_stats call there, and
8390                  * tg3_get_stats64 to see how this works for 5705/5750 chips.
8391                  */
8392                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8393                      ((u64) tp->stats_mapping >> 32));
8394                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8395                      ((u64) tp->stats_mapping & 0xffffffff));
8396                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8397
8398                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8399
8400                 /* Clear statistics and status block memory areas */
8401                 for (i = NIC_SRAM_STATS_BLK;
8402                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8403                      i += sizeof(u32)) {
8404                         tg3_write_mem(tp, i, 0);
8405                         udelay(40);
8406                 }
8407         }
8408
8409         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8410
8411         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8412         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8413         if (!tg3_flag(tp, 5705_PLUS))
8414                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8415
8416         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8417                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8418                 /* reset to prevent intermittently losing the 1st rx packet */
8419                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8420                 udelay(10);
8421         }
8422
8423         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8424                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8425                         MAC_MODE_FHDE_ENABLE;
8426         if (tg3_flag(tp, ENABLE_APE))
8427                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8428         if (!tg3_flag(tp, 5705_PLUS) &&
8429             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8430             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8431                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8432         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8433         udelay(40);
8434
8435         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8436          * If TG3_FLAG_IS_NIC is zero, we should read the
8437          * register to preserve the GPIO settings for LOMs. The GPIOs,
8438          * whether used as inputs or outputs, are set by boot code after
8439          * reset.
8440          */
8441         if (!tg3_flag(tp, IS_NIC)) {
8442                 u32 gpio_mask;
8443
8444                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8445                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8446                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8447
8448                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8449                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8450                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8451
8452                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8453                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8454
8455                 tp->grc_local_ctrl &= ~gpio_mask;
8456                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8457
8458                 /* GPIO1 must be driven high for eeprom write protect */
8459                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8460                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8461                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8462         }
8463         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8464         udelay(100);
8465
8466         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8467                 val = tr32(MSGINT_MODE);
8468                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8469                 tw32(MSGINT_MODE, val);
8470         }
8471
8472         if (!tg3_flag(tp, 5705_PLUS)) {
8473                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8474                 udelay(40);
8475         }
8476
8477         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8478                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8479                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8480                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8481                WDMAC_MODE_LNGREAD_ENAB);
8482
8483         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8484             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8485                 if (tg3_flag(tp, TSO_CAPABLE) &&
8486                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8487                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8488                         /* nothing */
8489                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8490                            !tg3_flag(tp, IS_5788)) {
8491                         val |= WDMAC_MODE_RX_ACCEL;
8492                 }
8493         }
8494
8495         /* Enable host coalescing bug fix */
8496         if (tg3_flag(tp, 5755_PLUS))
8497                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8498
8499         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8500                 val |= WDMAC_MODE_BURST_ALL_DATA;
8501
8502         tw32_f(WDMAC_MODE, val);
8503         udelay(40);
8504
8505         if (tg3_flag(tp, PCIX_MODE)) {
8506                 u16 pcix_cmd;
8507
8508                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8509                                      &pcix_cmd);
8510                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8511                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8512                         pcix_cmd |= PCI_X_CMD_READ_2K;
8513                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8514                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8515                         pcix_cmd |= PCI_X_CMD_READ_2K;
8516                 }
8517                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8518                                       pcix_cmd);
8519         }
8520
8521         tw32_f(RDMAC_MODE, rdmac_mode);
8522         udelay(40);
8523
8524         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8525         if (!tg3_flag(tp, 5705_PLUS))
8526                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8527
8528         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8529                 tw32(SNDDATAC_MODE,
8530                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8531         else
8532                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8533
8534         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8535         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8536         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8537         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8538                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8539         tw32(RCVDBDI_MODE, val);
8540         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8541         if (tg3_flag(tp, HW_TSO_1) ||
8542             tg3_flag(tp, HW_TSO_2) ||
8543             tg3_flag(tp, HW_TSO_3))
8544                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8545         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8546         if (tg3_flag(tp, ENABLE_TSS))
8547                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8548         tw32(SNDBDI_MODE, val);
8549         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8550
8551         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8552                 err = tg3_load_5701_a0_firmware_fix(tp);
8553                 if (err)
8554                         return err;
8555         }
8556
8557         if (tg3_flag(tp, TSO_CAPABLE)) {
8558                 err = tg3_load_tso_firmware(tp);
8559                 if (err)
8560                         return err;
8561         }
8562
8563         tp->tx_mode = TX_MODE_ENABLE;
8564
8565         if (tg3_flag(tp, 5755_PLUS) ||
8566             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8567                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8568
8569         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8570                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8571                 tp->tx_mode &= ~val;
8572                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8573         }
8574
8575         tw32_f(MAC_TX_MODE, tp->tx_mode);
8576         udelay(100);
8577
8578         if (tg3_flag(tp, ENABLE_RSS)) {
8579                 u32 reg = MAC_RSS_INDIR_TBL_0;
8580                 u8 *ent = (u8 *)&val;
8581
8582                 /* Setup the indirection table */
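                     /* Each 32-bit register packs four one-byte table entries,
                      * flushed once every fourth iteration.  Rx flows are spread
                      * round-robin over the tp->irq_cnt - 1 rx vectors; vector 0
                      * is reserved for link and other non-rx events.
                      */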
8583                 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8584                         int idx = i % sizeof(val);
8585
8586                         ent[idx] = i % (tp->irq_cnt - 1);
8587                         if (idx == sizeof(val) - 1) {
8588                                 tw32(reg, val);
8589                                 reg += 4;
8590                         }
8591                 }
8592
8593                 /* Setup the "secret" hash key. */
8594                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8595                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8596                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8597                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8598                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8599                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8600                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8601                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8602                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8603                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8604         }
8605
8606         tp->rx_mode = RX_MODE_ENABLE;
8607         if (tg3_flag(tp, 5755_PLUS))
8608                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8609
8610         if (tg3_flag(tp, ENABLE_RSS))
8611                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8612                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8613                                RX_MODE_RSS_IPV6_HASH_EN |
8614                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8615                                RX_MODE_RSS_IPV4_HASH_EN |
8616                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8617
8618         tw32_f(MAC_RX_MODE, tp->rx_mode);
8619         udelay(10);
8620
8621         tw32(MAC_LED_CTRL, tp->led_ctrl);
8622
8623         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8624         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8625                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8626                 udelay(10);
8627         }
8628         tw32_f(MAC_RX_MODE, tp->rx_mode);
8629         udelay(10);
8630
8631         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8632                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8633                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8634                         /* Set the drive transmission level to 1.2V only
8635                          * if the signal pre-emphasis bit is not set. */
8636                         val = tr32(MAC_SERDES_CFG);
8637                         val &= 0xfffff000;
8638                         val |= 0x880;
8639                         tw32(MAC_SERDES_CFG, val);
8640                 }
8641                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8642                         tw32(MAC_SERDES_CFG, 0x616000);
8643         }
8644
8645         /* Prevent chip from dropping frames when flow control
8646          * is enabled.
8647          */
8648         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8649                 val = 1;
8650         else
8651                 val = 2;
8652         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8653
8654         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8655             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8656                 /* Use hardware link auto-negotiation */
8657                 tg3_flag_set(tp, HW_AUTONEG);
8658         }
8659
8660         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8661             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8662                 u32 tmp;
8663
8664                 tmp = tr32(SERDES_RX_CTRL);
8665                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8666                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8667                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8668                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8669         }
8670
8671         if (!tg3_flag(tp, USE_PHYLIB)) {
8672                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8673                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8674                         tp->link_config.speed = tp->link_config.orig_speed;
8675                         tp->link_config.duplex = tp->link_config.orig_duplex;
8676                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
8677                 }
8678
8679                 err = tg3_setup_phy(tp, 0);
8680                 if (err)
8681                         return err;
8682
8683                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8684                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8685                         u32 tmp;
8686
8687                         /* Clear CRC stats. */
8688                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8689                                 tg3_writephy(tp, MII_TG3_TEST1,
8690                                              tmp | MII_TG3_TEST1_CRC_EN);
8691                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8692                         }
8693                 }
8694         }
8695
8696         __tg3_set_rx_mode(tp->dev);
8697
8698         /* Initialize receive rules. */
8699         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
8700         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8701         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
8702         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8703
8704         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8705                 limit = 8;
8706         else
8707                 limit = 16;
8708         if (tg3_flag(tp, ENABLE_ASF))
8709                 limit -= 4;
8710         switch (limit) {
8711         case 16:
8712                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
8713         case 15:
8714                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
8715         case 14:
8716                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
8717         case 13:
8718                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
8719         case 12:
8720                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
8721         case 11:
8722                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
8723         case 10:
8724                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
8725         case 9:
8726                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
8727         case 8:
8728                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
8729         case 7:
8730                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
8731         case 6:
8732                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
8733         case 5:
8734                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
8735         case 4:
8736                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
8737         case 3:
8738                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
8739         case 2:
8740         case 1:
8741
8742         default:
8743                 break;
8744         }
8745
8746         if (tg3_flag(tp, ENABLE_APE))
8747                 /* Write our heartbeat update interval to APE. */
8748                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8749                                 APE_HOST_HEARTBEAT_INT_DISABLE);
8750
8751         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8752
8753         return 0;
8754 }
8755
8756 /* Called at device open time to get the chip ready for
8757  * packet processing.  Invoked with tp->lock held.
8758  */
8759 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8760 {
8761         tg3_switch_clocks(tp);
8762
8763         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8764
8765         return tg3_reset_hw(tp, reset_phy);
8766 }
8767
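     /* Fold a 32-bit rolling hardware counter into a 64-bit (high, low)
      * pair; if the low word wraps (the sum ends up below the addend),
      * carry one into the high word.
      */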
8768 #define TG3_STAT_ADD32(PSTAT, REG) \
8769 do {    u32 __val = tr32(REG); \
8770         (PSTAT)->low += __val; \
8771         if ((PSTAT)->low < __val) \
8772                 (PSTAT)->high += 1; \
8773 } while (0)
8774
8775 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8776 {
8777         struct tg3_hw_stats *sp = tp->hw_stats;
8778
8779         if (!netif_carrier_ok(tp->dev))
8780                 return;
8781
8782         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8783         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8784         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8785         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8786         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8787         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8788         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8789         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8790         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8791         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8792         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8793         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8794         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8795
8796         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8797         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8798         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8799         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8800         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8801         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8802         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8803         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8804         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8805         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8806         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8807         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8808         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8809         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8810
8811         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8812         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8813             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8814             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8815                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8816         } else {
8817                 u32 val = tr32(HOSTCC_FLOW_ATTN);
8818                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8819                 if (val) {
8820                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8821                         sp->rx_discards.low += val;
8822                         if (sp->rx_discards.low < val)
8823                                 sp->rx_discards.high += 1;
8824                 }
8825                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8826         }
8827         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8828 }
8829
8830 static void tg3_timer(unsigned long __opaque)
8831 {
8832         struct tg3 *tp = (struct tg3 *) __opaque;
8833
8834         if (tp->irq_sync)
8835                 goto restart_timer;
8836
8837         spin_lock(&tp->lock);
8838
8839         if (!tg3_flag(tp, TAGGED_STATUS)) {
8840                 /* All of this garbage is because, when using non-tagged
8841                  * IRQ status, the mailbox/status_block protocol the chip
8842                  * uses with the CPU is race prone.
8843                  */
8844                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8845                         tw32(GRC_LOCAL_CTRL,
8846                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8847                 } else {
8848                         tw32(HOSTCC_MODE, tp->coalesce_mode |
8849                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8850                 }
8851
8852                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8853                         tg3_flag_set(tp, RESTART_TIMER);
8854                         spin_unlock(&tp->lock);
8855                         schedule_work(&tp->reset_task);
8856                         return;
8857                 }
8858         }
8859
8860         /* This part only runs once per second. */
8861         if (!--tp->timer_counter) {
8862                 if (tg3_flag(tp, 5705_PLUS))
8863                         tg3_periodic_fetch_stats(tp);
8864
8865                 if (tp->setlpicnt && !--tp->setlpicnt)
8866                         tg3_phy_eee_enable(tp);
8867
8868                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
8869                         u32 mac_stat;
8870                         int phy_event;
8871
8872                         mac_stat = tr32(MAC_STATUS);
8873
8874                         phy_event = 0;
8875                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8876                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8877                                         phy_event = 1;
8878                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8879                                 phy_event = 1;
8880
8881                         if (phy_event)
8882                                 tg3_setup_phy(tp, 0);
8883                 } else if (tg3_flag(tp, POLL_SERDES)) {
8884                         u32 mac_stat = tr32(MAC_STATUS);
8885                         int need_setup = 0;
8886
8887                         if (netif_carrier_ok(tp->dev) &&
8888                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8889                                 need_setup = 1;
8890                         }
8891                         if (!netif_carrier_ok(tp->dev) &&
8892                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
8893                                          MAC_STATUS_SIGNAL_DET))) {
8894                                 need_setup = 1;
8895                         }
8896                         if (need_setup) {
8897                                 if (!tp->serdes_counter) {
8898                                         tw32_f(MAC_MODE,
8899                                              (tp->mac_mode &
8900                                               ~MAC_MODE_PORT_MODE_MASK));
8901                                         udelay(40);
8902                                         tw32_f(MAC_MODE, tp->mac_mode);
8903                                         udelay(40);
8904                                 }
8905                                 tg3_setup_phy(tp, 0);
8906                         }
8907                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8908                            tg3_flag(tp, 5780_CLASS)) {
8909                         tg3_serdes_parallel_detect(tp);
8910                 }
8911
8912                 tp->timer_counter = tp->timer_multiplier;
8913         }
8914
8915         /* Heartbeat is only sent once every 2 seconds.
8916          *
8917          * The heartbeat is to tell the ASF firmware that the host
8918          * driver is still alive.  In the event that the OS crashes,
8919          * ASF needs to reset the hardware to free up the FIFO space
8920          * that may be filled with rx packets destined for the host.
8921          * If the FIFO is full, ASF will no longer function properly.
8922          *
8923          * Unintended resets have been reported on real-time kernels
8924          * where the timer doesn't run on time.  Netpoll will also have
8925          * the same problem.
8926          *
8927          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8928          * to check the ring condition when the heartbeat is expiring
8929          * before doing the reset.  This will prevent most unintended
8930          * resets.
8931          */
8932         if (!--tp->asf_counter) {
8933                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
8934                         tg3_wait_for_event_ack(tp);
8935
8936                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8937                                       FWCMD_NICDRV_ALIVE3);
8938                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8939                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8940                                       TG3_FW_UPDATE_TIMEOUT_SEC);
8941
8942                         tg3_generate_fw_event(tp);
8943                 }
8944                 tp->asf_counter = tp->asf_multiplier;
8945         }
8946
8947         spin_unlock(&tp->lock);
8948
8949 restart_timer:
8950         tp->timer.expires = jiffies + tp->timer_offset;
8951         add_timer(&tp->timer);
8952 }
8953
8954 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8955 {
8956         irq_handler_t fn;
8957         unsigned long flags;
8958         char *name;
8959         struct tg3_napi *tnapi = &tp->napi[irq_num];
8960
8961         if (tp->irq_cnt == 1)
8962                 name = tp->dev->name;
8963         else {
8964                 name = &tnapi->irq_lbl[0];
8965                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8966                 name[IFNAMSIZ-1] = 0;
8967         }
8968
8969         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
8970                 fn = tg3_msi;
8971                 if (tg3_flag(tp, 1SHOT_MSI))
8972                         fn = tg3_msi_1shot;
8973                 flags = 0;
8974         } else {
8975                 fn = tg3_interrupt;
8976                 if (tg3_flag(tp, TAGGED_STATUS))
8977                         fn = tg3_interrupt_tagged;
8978                 flags = IRQF_SHARED;
8979         }
8980
8981         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8982 }
8983
8984 static int tg3_test_interrupt(struct tg3 *tp)
8985 {
8986         struct tg3_napi *tnapi = &tp->napi[0];
8987         struct net_device *dev = tp->dev;
8988         int err, i, intr_ok = 0;
8989         u32 val;
8990
8991         if (!netif_running(dev))
8992                 return -ENODEV;
8993
8994         tg3_disable_ints(tp);
8995
8996         free_irq(tnapi->irq_vec, tnapi);
8997
8998         /*
8999          * Turn off MSI one shot mode.  Otherwise this test has no
9000          * observable way to know whether the interrupt was delivered.
9001          */
9002         if (tg3_flag(tp, 57765_PLUS)) {
9003                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9004                 tw32(MSGINT_MODE, val);
9005         }
9006
9007         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9008                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9009         if (err)
9010                 return err;
9011
9012         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9013         tg3_enable_ints(tp);
9014
9015         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9016                tnapi->coal_now);
9017
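             /* Give the test interrupt up to ~50 ms (5 x 10 ms) to arrive. */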
9018         for (i = 0; i < 5; i++) {
9019                 u32 int_mbox, misc_host_ctrl;
9020
9021                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9022                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9023
9024                 if ((int_mbox != 0) ||
9025                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9026                         intr_ok = 1;
9027                         break;
9028                 }
9029
9030                 if (tg3_flag(tp, 57765_PLUS) &&
9031                     tnapi->hw_status->status_tag != tnapi->last_tag)
9032                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9033
9034                 msleep(10);
9035         }
9036
9037         tg3_disable_ints(tp);
9038
9039         free_irq(tnapi->irq_vec, tnapi);
9040
9041         err = tg3_request_irq(tp, 0);
9042
9043         if (err)
9044                 return err;
9045
9046         if (intr_ok) {
9047                 /* Reenable MSI one shot mode. */
9048                 if (tg3_flag(tp, 57765_PLUS)) {
9049                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9050                         tw32(MSGINT_MODE, val);
9051                 }
9052                 return 0;
9053         }
9054
9055         return -EIO;
9056 }
9057
9058 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
9059  * mode is successfully restored.
9060  */
9061 static int tg3_test_msi(struct tg3 *tp)
9062 {
9063         int err;
9064         u16 pci_cmd;
9065
9066         if (!tg3_flag(tp, USING_MSI))
9067                 return 0;
9068
9069         /* Turn off SERR reporting in case MSI terminates with Master
9070          * Abort.
9071          */
9072         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9073         pci_write_config_word(tp->pdev, PCI_COMMAND,
9074                               pci_cmd & ~PCI_COMMAND_SERR);
9075
9076         err = tg3_test_interrupt(tp);
9077
9078         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9079
9080         if (!err)
9081                 return 0;
9082
9083         /* other failures */
9084         if (err != -EIO)
9085                 return err;
9086
9087         /* MSI test failed, go back to INTx mode */
9088         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9089                     "to INTx mode. Please report this failure to the PCI "
9090                     "maintainer and include system chipset information\n");
9091
9092         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9093
9094         pci_disable_msi(tp->pdev);
9095
9096         tg3_flag_clear(tp, USING_MSI);
9097         tp->napi[0].irq_vec = tp->pdev->irq;
9098
9099         err = tg3_request_irq(tp, 0);
9100         if (err)
9101                 return err;
9102
9103         /* Need to reset the chip because the MSI cycle may have terminated
9104          * with Master Abort.
9105          */
9106         tg3_full_lock(tp, 1);
9107
9108         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9109         err = tg3_init_hw(tp, 1);
9110
9111         tg3_full_unlock(tp);
9112
9113         if (err)
9114                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9115
9116         return err;
9117 }
9118
9119 static int tg3_request_firmware(struct tg3 *tp)
9120 {
9121         const __be32 *fw_data;
9122
9123         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9124                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9125                            tp->fw_needed);
9126                 return -ENOENT;
9127         }
9128
9129         fw_data = (void *)tp->fw->data;
9130
9131         /* Firmware blob starts with version numbers, followed by
9132          * start address and _full_ length including BSS sections
9133          * (which must be longer than the actual data, of course).
9134          */
9135
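             /* Header layout implied by the comment above and the checks below:
              * fw_data[0] version, fw_data[1] start address, fw_data[2] full
              * length; the payload starts 12 bytes in, at fw_data[3].
              */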
9136         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9137         if (tp->fw_len < (tp->fw->size - 12)) {
9138                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9139                            tp->fw_len, tp->fw_needed);
9140                 release_firmware(tp->fw);
9141                 tp->fw = NULL;
9142                 return -EINVAL;
9143         }
9144
9145         /* We no longer need firmware; we have it. */
9146         tp->fw_needed = NULL;
9147         return 0;
9148 }
9149
9150 static bool tg3_enable_msix(struct tg3 *tp)
9151 {
9152         int i, rc, cpus = num_online_cpus();
9153         struct msix_entry msix_ent[tp->irq_max];
9154
9155         if (cpus == 1)
9156                 /* Just fall back to the simpler MSI mode. */
9157                 return false;
9158
9159         /*
9160          * We want as many rx rings enabled as there are cpus.
9161          * The first MSIX vector only deals with link interrupts, etc.,
9162          * so we add one to the number of vectors we are requesting.
9163          */
9164         tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9165
9166         for (i = 0; i < tp->irq_max; i++) {
9167                 msix_ent[i].entry  = i;
9168                 msix_ent[i].vector = 0;
9169         }
9170
9171         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9172         if (rc < 0) {
9173                 return false;
9174         } else if (rc != 0) {
9175                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9176                         return false;
9177                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9178                               tp->irq_cnt, rc);
9179                 tp->irq_cnt = rc;
9180         }
9181
9182         for (i = 0; i < tp->irq_max; i++)
9183                 tp->napi[i].irq_vec = msix_ent[i].vector;
9184
9185         netif_set_real_num_tx_queues(tp->dev, 1);
9186         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9187         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9188                 pci_disable_msix(tp->pdev);
9189                 return false;
9190         }
9191
9192         if (tp->irq_cnt > 1) {
9193                 tg3_flag_set(tp, ENABLE_RSS);
9194
9195                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9196                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9197                         tg3_flag_set(tp, ENABLE_TSS);
9198                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9199                 }
9200         }
9201
9202         return true;
9203 }
9204
9205 static void tg3_ints_init(struct tg3 *tp)
9206 {
9207         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9208             !tg3_flag(tp, TAGGED_STATUS)) {
9209                 /* All MSI supporting chips should support tagged
9210                  * status.  Warn and skip MSI if that is not the case.
9211                  */
9212                 netdev_warn(tp->dev,
9213                             "MSI without TAGGED_STATUS? Not using MSI\n");
9214                 goto defcfg;
9215         }
9216
9217         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9218                 tg3_flag_set(tp, USING_MSIX);
9219         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9220                 tg3_flag_set(tp, USING_MSI);
9221
9222         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9223                 u32 msi_mode = tr32(MSGINT_MODE);
9224                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9225                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9226                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9227         }
9228 defcfg:
9229         if (!tg3_flag(tp, USING_MSIX)) {
9230                 tp->irq_cnt = 1;
9231                 tp->napi[0].irq_vec = tp->pdev->irq;
9232                 netif_set_real_num_tx_queues(tp->dev, 1);
9233                 netif_set_real_num_rx_queues(tp->dev, 1);
9234         }
9235 }
9236
9237 static void tg3_ints_fini(struct tg3 *tp)
9238 {
9239         if (tg3_flag(tp, USING_MSIX))
9240                 pci_disable_msix(tp->pdev);
9241         else if (tg3_flag(tp, USING_MSI))
9242                 pci_disable_msi(tp->pdev);
9243         tg3_flag_clear(tp, USING_MSI);
9244         tg3_flag_clear(tp, USING_MSIX);
9245         tg3_flag_clear(tp, ENABLE_RSS);
9246         tg3_flag_clear(tp, ENABLE_TSS);
9247 }
9248
9249 static int tg3_open(struct net_device *dev)
9250 {
9251         struct tg3 *tp = netdev_priv(dev);
9252         int i, err;
9253
9254         if (tp->fw_needed) {
9255                 err = tg3_request_firmware(tp);
9256                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9257                         if (err)
9258                                 return err;
9259                 } else if (err) {
9260                         netdev_warn(tp->dev, "TSO capability disabled\n");
9261                         tg3_flag_clear(tp, TSO_CAPABLE);
9262                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9263                         netdev_notice(tp->dev, "TSO capability restored\n");
9264                         tg3_flag_set(tp, TSO_CAPABLE);
9265                 }
9266         }
9267
9268         netif_carrier_off(tp->dev);
9269
9270         err = tg3_power_up(tp);
9271         if (err)
9272                 return err;
9273
9274         tg3_full_lock(tp, 0);
9275
9276         tg3_disable_ints(tp);
9277         tg3_flag_clear(tp, INIT_COMPLETE);
9278
9279         tg3_full_unlock(tp);
9280
9281         /*
9282          * Setup interrupts first so we know how
9283          * many NAPI resources to allocate
9284          */
9285         tg3_ints_init(tp);
9286
9287         /* The placement of this call is tied
9288          * to the setup and use of Host TX descriptors.
9289          */
9290         err = tg3_alloc_consistent(tp);
9291         if (err)
9292                 goto err_out1;
9293
9294         tg3_napi_init(tp);
9295
9296         tg3_napi_enable(tp);
9297
9298         for (i = 0; i < tp->irq_cnt; i++) {
9299                 struct tg3_napi *tnapi = &tp->napi[i];
9300                 err = tg3_request_irq(tp, i);
9301                 if (err) {
9302                         for (i--; i >= 0; i--)
9303                                 free_irq(tnapi->irq_vec, tnapi);
9304                         break;
9305                 }
9306         }
9307
9308         if (err)
9309                 goto err_out2;
9310
9311         tg3_full_lock(tp, 0);
9312
9313         err = tg3_init_hw(tp, 1);
9314         if (err) {
9315                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9316                 tg3_free_rings(tp);
9317         } else {
9318                 if (tg3_flag(tp, TAGGED_STATUS))
9319                         tp->timer_offset = HZ;
9320                 else
9321                         tp->timer_offset = HZ / 10;
9322
9323                 BUG_ON(tp->timer_offset > HZ);
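                     /* The timer fires every timer_offset jiffies; the
                      * multipliers rescale that to the once-per-second work and
                      * the once-per-2-seconds ASF heartbeat in tg3_timer.  E.g.
                      * with timer_offset = HZ / 10, timer_multiplier is 10 and
                      * asf_multiplier is 20.
                      */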
9324                 tp->timer_counter = tp->timer_multiplier =
9325                         (HZ / tp->timer_offset);
9326                 tp->asf_counter = tp->asf_multiplier =
9327                         ((HZ / tp->timer_offset) * 2);
9328
9329                 init_timer(&tp->timer);
9330                 tp->timer.expires = jiffies + tp->timer_offset;
9331                 tp->timer.data = (unsigned long) tp;
9332                 tp->timer.function = tg3_timer;
9333         }
9334
9335         tg3_full_unlock(tp);
9336
9337         if (err)
9338                 goto err_out3;
9339
9340         if (tg3_flag(tp, USING_MSI)) {
9341                 err = tg3_test_msi(tp);
9342
9343                 if (err) {
9344                         tg3_full_lock(tp, 0);
9345                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9346                         tg3_free_rings(tp);
9347                         tg3_full_unlock(tp);
9348
9349                         goto err_out2;
9350                 }
9351
9352                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9353                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9354
9355                         tw32(PCIE_TRANSACTION_CFG,
9356                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9357                 }
9358         }
9359
9360         tg3_phy_start(tp);
9361
9362         tg3_full_lock(tp, 0);
9363
9364         add_timer(&tp->timer);
9365         tg3_flag_set(tp, INIT_COMPLETE);
9366         tg3_enable_ints(tp);
9367
9368         tg3_full_unlock(tp);
9369
9370         netif_tx_start_all_queues(dev);
9371
9372         /*
9373          * Reset the loopback feature if it was turned on while the device
9374          * was down; make sure that it is configured properly now.
9375          */
9376         if (dev->features & NETIF_F_LOOPBACK)
9377                 tg3_set_loopback(dev, dev->features);
9378
9379         return 0;
9380
9381 err_out3:
9382         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9383                 struct tg3_napi *tnapi = &tp->napi[i];
9384                 free_irq(tnapi->irq_vec, tnapi);
9385         }
9386
9387 err_out2:
9388         tg3_napi_disable(tp);
9389         tg3_napi_fini(tp);
9390         tg3_free_consistent(tp);
9391
9392 err_out1:
9393         tg3_ints_fini(tp);
9394         return err;
9395 }
9396
9397 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9398                                                  struct rtnl_link_stats64 *);
9399 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9400
9401 static int tg3_close(struct net_device *dev)
9402 {
9403         int i;
9404         struct tg3 *tp = netdev_priv(dev);
9405
9406         tg3_napi_disable(tp);
9407         cancel_work_sync(&tp->reset_task);
9408
9409         netif_tx_stop_all_queues(dev);
9410
9411         del_timer_sync(&tp->timer);
9412
9413         tg3_phy_stop(tp);
9414
9415         tg3_full_lock(tp, 1);
9416
9417         tg3_disable_ints(tp);
9418
9419         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9420         tg3_free_rings(tp);
9421         tg3_flag_clear(tp, INIT_COMPLETE);
9422
9423         tg3_full_unlock(tp);
9424
9425         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9426                 struct tg3_napi *tnapi = &tp->napi[i];
9427                 free_irq(tnapi->irq_vec, tnapi);
9428         }
9429
9430         tg3_ints_fini(tp);
9431
9432         tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9433
9434         memcpy(&tp->estats_prev, tg3_get_estats(tp),
9435                sizeof(tp->estats_prev));
9436
9437         tg3_napi_fini(tp);
9438
9439         tg3_free_consistent(tp);
9440
9441         tg3_power_down(tp);
9442
9443         netif_carrier_off(tp->dev);
9444
9445         return 0;
9446 }
9447
9448 static inline u64 get_stat64(tg3_stat64_t *val)
9449 {
9450         return ((u64)val->high << 32) | ((u64)val->low);
9451 }
9452
9453 static u64 calc_crc_errors(struct tg3 *tp)
9454 {
9455         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9456
9457         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9458             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9459              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9460                 u32 val;
9461
9462                 spin_lock_bh(&tp->lock);
9463                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9464                         tg3_writephy(tp, MII_TG3_TEST1,
9465                                      val | MII_TG3_TEST1_CRC_EN);
9466                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9467                 } else
9468                         val = 0;
9469                 spin_unlock_bh(&tp->lock);
9470
9471                 tp->phy_crc_errors += val;
9472
9473                 return tp->phy_crc_errors;
9474         }
9475
9476         return get_stat64(&hw_stats->rx_fcs_errors);
9477 }
9478
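     /* Add the live hardware counter on top of the snapshot saved in
      * tp->estats_prev when the device was last closed.
      */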
9479 #define ESTAT_ADD(member) \
9480         estats->member =        old_estats->member + \
9481                                 get_stat64(&hw_stats->member)
9482
9483 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9484 {
9485         struct tg3_ethtool_stats *estats = &tp->estats;
9486         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9487         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9488
9489         if (!hw_stats)
9490                 return old_estats;
9491
9492         ESTAT_ADD(rx_octets);
9493         ESTAT_ADD(rx_fragments);
9494         ESTAT_ADD(rx_ucast_packets);
9495         ESTAT_ADD(rx_mcast_packets);
9496         ESTAT_ADD(rx_bcast_packets);
9497         ESTAT_ADD(rx_fcs_errors);
9498         ESTAT_ADD(rx_align_errors);
9499         ESTAT_ADD(rx_xon_pause_rcvd);
9500         ESTAT_ADD(rx_xoff_pause_rcvd);
9501         ESTAT_ADD(rx_mac_ctrl_rcvd);
9502         ESTAT_ADD(rx_xoff_entered);
9503         ESTAT_ADD(rx_frame_too_long_errors);
9504         ESTAT_ADD(rx_jabbers);
9505         ESTAT_ADD(rx_undersize_packets);
9506         ESTAT_ADD(rx_in_length_errors);
9507         ESTAT_ADD(rx_out_length_errors);
9508         ESTAT_ADD(rx_64_or_less_octet_packets);
9509         ESTAT_ADD(rx_65_to_127_octet_packets);
9510         ESTAT_ADD(rx_128_to_255_octet_packets);
9511         ESTAT_ADD(rx_256_to_511_octet_packets);
9512         ESTAT_ADD(rx_512_to_1023_octet_packets);
9513         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9514         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9515         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9516         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9517         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9518
9519         ESTAT_ADD(tx_octets);
9520         ESTAT_ADD(tx_collisions);
9521         ESTAT_ADD(tx_xon_sent);
9522         ESTAT_ADD(tx_xoff_sent);
9523         ESTAT_ADD(tx_flow_control);
9524         ESTAT_ADD(tx_mac_errors);
9525         ESTAT_ADD(tx_single_collisions);
9526         ESTAT_ADD(tx_mult_collisions);
9527         ESTAT_ADD(tx_deferred);
9528         ESTAT_ADD(tx_excessive_collisions);
9529         ESTAT_ADD(tx_late_collisions);
9530         ESTAT_ADD(tx_collide_2times);
9531         ESTAT_ADD(tx_collide_3times);
9532         ESTAT_ADD(tx_collide_4times);
9533         ESTAT_ADD(tx_collide_5times);
9534         ESTAT_ADD(tx_collide_6times);
9535         ESTAT_ADD(tx_collide_7times);
9536         ESTAT_ADD(tx_collide_8times);
9537         ESTAT_ADD(tx_collide_9times);
9538         ESTAT_ADD(tx_collide_10times);
9539         ESTAT_ADD(tx_collide_11times);
9540         ESTAT_ADD(tx_collide_12times);
9541         ESTAT_ADD(tx_collide_13times);
9542         ESTAT_ADD(tx_collide_14times);
9543         ESTAT_ADD(tx_collide_15times);
9544         ESTAT_ADD(tx_ucast_packets);
9545         ESTAT_ADD(tx_mcast_packets);
9546         ESTAT_ADD(tx_bcast_packets);
9547         ESTAT_ADD(tx_carrier_sense_errors);
9548         ESTAT_ADD(tx_discards);
9549         ESTAT_ADD(tx_errors);
9550
9551         ESTAT_ADD(dma_writeq_full);
9552         ESTAT_ADD(dma_write_prioq_full);
9553         ESTAT_ADD(rxbds_empty);
9554         ESTAT_ADD(rx_discards);
9555         ESTAT_ADD(rx_errors);
9556         ESTAT_ADD(rx_threshold_hit);
9557
9558         ESTAT_ADD(dma_readq_full);
9559         ESTAT_ADD(dma_read_prioq_full);
9560         ESTAT_ADD(tx_comp_queue_full);
9561
9562         ESTAT_ADD(ring_set_send_prod_index);
9563         ESTAT_ADD(ring_status_update);
9564         ESTAT_ADD(nic_irqs);
9565         ESTAT_ADD(nic_avoided_irqs);
9566         ESTAT_ADD(nic_tx_threshold_hit);
9567
9568         ESTAT_ADD(mbuf_lwm_thresh_hit);
9569
9570         return estats;
9571 }
9572
9573 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9574                                                  struct rtnl_link_stats64 *stats)
9575 {
9576         struct tg3 *tp = netdev_priv(dev);
9577         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9578         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9579
9580         if (!hw_stats)
9581                 return old_stats;
9582
9583         stats->rx_packets = old_stats->rx_packets +
9584                 get_stat64(&hw_stats->rx_ucast_packets) +
9585                 get_stat64(&hw_stats->rx_mcast_packets) +
9586                 get_stat64(&hw_stats->rx_bcast_packets);
9587
9588         stats->tx_packets = old_stats->tx_packets +
9589                 get_stat64(&hw_stats->tx_ucast_packets) +
9590                 get_stat64(&hw_stats->tx_mcast_packets) +
9591                 get_stat64(&hw_stats->tx_bcast_packets);
9592
9593         stats->rx_bytes = old_stats->rx_bytes +
9594                 get_stat64(&hw_stats->rx_octets);
9595         stats->tx_bytes = old_stats->tx_bytes +
9596                 get_stat64(&hw_stats->tx_octets);
9597
9598         stats->rx_errors = old_stats->rx_errors +
9599                 get_stat64(&hw_stats->rx_errors);
9600         stats->tx_errors = old_stats->tx_errors +
9601                 get_stat64(&hw_stats->tx_errors) +
9602                 get_stat64(&hw_stats->tx_mac_errors) +
9603                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9604                 get_stat64(&hw_stats->tx_discards);
9605
9606         stats->multicast = old_stats->multicast +
9607                 get_stat64(&hw_stats->rx_mcast_packets);
9608         stats->collisions = old_stats->collisions +
9609                 get_stat64(&hw_stats->tx_collisions);
9610
9611         stats->rx_length_errors = old_stats->rx_length_errors +
9612                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9613                 get_stat64(&hw_stats->rx_undersize_packets);
9614
9615         stats->rx_over_errors = old_stats->rx_over_errors +
9616                 get_stat64(&hw_stats->rxbds_empty);
9617         stats->rx_frame_errors = old_stats->rx_frame_errors +
9618                 get_stat64(&hw_stats->rx_align_errors);
9619         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9620                 get_stat64(&hw_stats->tx_discards);
9621         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9622                 get_stat64(&hw_stats->tx_carrier_sense_errors);
9623
9624         stats->rx_crc_errors = old_stats->rx_crc_errors +
9625                 calc_crc_errors(tp);
9626
9627         stats->rx_missed_errors = old_stats->rx_missed_errors +
9628                 get_stat64(&hw_stats->rx_discards);
9629
9630         stats->rx_dropped = tp->rx_dropped;
9631
9632         return stats;
9633 }
9634
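     /* Bit-serial CRC-32 (reflected, polynomial 0xedb88320); used below to
      * hash multicast addresses into the MAC_HASH filter registers.
      */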
9635 static inline u32 calc_crc(unsigned char *buf, int len)
9636 {
9637         u32 reg;
9638         u32 tmp;
9639         int j, k;
9640
9641         reg = 0xffffffff;
9642
9643         for (j = 0; j < len; j++) {
9644                 reg ^= buf[j];
9645
9646                 for (k = 0; k < 8; k++) {
9647                         tmp = reg & 0x01;
9648
9649                         reg >>= 1;
9650
9651                         if (tmp)
9652                                 reg ^= 0xedb88320;
9653                 }
9654         }
9655
9656         return ~reg;
9657 }
9658
9659 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9660 {
9661         /* accept or reject all multicast frames */
9662         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9663         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9664         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9665         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9666 }
9667
9668 static void __tg3_set_rx_mode(struct net_device *dev)
9669 {
9670         struct tg3 *tp = netdev_priv(dev);
9671         u32 rx_mode;
9672
9673         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9674                                   RX_MODE_KEEP_VLAN_TAG);
9675
9676 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9677         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9678          * flag clear.
9679          */
9680         if (!tg3_flag(tp, ENABLE_ASF))
9681                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9682 #endif
9683
9684         if (dev->flags & IFF_PROMISC) {
9685                 /* Promiscuous mode. */
9686                 rx_mode |= RX_MODE_PROMISC;
9687         } else if (dev->flags & IFF_ALLMULTI) {
9688                 /* Accept all multicast. */
9689                 tg3_set_multi(tp, 1);
9690         } else if (netdev_mc_empty(dev)) {
9691                 /* Reject all multicast. */
9692                 tg3_set_multi(tp, 0);
9693         } else {
9694                 /* Accept one or more multicast addresses. */
9695                 struct netdev_hw_addr *ha;
9696                 u32 mc_filter[4] = { 0, };
9697                 u32 regidx;
9698                 u32 bit;
9699                 u32 crc;
9700
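                /* Hash each multicast address into the 128-bit filter: the
                 * low 7 bits of the complemented CRC pick the bit, with
                 * bits 6:5 selecting one of the four 32-bit hash registers
                 * and bits 4:0 the bit within it.  E.g. ~crc & 0x7f == 0x7f
                 * sets bit 31 of MAC_HASH_REG_3.  (The hardware presumably
                 * applies the same hash to received frames.)
                 */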
9701                 netdev_for_each_mc_addr(ha, dev) {
9702                         crc = calc_crc(ha->addr, ETH_ALEN);
9703                         bit = ~crc & 0x7f;
9704                         regidx = (bit & 0x60) >> 5;
9705                         bit &= 0x1f;
9706                         mc_filter[regidx] |= (1 << bit);
9707                 }
9708
9709                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9710                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9711                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9712                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9713         }
9714
9715         if (rx_mode != tp->rx_mode) {
9716                 tp->rx_mode = rx_mode;
9717                 tw32_f(MAC_RX_MODE, rx_mode);
9718                 udelay(10);
9719         }
9720 }
9721
9722 static void tg3_set_rx_mode(struct net_device *dev)
9723 {
9724         struct tg3 *tp = netdev_priv(dev);
9725
9726         if (!netif_running(dev))
9727                 return;
9728
9729         tg3_full_lock(tp, 0);
9730         __tg3_set_rx_mode(dev);
9731         tg3_full_unlock(tp);
9732 }
9733
9734 static int tg3_get_regs_len(struct net_device *dev)
9735 {
9736         return TG3_REG_BLK_SIZE;
9737 }
9738
9739 static void tg3_get_regs(struct net_device *dev,
9740                 struct ethtool_regs *regs, void *_p)
9741 {
9742         struct tg3 *tp = netdev_priv(dev);
9743
9744         regs->version = 0;
9745
9746         memset(_p, 0, TG3_REG_BLK_SIZE);
9747
9748         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9749                 return;
9750
9751         tg3_full_lock(tp, 0);
9752
9753         tg3_dump_legacy_regs(tp, (u32 *)_p);
9754
9755         tg3_full_unlock(tp);
9756 }
9757
9758 static int tg3_get_eeprom_len(struct net_device *dev)
9759 {
9760         struct tg3 *tp = netdev_priv(dev);
9761
9762         return tp->nvram_size;
9763 }
9764
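/* ethtool EEPROM reads are serviced from NVRAM in three steps: an unaligned
 * head (part of one 32-bit word), whole aligned words, then an unaligned
 * tail.  E.g. offset=5 len=13 reads the word at 4 and keeps bytes 5-7, the
 * words at 8 and 12 whole, and the word at 16 keeping bytes 16-17.
 */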
9765 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9766 {
9767         struct tg3 *tp = netdev_priv(dev);
9768         int ret;
9769         u8  *pd;
9770         u32 i, offset, len, b_offset, b_count;
9771         __be32 val;
9772
9773         if (tg3_flag(tp, NO_NVRAM))
9774                 return -EINVAL;
9775
9776         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9777                 return -EAGAIN;
9778
9779         offset = eeprom->offset;
9780         len = eeprom->len;
9781         eeprom->len = 0;
9782
9783         eeprom->magic = TG3_EEPROM_MAGIC;
9784
9785         if (offset & 3) {
9786                 /* adjustments to start on required 4 byte boundary */
9787                 b_offset = offset & 3;
9788                 b_count = 4 - b_offset;
9789                 if (b_count > len) {
9790                         /* e.g. offset=1, len=2 */
9791                         b_count = len;
9792                 }
9793                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9794                 if (ret)
9795                         return ret;
9796                 memcpy(data, ((char *)&val) + b_offset, b_count);
9797                 len -= b_count;
9798                 offset += b_count;
9799                 eeprom->len += b_count;
9800         }
9801
9802         /* read bytes up to the last 4 byte boundary */
9803         pd = &data[eeprom->len];
9804         for (i = 0; i < (len - (len & 3)); i += 4) {
9805                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9806                 if (ret) {
9807                         eeprom->len += i;
9808                         return ret;
9809                 }
9810                 memcpy(pd + i, &val, 4);
9811         }
9812         eeprom->len += i;
9813
9814         if (len & 3) {
9815                 /* read last bytes not ending on 4 byte boundary */
9816                 pd = &data[eeprom->len];
9817                 b_count = len & 3;
9818                 b_offset = offset + len - b_count;
9819                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9820                 if (ret)
9821                         return ret;
9822                 memcpy(pd, &val, b_count);
9823                 eeprom->len += b_count;
9824         }
9825         return 0;
9826 }
9827
9828 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9829
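/* ethtool EEPROM writes go through a read-modify-write of the partial words
 * at either end, so tg3_nvram_write_block() always sees a 4-byte aligned
 * request whose length is a multiple of 4.
 */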
9830 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9831 {
9832         struct tg3 *tp = netdev_priv(dev);
9833         int ret;
9834         u32 offset, len, b_offset, odd_len;
9835         u8 *buf;
9836         __be32 start, end;
9837
9838         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9839                 return -EAGAIN;
9840
9841         if (tg3_flag(tp, NO_NVRAM) ||
9842             eeprom->magic != TG3_EEPROM_MAGIC)
9843                 return -EINVAL;
9844
9845         offset = eeprom->offset;
9846         len = eeprom->len;
9847
9848         if ((b_offset = (offset & 3))) {
9849                 /* adjustments to start on required 4 byte boundary */
9850                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9851                 if (ret)
9852                         return ret;
9853                 len += b_offset;
9854                 offset &= ~3;
9855                 if (len < 4)
9856                         len = 4;
9857         }
9858
9859         odd_len = 0;
9860         if (len & 3) {
9861                 /* adjustments to end on required 4 byte boundary */
9862                 odd_len = 1;
9863                 len = (len + 3) & ~3;
9864                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9865                 if (ret)
9866                         return ret;
9867         }
9868
9869         buf = data;
9870         if (b_offset || odd_len) {
9871                 buf = kmalloc(len, GFP_KERNEL);
9872                 if (!buf)
9873                         return -ENOMEM;
9874                 if (b_offset)
9875                         memcpy(buf, &start, 4);
9876                 if (odd_len)
9877                         memcpy(buf+len-4, &end, 4);
9878                 memcpy(buf + b_offset, data, eeprom->len);
9879         }
9880
9881         ret = tg3_nvram_write_block(tp, offset, len, buf);
9882
9883         if (buf != data)
9884                 kfree(buf);
9885
9886         return ret;
9887 }
9888
9889 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9890 {
9891         struct tg3 *tp = netdev_priv(dev);
9892
9893         if (tg3_flag(tp, USE_PHYLIB)) {
9894                 struct phy_device *phydev;
9895                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9896                         return -EAGAIN;
9897                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9898                 return phy_ethtool_gset(phydev, cmd);
9899         }
9900
9901         cmd->supported = (SUPPORTED_Autoneg);
9902
9903         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9904                 cmd->supported |= (SUPPORTED_1000baseT_Half |
9905                                    SUPPORTED_1000baseT_Full);
9906
9907         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9908                 cmd->supported |= (SUPPORTED_100baseT_Half |
9909                                   SUPPORTED_100baseT_Full |
9910                                   SUPPORTED_10baseT_Half |
9911                                   SUPPORTED_10baseT_Full |
9912                                   SUPPORTED_TP);
9913                 cmd->port = PORT_TP;
9914         } else {
9915                 cmd->supported |= SUPPORTED_FIBRE;
9916                 cmd->port = PORT_FIBRE;
9917         }
9918
9919         cmd->advertising = tp->link_config.advertising;
9920         if (netif_running(dev)) {
9921                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
9922                 cmd->duplex = tp->link_config.active_duplex;
9923         } else {
9924                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
9925                 cmd->duplex = DUPLEX_INVALID;
9926         }
9927         cmd->phy_address = tp->phy_addr;
9928         cmd->transceiver = XCVR_INTERNAL;
9929         cmd->autoneg = tp->link_config.autoneg;
9930         cmd->maxtxpkt = 0;
9931         cmd->maxrxpkt = 0;
9932         return 0;
9933 }
9934
9935 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9936 {
9937         struct tg3 *tp = netdev_priv(dev);
9938         u32 speed = ethtool_cmd_speed(cmd);
9939
9940         if (tg3_flag(tp, USE_PHYLIB)) {
9941                 struct phy_device *phydev;
9942                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9943                         return -EAGAIN;
9944                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9945                 return phy_ethtool_sset(phydev, cmd);
9946         }
9947
9948         if (cmd->autoneg != AUTONEG_ENABLE &&
9949             cmd->autoneg != AUTONEG_DISABLE)
9950                 return -EINVAL;
9951
9952         if (cmd->autoneg == AUTONEG_DISABLE &&
9953             cmd->duplex != DUPLEX_FULL &&
9954             cmd->duplex != DUPLEX_HALF)
9955                 return -EINVAL;
9956
9957         if (cmd->autoneg == AUTONEG_ENABLE) {
9958                 u32 mask = ADVERTISED_Autoneg |
9959                            ADVERTISED_Pause |
9960                            ADVERTISED_Asym_Pause;
9961
9962                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9963                         mask |= ADVERTISED_1000baseT_Half |
9964                                 ADVERTISED_1000baseT_Full;
9965
9966                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9967                         mask |= ADVERTISED_100baseT_Half |
9968                                 ADVERTISED_100baseT_Full |
9969                                 ADVERTISED_10baseT_Half |
9970                                 ADVERTISED_10baseT_Full |
9971                                 ADVERTISED_TP;
9972                 else
9973                         mask |= ADVERTISED_FIBRE;
9974
9975                 if (cmd->advertising & ~mask)
9976                         return -EINVAL;
9977
9978                 mask &= (ADVERTISED_1000baseT_Half |
9979                          ADVERTISED_1000baseT_Full |
9980                          ADVERTISED_100baseT_Half |
9981                          ADVERTISED_100baseT_Full |
9982                          ADVERTISED_10baseT_Half |
9983                          ADVERTISED_10baseT_Full);
9984
9985                 cmd->advertising &= mask;
9986         } else {
9987                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
9988                         if (speed != SPEED_1000)
9989                                 return -EINVAL;
9990
9991                         if (cmd->duplex != DUPLEX_FULL)
9992                                 return -EINVAL;
9993                 } else {
9994                         if (speed != SPEED_100 &&
9995                             speed != SPEED_10)
9996                                 return -EINVAL;
9997                 }
9998         }
9999
10000         tg3_full_lock(tp, 0);
10001
10002         tp->link_config.autoneg = cmd->autoneg;
10003         if (cmd->autoneg == AUTONEG_ENABLE) {
10004                 tp->link_config.advertising = (cmd->advertising |
10005                                               ADVERTISED_Autoneg);
10006                 tp->link_config.speed = SPEED_INVALID;
10007                 tp->link_config.duplex = DUPLEX_INVALID;
10008         } else {
10009                 tp->link_config.advertising = 0;
10010                 tp->link_config.speed = speed;
10011                 tp->link_config.duplex = cmd->duplex;
10012         }
10013
10014         tp->link_config.orig_speed = tp->link_config.speed;
10015         tp->link_config.orig_duplex = tp->link_config.duplex;
10016         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10017
10018         if (netif_running(dev))
10019                 tg3_setup_phy(tp, 1);
10020
10021         tg3_full_unlock(tp);
10022
10023         return 0;
10024 }
10025
10026 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10027 {
10028         struct tg3 *tp = netdev_priv(dev);
10029
10030         strcpy(info->driver, DRV_MODULE_NAME);
10031         strcpy(info->version, DRV_MODULE_VERSION);
10032         strcpy(info->fw_version, tp->fw_ver);
10033         strcpy(info->bus_info, pci_name(tp->pdev));
10034 }
10035
10036 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10037 {
10038         struct tg3 *tp = netdev_priv(dev);
10039
10040         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10041                 wol->supported = WAKE_MAGIC;
10042         else
10043                 wol->supported = 0;
10044         wol->wolopts = 0;
10045         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10046                 wol->wolopts = WAKE_MAGIC;
10047         memset(&wol->sopass, 0, sizeof(wol->sopass));
10048 }
10049
10050 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10051 {
10052         struct tg3 *tp = netdev_priv(dev);
10053         struct device *dp = &tp->pdev->dev;
10054
10055         if (wol->wolopts & ~WAKE_MAGIC)
10056                 return -EINVAL;
10057         if ((wol->wolopts & WAKE_MAGIC) &&
10058             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10059                 return -EINVAL;
10060
10061         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10062
10063         spin_lock_bh(&tp->lock);
10064         if (device_may_wakeup(dp))
10065                 tg3_flag_set(tp, WOL_ENABLE);
10066         else
10067                 tg3_flag_clear(tp, WOL_ENABLE);
10068         spin_unlock_bh(&tp->lock);
10069
10070         return 0;
10071 }
10072
10073 static u32 tg3_get_msglevel(struct net_device *dev)
10074 {
10075         struct tg3 *tp = netdev_priv(dev);
10076         return tp->msg_enable;
10077 }
10078
10079 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10080 {
10081         struct tg3 *tp = netdev_priv(dev);
10082         tp->msg_enable = value;
10083 }
10084
10085 static int tg3_nway_reset(struct net_device *dev)
10086 {
10087         struct tg3 *tp = netdev_priv(dev);
10088         int r;
10089
10090         if (!netif_running(dev))
10091                 return -EAGAIN;
10092
10093         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10094                 return -EINVAL;
10095
10096         if (tg3_flag(tp, USE_PHYLIB)) {
10097                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10098                         return -EAGAIN;
10099                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10100         } else {
10101                 u32 bmcr;
10102
10103                 spin_lock_bh(&tp->lock);
10104                 r = -EINVAL;
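                /* The first read appears to be a throwaway to flush any
                 * stale latched BMCR value; only the second read's result
                 * is used.
                 */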
10105                 tg3_readphy(tp, MII_BMCR, &bmcr);
10106                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10107                     ((bmcr & BMCR_ANENABLE) ||
10108                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10109                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10110                                                    BMCR_ANENABLE);
10111                         r = 0;
10112                 }
10113                 spin_unlock_bh(&tp->lock);
10114         }
10115
10116         return r;
10117 }
10118
10119 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10120 {
10121         struct tg3 *tp = netdev_priv(dev);
10122
10123         ering->rx_max_pending = tp->rx_std_ring_mask;
10124         ering->rx_mini_max_pending = 0;
10125         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10126                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10127         else
10128                 ering->rx_jumbo_max_pending = 0;
10129
10130         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10131
10132         ering->rx_pending = tp->rx_pending;
10133         ering->rx_mini_pending = 0;
10134         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10135                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10136         else
10137                 ering->rx_jumbo_pending = 0;
10138
10139         ering->tx_pending = tp->napi[0].tx_pending;
10140 }
10141
10142 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10143 {
10144         struct tg3 *tp = netdev_priv(dev);
10145         int i, irq_sync = 0, err = 0;
10146
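        /* tx_pending must exceed MAX_SKB_FRAGS so a maximally fragmented
         * skb always fits in the ring; TSO_BUG chips need triple that,
         * presumably because the TSO workaround can expand one skb into
         * several.
         */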
10147         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10148             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10149             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10150             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10151             (tg3_flag(tp, TSO_BUG) &&
10152              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10153                 return -EINVAL;
10154
10155         if (netif_running(dev)) {
10156                 tg3_phy_stop(tp);
10157                 tg3_netif_stop(tp);
10158                 irq_sync = 1;
10159         }
10160
10161         tg3_full_lock(tp, irq_sync);
10162
10163         tp->rx_pending = ering->rx_pending;
10164
10165         if (tg3_flag(tp, MAX_RXPEND_64) &&
10166             tp->rx_pending > 63)
10167                 tp->rx_pending = 63;
10168         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10169
10170         for (i = 0; i < tp->irq_max; i++)
10171                 tp->napi[i].tx_pending = ering->tx_pending;
10172
10173         if (netif_running(dev)) {
10174                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10175                 err = tg3_restart_hw(tp, 1);
10176                 if (!err)
10177                         tg3_netif_start(tp);
10178         }
10179
10180         tg3_full_unlock(tp);
10181
10182         if (irq_sync && !err)
10183                 tg3_phy_start(tp);
10184
10185         return err;
10186 }
10187
10188 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10189 {
10190         struct tg3 *tp = netdev_priv(dev);
10191
10192         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10193
10194         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10195                 epause->rx_pause = 1;
10196         else
10197                 epause->rx_pause = 0;
10198
10199         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10200                 epause->tx_pause = 1;
10201         else
10202                 epause->tx_pause = 0;
10203 }
10204
10205 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10206 {
10207         struct tg3 *tp = netdev_priv(dev);
10208         int err = 0;
10209
10210         if (tg3_flag(tp, USE_PHYLIB)) {
10211                 u32 newadv;
10212                 struct phy_device *phydev;
10213
10214                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10215
10216                 if (!(phydev->supported & SUPPORTED_Pause) ||
10217                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10218                      (epause->rx_pause != epause->tx_pause)))
10219                         return -EINVAL;
10220
10221                 tp->link_config.flowctrl = 0;
10222                 if (epause->rx_pause) {
10223                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10224
10225                         if (epause->tx_pause) {
10226                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10227                                 newadv = ADVERTISED_Pause;
10228                         } else
10229                                 newadv = ADVERTISED_Pause |
10230                                          ADVERTISED_Asym_Pause;
10231                 } else if (epause->tx_pause) {
10232                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10233                         newadv = ADVERTISED_Asym_Pause;
10234                 } else
10235                         newadv = 0;
10236
10237                 if (epause->autoneg)
10238                         tg3_flag_set(tp, PAUSE_AUTONEG);
10239                 else
10240                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10241
10242                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10243                         u32 oldadv = phydev->advertising &
10244                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10245                         if (oldadv != newadv) {
10246                                 phydev->advertising &=
10247                                         ~(ADVERTISED_Pause |
10248                                           ADVERTISED_Asym_Pause);
10249                                 phydev->advertising |= newadv;
10250                                 if (phydev->autoneg) {
10251                                         /*
10252                                          * Always renegotiate the link to
10253                                          * inform our link partner of our
10254                                          * flow control settings, even if the
10255                                          * flow control is forced.  Let
10256                                          * tg3_adjust_link() do the final
10257                                          * flow control setup.
10258                                          */
10259                                         return phy_start_aneg(phydev);
10260                                 }
10261                         }
10262
10263                         if (!epause->autoneg)
10264                                 tg3_setup_flow_control(tp, 0, 0);
10265                 } else {
10266                         tp->link_config.orig_advertising &=
10267                                         ~(ADVERTISED_Pause |
10268                                           ADVERTISED_Asym_Pause);
10269                         tp->link_config.orig_advertising |= newadv;
10270                 }
10271         } else {
10272                 int irq_sync = 0;
10273
10274                 if (netif_running(dev)) {
10275                         tg3_netif_stop(tp);
10276                         irq_sync = 1;
10277                 }
10278
10279                 tg3_full_lock(tp, irq_sync);
10280
10281                 if (epause->autoneg)
10282                         tg3_flag_set(tp, PAUSE_AUTONEG);
10283                 else
10284                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10285                 if (epause->rx_pause)
10286                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10287                 else
10288                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10289                 if (epause->tx_pause)
10290                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10291                 else
10292                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10293
10294                 if (netif_running(dev)) {
10295                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10296                         err = tg3_restart_hw(tp, 1);
10297                         if (!err)
10298                                 tg3_netif_start(tp);
10299                 }
10300
10301                 tg3_full_unlock(tp);
10302         }
10303
10304         return err;
10305 }
10306
10307 static int tg3_get_sset_count(struct net_device *dev, int sset)
10308 {
10309         switch (sset) {
10310         case ETH_SS_TEST:
10311                 return TG3_NUM_TEST;
10312         case ETH_SS_STATS:
10313                 return TG3_NUM_STATS;
10314         default:
10315                 return -EOPNOTSUPP;
10316         }
10317 }
10318
10319 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10320 {
10321         switch (stringset) {
10322         case ETH_SS_STATS:
10323                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10324                 break;
10325         case ETH_SS_TEST:
10326                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10327                 break;
10328         default:
10329                 WARN_ON(1);     /* we need a WARN() */
10330                 break;
10331         }
10332 }
10333
10334 static int tg3_set_phys_id(struct net_device *dev,
10335                             enum ethtool_phys_id_state state)
10336 {
10337         struct tg3 *tp = netdev_priv(dev);
10338
10339         if (!netif_running(tp->dev))
10340                 return -EAGAIN;
10341
10342         switch (state) {
10343         case ETHTOOL_ID_ACTIVE:
10344                 return 1;       /* cycle on/off once per second */
10345
10346         case ETHTOOL_ID_ON:
10347                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10348                      LED_CTRL_1000MBPS_ON |
10349                      LED_CTRL_100MBPS_ON |
10350                      LED_CTRL_10MBPS_ON |
10351                      LED_CTRL_TRAFFIC_OVERRIDE |
10352                      LED_CTRL_TRAFFIC_BLINK |
10353                      LED_CTRL_TRAFFIC_LED);
10354                 break;
10355
10356         case ETHTOOL_ID_OFF:
10357                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10358                      LED_CTRL_TRAFFIC_OVERRIDE);
10359                 break;
10360
10361         case ETHTOOL_ID_INACTIVE:
10362                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10363                 break;
10364         }
10365
10366         return 0;
10367 }
10368
10369 static void tg3_get_ethtool_stats(struct net_device *dev,
10370                                    struct ethtool_stats *estats, u64 *tmp_stats)
10371 {
10372         struct tg3 *tp = netdev_priv(dev);
10373         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10374 }
10375
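/* Return a kmalloc()ed copy of the VPD block.  Flash parts with the
 * standard magic may point at an extended VPD area via an NVRAM directory
 * entry; otherwise the fixed default offset/length is used, and parts
 * without the magic are read through the PCI VPD capability instead.
 */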
10376 static __be32 *tg3_vpd_readblock(struct tg3 *tp)
10377 {
10378         int i;
10379         __be32 *buf;
10380         u32 offset = 0, len = 0;
10381         u32 magic, val;
10382
10383         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10384                 return NULL;
10385
10386         if (magic == TG3_EEPROM_MAGIC) {
10387                 for (offset = TG3_NVM_DIR_START;
10388                      offset < TG3_NVM_DIR_END;
10389                      offset += TG3_NVM_DIRENT_SIZE) {
10390                         if (tg3_nvram_read(tp, offset, &val))
10391                                 return NULL;
10392
10393                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10394                             TG3_NVM_DIRTYPE_EXTVPD)
10395                                 break;
10396                 }
10397
10398                 if (offset != TG3_NVM_DIR_END) {
10399                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10400                         if (tg3_nvram_read(tp, offset + 4, &offset))
10401                                 return NULL;
10402
10403                         offset = tg3_nvram_logical_addr(tp, offset);
10404                 }
10405         }
10406
10407         if (!offset || !len) {
10408                 offset = TG3_NVM_VPD_OFF;
10409                 len = TG3_NVM_VPD_LEN;
10410         }
10411
10412         buf = kmalloc(len, GFP_KERNEL);
10413         if (buf == NULL)
10414                 return NULL;
10415
10416         if (magic == TG3_EEPROM_MAGIC) {
10417                 for (i = 0; i < len; i += 4) {
10418                         /* The data is in little-endian format in NVRAM.
10419                          * Use the big-endian read routines to preserve
10420                          * the byte order as it exists in NVRAM.
10421                          */
10422                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10423                                 goto error;
10424                 }
10425         } else {
10426                 u8 *ptr;
10427                 ssize_t cnt;
10428                 unsigned int pos = 0;
10429
10430                 ptr = (u8 *)&buf[0];
10431                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10432                         cnt = pci_read_vpd(tp->pdev, pos,
10433                                            len - pos, ptr);
10434                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10435                                 cnt = 0;
10436                         else if (cnt < 0)
10437                                 goto error;
10438                 }
10439                 if (pos != len)
10440                         goto error;
10441         }
10442
10443         return buf;
10444
10445 error:
10446         kfree(buf);
10447         return NULL;
10448 }
10449
10450 #define NVRAM_TEST_SIZE 0x100
10451 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10452 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10453 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10454 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10455 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10456
10457 static int tg3_test_nvram(struct tg3 *tp)
10458 {
10459         u32 csum, magic;
10460         __be32 *buf;
10461         int i, j, k, err = 0, size;
10462
10463         if (tg3_flag(tp, NO_NVRAM))
10464                 return 0;
10465
10466         if (tg3_nvram_read(tp, 0, &magic) != 0)
10467                 return -EIO;
10468
10469         if (magic == TG3_EEPROM_MAGIC)
10470                 size = NVRAM_TEST_SIZE;
10471         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10472                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10473                     TG3_EEPROM_SB_FORMAT_1) {
10474                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10475                         case TG3_EEPROM_SB_REVISION_0:
10476                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10477                                 break;
10478                         case TG3_EEPROM_SB_REVISION_2:
10479                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10480                                 break;
10481                         case TG3_EEPROM_SB_REVISION_3:
10482                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10483                                 break;
10484                         default:
10485                                 return 0;
10486                         }
10487                 } else
10488                         return 0;
10489         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10490                 size = NVRAM_SELFBOOT_HW_SIZE;
10491         else
10492                 return -EIO;
10493
10494         buf = kmalloc(size, GFP_KERNEL);
10495         if (buf == NULL)
10496                 return -ENOMEM;
10497
10498         err = -EIO;
10499         for (i = 0, j = 0; i < size; i += 4, j++) {
10500                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10501                 if (err)
10502                         break;
10503         }
10504         if (i < size)
10505                 goto out;
10506
10507         /* Selfboot format */
10508         magic = be32_to_cpu(buf[0]);
10509         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10510             TG3_EEPROM_MAGIC_FW) {
10511                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10512
10513                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10514                     TG3_EEPROM_SB_REVISION_2) {
10515                         /* For rev 2, the csum doesn't include the MBA. */
10516                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10517                                 csum8 += buf8[i];
10518                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10519                                 csum8 += buf8[i];
10520                 } else {
10521                         for (i = 0; i < size; i++)
10522                                 csum8 += buf8[i];
10523                 }
10524
10525                 if (csum8 == 0) {
10526                         err = 0;
10527                         goto out;
10528                 }
10529
10530                 err = -EIO;
10531                 goto out;
10532         }
10533
10534         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10535             TG3_EEPROM_MAGIC_HW) {
10536                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10537                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10538                 u8 *buf8 = (u8 *) buf;
10539
10540                 /* Separate the parity bits and the data bytes.  */
10541                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10542                         if ((i == 0) || (i == 8)) {
10543                                 int l;
10544                                 u8 msk;
10545
10546                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10547                                         parity[k++] = buf8[i] & msk;
10548                                 i++;
10549                         } else if (i == 16) {
10550                                 int l;
10551                                 u8 msk;
10552
10553                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10554                                         parity[k++] = buf8[i] & msk;
10555                                 i++;
10556
10557                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10558                                         parity[k++] = buf8[i] & msk;
10559                                 i++;
10560                         }
10561                         data[j++] = buf8[i];
10562                 }
10563
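                /* Each data byte plus its parity bit must have odd overall
                 * parity: a byte with an even number of 1s needs its parity
                 * bit set, and one with an odd number needs it clear.
                 */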
10564                 err = -EIO;
10565                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10566                         u8 hw8 = hweight8(data[i]);
10567
10568                         if ((hw8 & 0x1) && parity[i])
10569                                 goto out;
10570                         else if (!(hw8 & 0x1) && !parity[i])
10571                                 goto out;
10572                 }
10573                 err = 0;
10574                 goto out;
10575         }
10576
10577         err = -EIO;
10578
10579         /* Bootstrap checksum at offset 0x10 */
10580         csum = calc_crc((unsigned char *) buf, 0x10);
10581         if (csum != le32_to_cpu(buf[0x10/4]))
10582                 goto out;
10583
10584         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10585         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10586         if (csum != le32_to_cpu(buf[0xfc/4]))
10587                 goto out;
10588
10589         kfree(buf);
10590
10591         buf = tg3_vpd_readblock(tp);
10592         if (!buf)
10593                 return -ENOMEM;
10594
10595         i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10596                              PCI_VPD_LRDT_RO_DATA);
10597         if (i > 0) {
10598                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10599                 if (j < 0)
10600                         goto out;
10601
10602                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10603                         goto out;
10604
10605                 i += PCI_VPD_LRDT_TAG_SIZE;
10606                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10607                                               PCI_VPD_RO_KEYWORD_CHKSUM);
10608                 if (j > 0) {
10609                         u8 csum8 = 0;
10610
10611                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
10612
10613                         for (i = 0; i <= j; i++)
10614                                 csum8 += ((u8 *)buf)[i];
10615
10616                         if (csum8)
10617                                 goto out;
10618                 }
10619         }
10620
10621         err = 0;
10622
10623 out:
10624         kfree(buf);
10625         return err;
10626 }
10627
10628 #define TG3_SERDES_TIMEOUT_SEC  2
10629 #define TG3_COPPER_TIMEOUT_SEC  6
10630
10631 static int tg3_test_link(struct tg3 *tp)
10632 {
10633         int i, max;
10634
10635         if (!netif_running(tp->dev))
10636                 return -ENODEV;
10637
10638         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10639                 max = TG3_SERDES_TIMEOUT_SEC;
10640         else
10641                 max = TG3_COPPER_TIMEOUT_SEC;
10642
10643         for (i = 0; i < max; i++) {
10644                 if (netif_carrier_ok(tp->dev))
10645                         return 0;
10646
10647                 if (msleep_interruptible(1000))
10648                         break;
10649         }
10650
10651         return -EIO;
10652 }
10653
10654 /* Only test the commonly used registers */
10655 static int tg3_test_registers(struct tg3 *tp)
10656 {
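        /* For each reg_tbl[] entry below, read_mask covers read-only bits
         * (their value must survive writes) and write_mask covers
         * read/write bits (they must accept both all-zeros and all-ones).
         */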
10657         int i, is_5705, is_5750;
10658         u32 offset, read_mask, write_mask, val, save_val, read_val;
10659         static struct {
10660                 u16 offset;
10661                 u16 flags;
10662 #define TG3_FL_5705     0x1
10663 #define TG3_FL_NOT_5705 0x2
10664 #define TG3_FL_NOT_5788 0x4
10665 #define TG3_FL_NOT_5750 0x8
10666                 u32 read_mask;
10667                 u32 write_mask;
10668         } reg_tbl[] = {
10669                 /* MAC Control Registers */
10670                 { MAC_MODE, TG3_FL_NOT_5705,
10671                         0x00000000, 0x00ef6f8c },
10672                 { MAC_MODE, TG3_FL_5705,
10673                         0x00000000, 0x01ef6b8c },
10674                 { MAC_STATUS, TG3_FL_NOT_5705,
10675                         0x03800107, 0x00000000 },
10676                 { MAC_STATUS, TG3_FL_5705,
10677                         0x03800100, 0x00000000 },
10678                 { MAC_ADDR_0_HIGH, 0x0000,
10679                         0x00000000, 0x0000ffff },
10680                 { MAC_ADDR_0_LOW, 0x0000,
10681                         0x00000000, 0xffffffff },
10682                 { MAC_RX_MTU_SIZE, 0x0000,
10683                         0x00000000, 0x0000ffff },
10684                 { MAC_TX_MODE, 0x0000,
10685                         0x00000000, 0x00000070 },
10686                 { MAC_TX_LENGTHS, 0x0000,
10687                         0x00000000, 0x00003fff },
10688                 { MAC_RX_MODE, TG3_FL_NOT_5705,
10689                         0x00000000, 0x000007fc },
10690                 { MAC_RX_MODE, TG3_FL_5705,
10691                         0x00000000, 0x000007dc },
10692                 { MAC_HASH_REG_0, 0x0000,
10693                         0x00000000, 0xffffffff },
10694                 { MAC_HASH_REG_1, 0x0000,
10695                         0x00000000, 0xffffffff },
10696                 { MAC_HASH_REG_2, 0x0000,
10697                         0x00000000, 0xffffffff },
10698                 { MAC_HASH_REG_3, 0x0000,
10699                         0x00000000, 0xffffffff },
10700
10701                 /* Receive Data and Receive BD Initiator Control Registers. */
10702                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10703                         0x00000000, 0xffffffff },
10704                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10705                         0x00000000, 0xffffffff },
10706                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10707                         0x00000000, 0x00000003 },
10708                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10709                         0x00000000, 0xffffffff },
10710                 { RCVDBDI_STD_BD+0, 0x0000,
10711                         0x00000000, 0xffffffff },
10712                 { RCVDBDI_STD_BD+4, 0x0000,
10713                         0x00000000, 0xffffffff },
10714                 { RCVDBDI_STD_BD+8, 0x0000,
10715                         0x00000000, 0xffff0002 },
10716                 { RCVDBDI_STD_BD+0xc, 0x0000,
10717                         0x00000000, 0xffffffff },
10718
10719                 /* Receive BD Initiator Control Registers. */
10720                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10721                         0x00000000, 0xffffffff },
10722                 { RCVBDI_STD_THRESH, TG3_FL_5705,
10723                         0x00000000, 0x000003ff },
10724                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10725                         0x00000000, 0xffffffff },
10726
10727                 /* Host Coalescing Control Registers. */
10728                 { HOSTCC_MODE, TG3_FL_NOT_5705,
10729                         0x00000000, 0x00000004 },
10730                 { HOSTCC_MODE, TG3_FL_5705,
10731                         0x00000000, 0x000000f6 },
10732                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10733                         0x00000000, 0xffffffff },
10734                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10735                         0x00000000, 0x000003ff },
10736                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10737                         0x00000000, 0xffffffff },
10738                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10739                         0x00000000, 0x000003ff },
10740                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10741                         0x00000000, 0xffffffff },
10742                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10743                         0x00000000, 0x000000ff },
10744                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10745                         0x00000000, 0xffffffff },
10746                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10747                         0x00000000, 0x000000ff },
10748                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10749                         0x00000000, 0xffffffff },
10750                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10751                         0x00000000, 0xffffffff },
10752                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10753                         0x00000000, 0xffffffff },
10754                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10755                         0x00000000, 0x000000ff },
10756                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10757                         0x00000000, 0xffffffff },
10758                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10759                         0x00000000, 0x000000ff },
10760                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10761                         0x00000000, 0xffffffff },
10762                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10763                         0x00000000, 0xffffffff },
10764                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10765                         0x00000000, 0xffffffff },
10766                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10767                         0x00000000, 0xffffffff },
10768                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10769                         0x00000000, 0xffffffff },
10770                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10771                         0xffffffff, 0x00000000 },
10772                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10773                         0xffffffff, 0x00000000 },
10774
10775                 /* Buffer Manager Control Registers. */
10776                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10777                         0x00000000, 0x007fff80 },
10778                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10779                         0x00000000, 0x007fffff },
10780                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10781                         0x00000000, 0x0000003f },
10782                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10783                         0x00000000, 0x000001ff },
10784                 { BUFMGR_MB_HIGH_WATER, 0x0000,
10785                         0x00000000, 0x000001ff },
10786                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10787                         0xffffffff, 0x00000000 },
10788                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10789                         0xffffffff, 0x00000000 },
10790
10791                 /* Mailbox Registers */
10792                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10793                         0x00000000, 0x000001ff },
10794                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10795                         0x00000000, 0x000001ff },
10796                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10797                         0x00000000, 0x000007ff },
10798                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10799                         0x00000000, 0x000001ff },
10800
10801                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10802         };
10803
10804         is_5705 = is_5750 = 0;
10805         if (tg3_flag(tp, 5705_PLUS)) {
10806                 is_5705 = 1;
10807                 if (tg3_flag(tp, 5750_PLUS))
10808                         is_5750 = 1;
10809         }
10810
10811         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10812                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10813                         continue;
10814
10815                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10816                         continue;
10817
10818                 if (tg3_flag(tp, IS_5788) &&
10819                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
10820                         continue;
10821
10822                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10823                         continue;
10824
10825                 offset = (u32) reg_tbl[i].offset;
10826                 read_mask = reg_tbl[i].read_mask;
10827                 write_mask = reg_tbl[i].write_mask;
10828
10829                 /* Save the original register content */
10830                 save_val = tr32(offset);
10831
10832                 /* Determine the read-only value. */
10833                 read_val = save_val & read_mask;
10834
10835                 /* Write zero to the register, then make sure the read-only bits
10836                  * are not changed and the read/write bits are all zeros.
10837                  */
10838                 tw32(offset, 0);
10839
10840                 val = tr32(offset);
10841
10842                 /* Test the read-only and read/write bits. */
10843                 if (((val & read_mask) != read_val) || (val & write_mask))
10844                         goto out;
10845
10846                 /* Write ones to all the bits defined by read_mask and write_mask, then
10847                  * make sure the read-only bits are not changed and the
10848                  * read/write bits are all ones.
10849                  */
10850                 tw32(offset, read_mask | write_mask);
10851
10852                 val = tr32(offset);
10853
10854                 /* Test the read-only bits. */
10855                 if ((val & read_mask) != read_val)
10856                         goto out;
10857
10858                 /* Test the read/write bits. */
10859                 if ((val & write_mask) != write_mask)
10860                         goto out;
10861
10862                 tw32(offset, save_val);
10863         }
10864
10865         return 0;
10866
10867 out:
10868         if (netif_msg_hw(tp))
10869                 netdev_err(tp->dev,
10870                            "Register test failed at offset %x\n", offset);
10871         tw32(offset, save_val);
10872         return -EIO;
10873 }
10874
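/* Walk the given region of NIC internal memory a 32-bit word at a time,
 * writing and reading back each test pattern (all-zeros, all-ones,
 * 0xaa55a55a).
 */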
10875 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10876 {
10877         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10878         int i;
10879         u32 j;
10880
10881         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10882                 for (j = 0; j < len; j += 4) {
10883                         u32 val;
10884
10885                         tg3_write_mem(tp, offset + j, test_pattern[i]);
10886                         tg3_read_mem(tp, offset + j, &val);
10887                         if (val != test_pattern[i])
10888                                 return -EIO;
10889                 }
10890         }
10891         return 0;
10892 }
10893
10894 static int tg3_test_memory(struct tg3 *tp)
10895 {
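        /* Per-ASIC tables of { NIC internal memory offset, length } pairs
         * to exercise; an offset of 0xffffffff terminates each table.
         */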
10896         static struct mem_entry {
10897                 u32 offset;
10898                 u32 len;
10899         } mem_tbl_570x[] = {
10900                 { 0x00000000, 0x00b50},
10901                 { 0x00002000, 0x1c000},
10902                 { 0xffffffff, 0x00000}
10903         }, mem_tbl_5705[] = {
10904                 { 0x00000100, 0x0000c},
10905                 { 0x00000200, 0x00008},
10906                 { 0x00004000, 0x00800},
10907                 { 0x00006000, 0x01000},
10908                 { 0x00008000, 0x02000},
10909                 { 0x00010000, 0x0e000},
10910                 { 0xffffffff, 0x00000}
10911         }, mem_tbl_5755[] = {
10912                 { 0x00000200, 0x00008},
10913                 { 0x00004000, 0x00800},
10914                 { 0x00006000, 0x00800},
10915                 { 0x00008000, 0x02000},
10916                 { 0x00010000, 0x0c000},
10917                 { 0xffffffff, 0x00000}
10918         }, mem_tbl_5906[] = {
10919                 { 0x00000200, 0x00008},
10920                 { 0x00004000, 0x00400},
10921                 { 0x00006000, 0x00400},
10922                 { 0x00008000, 0x01000},
10923                 { 0x00010000, 0x01000},
10924                 { 0xffffffff, 0x00000}
10925         }, mem_tbl_5717[] = {
10926                 { 0x00000200, 0x00008},
10927                 { 0x00010000, 0x0a000},
10928                 { 0x00020000, 0x13c00},
10929                 { 0xffffffff, 0x00000}
10930         }, mem_tbl_57765[] = {
10931                 { 0x00000200, 0x00008},
10932                 { 0x00004000, 0x00800},
10933                 { 0x00006000, 0x09800},
10934                 { 0x00010000, 0x0a000},
10935                 { 0xffffffff, 0x00000}
10936         };
10937         struct mem_entry *mem_tbl;
10938         int err = 0;
10939         int i;
10940
10941         if (tg3_flag(tp, 5717_PLUS))
10942                 mem_tbl = mem_tbl_5717;
10943         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10944                 mem_tbl = mem_tbl_57765;
10945         else if (tg3_flag(tp, 5755_PLUS))
10946                 mem_tbl = mem_tbl_5755;
10947         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10948                 mem_tbl = mem_tbl_5906;
10949         else if (tg3_flag(tp, 5705_PLUS))
10950                 mem_tbl = mem_tbl_5705;
10951         else
10952                 mem_tbl = mem_tbl_570x;
10953
10954         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10955                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
10956                 if (err)
10957                         break;
10958         }
10959
10960         return err;
10961 }
10962
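/* Loopback test modes: MAC loopback wraps frames inside the MAC, PHY
 * loopback wraps them at the PHY via BMCR_LOOPBACK, and TSO loopback is
 * the PHY loopback path driven with a single TSO super-frame.
 */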
10963 #define TG3_MAC_LOOPBACK        0
10964 #define TG3_PHY_LOOPBACK        1
10965 #define TG3_TSO_LOOPBACK        2
10966
10967 #define TG3_TSO_MSS             500
10968
10969 #define TG3_TSO_IP_HDR_LEN      20
10970 #define TG3_TSO_TCP_HDR_LEN     20
10971 #define TG3_TSO_TCP_OPT_LEN     12
10972
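/* Canned header for the TSO loopback frame, starting at the Ethertype
 * (0x0800): a 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2, proto TCP) and a
 * 32-byte TCP header (20 bytes plus 12 bytes of options: two NOPs and a
 * timestamp).  The IP total length is filled in at run time, and the TCP
 * checksum is zeroed for HW-TSO devices.
 */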
10973 static const u8 tg3_tso_header[] = {
10974 0x08, 0x00,
10975 0x45, 0x00, 0x00, 0x00,
10976 0x00, 0x00, 0x40, 0x00,
10977 0x40, 0x06, 0x00, 0x00,
10978 0x0a, 0x00, 0x00, 0x01,
10979 0x0a, 0x00, 0x00, 0x02,
10980 0x0d, 0x00, 0xe0, 0x00,
10981 0x00, 0x00, 0x01, 0x00,
10982 0x00, 0x00, 0x02, 0x00,
10983 0x80, 0x10, 0x10, 0x00,
10984 0x14, 0x09, 0x00, 0x00,
10985 0x01, 0x01, 0x08, 0x0a,
10986 0x11, 0x11, 0x11, 0x11,
10987 0x11, 0x11, 0x11, 0x11,
10988 };
10989
10990 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
10991 {
10992         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10993         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
10994         struct sk_buff *skb, *rx_skb;
10995         u8 *tx_data;
10996         dma_addr_t map;
10997         int num_pkts, tx_len, rx_len, i, err;
10998         struct tg3_rx_buffer_desc *desc;
10999         struct tg3_napi *tnapi, *rnapi;
11000         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11001
11002         tnapi = &tp->napi[0];
11003         rnapi = &tp->napi[0];
11004         if (tp->irq_cnt > 1) {
11005                 if (tg3_flag(tp, ENABLE_RSS))
11006                         rnapi = &tp->napi[1];
11007                 if (tg3_flag(tp, ENABLE_TSS))
11008                         tnapi = &tp->napi[1];
11009         }
11010         coal_now = tnapi->coal_now | rnapi->coal_now;
11011
11012         if (loopback_mode == TG3_MAC_LOOPBACK) {
11013                 /* HW erratum - MAC loopback fails in some cases on the 5780.
11014                  * Normal traffic and PHY loopback are not affected by this
11015                  * erratum.  Also, the MAC loopback test is deprecated for
11016                  * all newer ASIC revisions.
11017                  */
11018                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11019                     tg3_flag(tp, CPMU_PRESENT))
11020                         return 0;
11021
11022                 mac_mode = tp->mac_mode &
11023                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11024                 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11025                 if (!tg3_flag(tp, 5705_PLUS))
11026                         mac_mode |= MAC_MODE_LINK_POLARITY;
11027                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11028                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11029                 else
11030                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11031                 tw32(MAC_MODE, mac_mode);
11032         } else {
11033                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11034                         tg3_phy_fet_toggle_apd(tp, false);
11035                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11036                 } else
11037                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11038
11039                 tg3_phy_toggle_automdix(tp, 0);
11040
11041                 tg3_writephy(tp, MII_BMCR, val);
11042                 udelay(40);
11043
11044                 mac_mode = tp->mac_mode &
11045                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11046                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11047                         tg3_writephy(tp, MII_TG3_FET_PTEST,
11048                                      MII_TG3_FET_PTEST_FRC_TX_LINK |
11049                                      MII_TG3_FET_PTEST_FRC_TX_LOCK);
11050                         /* The write needs to be flushed for the AC131 */
11051                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11052                                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11053                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11054                 } else
11055                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11056
11057                 /* Reset RX MAC to avoid intermittently losing the 1st RX packet */
11058                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11059                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11060                         udelay(10);
11061                         tw32_f(MAC_RX_MODE, tp->rx_mode);
11062                 }
11063                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11064                         u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11065                         if (masked_phy_id == TG3_PHY_ID_BCM5401)
11066                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11067                         else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11068                                 mac_mode |= MAC_MODE_LINK_POLARITY;
11069                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
11070                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11071                 }
11072                 tw32(MAC_MODE, mac_mode);
11073
11074                 /* Wait for link */
11075                 for (i = 0; i < 100; i++) {
11076                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11077                                 break;
11078                         mdelay(1);
11079                 }
11080         }
11081
11082         err = -EIO;
11083
11084         tx_len = pktsz;
11085         skb = netdev_alloc_skb(tp->dev, tx_len);
11086         if (!skb)
11087                 return -ENOMEM;
11088
11089         tx_data = skb_put(skb, tx_len);
11090         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
11091         memset(tx_data + ETH_ALEN, 0x0, 8);
11092
11093         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11094
11095         if (loopback_mode == TG3_TSO_LOOPBACK) {
11096                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11097
11098                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11099                               TG3_TSO_TCP_OPT_LEN;
11100
11101                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11102                        sizeof(tg3_tso_header));
11103                 mss = TG3_TSO_MSS;
11104
11105                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11106                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11107
11108                 /* Set the total length field in the IP header */
11109                 iph->tot_len = htons((u16)(mss + hdr_len));
11110
11111                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11112                               TXD_FLAG_CPU_POST_DMA);
11113
11114                 if (tg3_flag(tp, HW_TSO_1) ||
11115                     tg3_flag(tp, HW_TSO_2) ||
11116                     tg3_flag(tp, HW_TSO_3)) {
11117                         struct tcphdr *th;
11118                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11119                         th = (struct tcphdr *)&tx_data[val];
11120                         th->check = 0;
11121                 } else
11122                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11123
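                      /* How the header length reaches the hardware varies
                       * with the TSO engine generation; the shifts and
                       * masks below encode hdr_len into the mss value
                       * and/or the base_flags of the descriptor.
                       */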
11124                 if (tg3_flag(tp, HW_TSO_3)) {
11125                         mss |= (hdr_len & 0xc) << 12;
11126                         if (hdr_len & 0x10)
11127                                 base_flags |= 0x00000010;
11128                         base_flags |= (hdr_len & 0x3e0) << 5;
11129                 } else if (tg3_flag(tp, HW_TSO_2))
11130                         mss |= hdr_len << 9;
11131                 else if (tg3_flag(tp, HW_TSO_1) ||
11132                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11133                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11134                 } else {
11135                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11136                 }
11137
11138                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11139         } else {
11140                 num_pkts = 1;
11141                 data_off = ETH_HLEN;
11142         }
11143
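              /* Fill the payload with an incrementing byte pattern so
               * the receive path below can verify it byte for byte.
               */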
11144         for (i = data_off; i < tx_len; i++)
11145                 tx_data[i] = (u8) (i & 0xff);
11146
11147         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11148         if (pci_dma_mapping_error(tp->pdev, map)) {
11149                 dev_kfree_skb(skb);
11150                 return -EIO;
11151         }
11152
11153         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11154                rnapi->coal_now);
11155
11156         udelay(10);
11157
11158         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11159
11160         tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11161                     base_flags, (mss << 1) | 1);
11162
11163         tnapi->tx_prod++;
11164
11165         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11166         tr32_mailbox(tnapi->prodmbox);
11167
11168         udelay(10);
11169
11170         /* Poll for up to 350 usec to allow enough time on some 10/100 Mbps devices. */
11171         for (i = 0; i < 35; i++) {
11172                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11173                        coal_now);
11174
11175                 udelay(10);
11176
11177                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11178                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11179                 if ((tx_idx == tnapi->tx_prod) &&
11180                     (rx_idx == (rx_start_idx + num_pkts)))
11181                         break;
11182         }
11183
11184         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11185         dev_kfree_skb(skb);
11186
11187         if (tx_idx != tnapi->tx_prod)
11188                 goto out;
11189
11190         if (rx_idx != rx_start_idx + num_pkts)
11191                 goto out;
11192
11193         val = data_off;
11194         while (rx_idx != rx_start_idx) {
11195                 desc = &rnapi->rx_rcb[rx_start_idx++];
11196                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11197                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11198
11199                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11200                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11201                         goto out;
11202
11203                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11204                          - ETH_FCS_LEN;
11205
11206                 if (loopback_mode != TG3_TSO_LOOPBACK) {
11207                         if (rx_len != tx_len)
11208                                 goto out;
11209
11210                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11211                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11212                                         goto out;
11213                         } else {
11214                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11215                                         goto out;
11216                         }
11217                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11218                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11219                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11220                         goto out;
11221                 }
11222
11223                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11224                         rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11225                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11226                                              mapping);
11227                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11228                         rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11229                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11230                                              mapping);
11231                 } else
11232                         goto out;
11233
11234                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11235                                             PCI_DMA_FROMDEVICE);
11236
11237                 for (i = data_off; i < rx_len; i++, val++) {
11238                         if (*(rx_skb->data + i) != (u8) (val & 0xff))
11239                                 goto out;
11240                 }
11241         }
11242
11243         err = 0;
11244
11245         /* tg3_free_rings will unmap and free the rx_skb */
11246 out:
11247         return err;
11248 }
11249
11250 #define TG3_STD_LOOPBACK_FAILED         1
11251 #define TG3_JMB_LOOPBACK_FAILED         2
11252 #define TG3_TSO_LOOPBACK_FAILED         4
11253
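      /* Loopback test results are reported as one bitmask: MAC loopback
       * failures occupy bits 0-3 and PHY loopback failures bits 4-7, so
       * TG3_LOOPBACK_FAILED (0x77) marks every test as failed.
       */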
11254 #define TG3_MAC_LOOPBACK_SHIFT          0
11255 #define TG3_PHY_LOOPBACK_SHIFT          4
11256 #define TG3_LOOPBACK_FAILED             0x00000077
11257
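      /* Run the offline loopback tests.  MAC-internal loopback runs at
       * standard (and, if enabled, jumbo) frame sizes; PHY loopback
       * additionally runs a TSO frame on non-SERDES, non-phylib
       * devices.  Returns a mask of TG3_*_LOOPBACK_FAILED bits,
       * zero on success.
       */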
11258 static int tg3_test_loopback(struct tg3 *tp)
11259 {
11260         int err = 0;
11261         u32 eee_cap, cpmuctrl = 0;
11262
11263         if (!netif_running(tp->dev))
11264                 return TG3_LOOPBACK_FAILED;
11265
11266         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11267         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11268
11269         err = tg3_reset_hw(tp, 1);
11270         if (err) {
11271                 err = TG3_LOOPBACK_FAILED;
11272                 goto done;
11273         }
11274
11275         if (tg3_flag(tp, ENABLE_RSS)) {
11276                 int i;
11277
11278                 /* Reroute all rx packets to the 1st queue */
11279                 for (i = MAC_RSS_INDIR_TBL_0;
11280                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11281                         tw32(i, 0x0);
11282         }
11283
11284         /* Turn off gphy autopowerdown. */
11285         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11286                 tg3_phy_toggle_apd(tp, false);
11287
11288         if (tg3_flag(tp, CPMU_PRESENT)) {
11289                 int i;
11290                 u32 status;
11291
11292                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11293
11294                 /* Wait for up to 40 microseconds to acquire lock. */
11295                 for (i = 0; i < 4; i++) {
11296                         status = tr32(TG3_CPMU_MUTEX_GNT);
11297                         if (status == CPMU_MUTEX_GNT_DRIVER)
11298                                 break;
11299                         udelay(10);
11300                 }
11301
11302                 if (status != CPMU_MUTEX_GNT_DRIVER) {
11303                         err = TG3_LOOPBACK_FAILED;
11304                         goto done;
11305                 }
11306
11307                 /* Turn off link-based power management. */
11308                 cpmuctrl = tr32(TG3_CPMU_CTRL);
11309                 tw32(TG3_CPMU_CTRL,
11310                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11311                                   CPMU_CTRL_LINK_AWARE_MODE));
11312         }
11313
11314         if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11315                 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11316
11317         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11318             tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11319                 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11320
11321         if (tg3_flag(tp, CPMU_PRESENT)) {
11322                 tw32(TG3_CPMU_CTRL, cpmuctrl);
11323
11324                 /* Release the mutex */
11325                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11326         }
11327
11328         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11329             !tg3_flag(tp, USE_PHYLIB)) {
11330                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11331                         err |= TG3_STD_LOOPBACK_FAILED <<
11332                                TG3_PHY_LOOPBACK_SHIFT;
11333                 if (tg3_flag(tp, TSO_CAPABLE) &&
11334                     tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11335                         err |= TG3_TSO_LOOPBACK_FAILED <<
11336                                TG3_PHY_LOOPBACK_SHIFT;
11337                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11338                     tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11339                         err |= TG3_JMB_LOOPBACK_FAILED <<
11340                                TG3_PHY_LOOPBACK_SHIFT;
11341         }
11342
11343         /* Re-enable gphy autopowerdown. */
11344         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11345                 tg3_phy_toggle_apd(tp, true);
11346
11347 done:
11348         tp->phy_flags |= eee_cap;
11349
11350         return err;
11351 }
11352
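      /* ethtool self-test entry point.  The NVRAM and link tests always
       * run; the register, memory, loopback and interrupt tests run
       * only for ETH_TEST_FL_OFFLINE, with the chip halted and
       * restarted around them.  data[0..5] holds the per-test results,
       * non-zero meaning failure.
       */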
11353 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11354                           u64 *data)
11355 {
11356         struct tg3 *tp = netdev_priv(dev);
11357
11358         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11359                 tg3_power_up(tp);
11360
11361         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11362
11363         if (tg3_test_nvram(tp) != 0) {
11364                 etest->flags |= ETH_TEST_FL_FAILED;
11365                 data[0] = 1;
11366         }
11367         if (tg3_test_link(tp) != 0) {
11368                 etest->flags |= ETH_TEST_FL_FAILED;
11369                 data[1] = 1;
11370         }
11371         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11372                 int err, err2 = 0, irq_sync = 0;
11373
11374                 if (netif_running(dev)) {
11375                         tg3_phy_stop(tp);
11376                         tg3_netif_stop(tp);
11377                         irq_sync = 1;
11378                 }
11379
11380                 tg3_full_lock(tp, irq_sync);
11381
11382                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11383                 err = tg3_nvram_lock(tp);
11384                 tg3_halt_cpu(tp, RX_CPU_BASE);
11385                 if (!tg3_flag(tp, 5705_PLUS))
11386                         tg3_halt_cpu(tp, TX_CPU_BASE);
11387                 if (!err)
11388                         tg3_nvram_unlock(tp);
11389
11390                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11391                         tg3_phy_reset(tp);
11392
11393                 if (tg3_test_registers(tp) != 0) {
11394                         etest->flags |= ETH_TEST_FL_FAILED;
11395                         data[2] = 1;
11396                 }
11397                 if (tg3_test_memory(tp) != 0) {
11398                         etest->flags |= ETH_TEST_FL_FAILED;
11399                         data[3] = 1;
11400                 }
11401                 if ((data[4] = tg3_test_loopback(tp)) != 0)
11402                         etest->flags |= ETH_TEST_FL_FAILED;
11403
11404                 tg3_full_unlock(tp);
11405
11406                 if (tg3_test_interrupt(tp) != 0) {
11407                         etest->flags |= ETH_TEST_FL_FAILED;
11408                         data[5] = 1;
11409                 }
11410
11411                 tg3_full_lock(tp, 0);
11412
11413                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11414                 if (netif_running(dev)) {
11415                         tg3_flag_set(tp, INIT_COMPLETE);
11416                         err2 = tg3_restart_hw(tp, 1);
11417                         if (!err2)
11418                                 tg3_netif_start(tp);
11419                 }
11420
11421                 tg3_full_unlock(tp);
11422
11423                 if (irq_sync && !err2)
11424                         tg3_phy_start(tp);
11425         }
11426         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11427                 tg3_power_down(tp);
11429 }
11430
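      /* MII ioctl handler.  With phylib attached the request is handed
       * to phy_mii_ioctl(); otherwise SIOCGMIIPHY, SIOCGMIIREG and
       * SIOCSMIIREG are serviced directly under tp->lock.  SERDES
       * devices have no MII PHY and return -EOPNOTSUPP.
       */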
11431 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11432 {
11433         struct mii_ioctl_data *data = if_mii(ifr);
11434         struct tg3 *tp = netdev_priv(dev);
11435         int err;
11436
11437         if (tg3_flag(tp, USE_PHYLIB)) {
11438                 struct phy_device *phydev;
11439                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11440                         return -EAGAIN;
11441                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11442                 return phy_mii_ioctl(phydev, ifr, cmd);
11443         }
11444
11445         switch (cmd) {
11446         case SIOCGMIIPHY:
11447                 data->phy_id = tp->phy_addr;
11448
11449                 /* fallthru */
11450         case SIOCGMIIREG: {
11451                 u32 mii_regval;
11452
11453                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11454                         break;                  /* We have no PHY */
11455
11456                 if (!netif_running(dev))
11457                         return -EAGAIN;
11458
11459                 spin_lock_bh(&tp->lock);
11460                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11461                 spin_unlock_bh(&tp->lock);
11462
11463                 data->val_out = mii_regval;
11464
11465                 return err;
11466         }
11467
11468         case SIOCSMIIREG:
11469                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11470                         break;                  /* We have no PHY */
11471
11472                 if (!netif_running(dev))
11473                         return -EAGAIN;
11474
11475                 spin_lock_bh(&tp->lock);
11476                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11477                 spin_unlock_bh(&tp->lock);
11478
11479                 return err;
11480
11481         default:
11482                 /* do nothing */
11483                 break;
11484         }
11485         return -EOPNOTSUPP;
11486 }
11487
11488 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11489 {
11490         struct tg3 *tp = netdev_priv(dev);
11491
11492         memcpy(ec, &tp->coal, sizeof(*ec));
11493         return 0;
11494 }
11495
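      /* Validate and apply ethtool coalescing parameters.  Only
       * pre-5705 chips support IRQ tick limits and statistics-block
       * coalescing, so those bounds stay at zero on 5705+ parts and
       * the range checks below reject any non-zero request for them.
       */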
11496 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11497 {
11498         struct tg3 *tp = netdev_priv(dev);
11499         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11500         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11501
11502         if (!tg3_flag(tp, 5705_PLUS)) {
11503                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11504                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11505                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11506                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11507         }
11508
11509         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11510             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11511             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11512             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11513             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11514             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11515             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11516             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11517             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11518             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11519                 return -EINVAL;
11520
11521         /* No rx interrupts will be generated if both are zero */
11522         if ((ec->rx_coalesce_usecs == 0) &&
11523             (ec->rx_max_coalesced_frames == 0))
11524                 return -EINVAL;
11525
11526         /* No tx interrupts will be generated if both are zero */
11527         if ((ec->tx_coalesce_usecs == 0) &&
11528             (ec->tx_max_coalesced_frames == 0))
11529                 return -EINVAL;
11530
11531         /* Only copy relevant parameters, ignore all others. */
11532         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11533         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11534         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11535         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11536         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11537         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11538         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11539         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11540         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11541
11542         if (netif_running(dev)) {
11543                 tg3_full_lock(tp, 0);
11544                 __tg3_set_coalesce(tp, &tp->coal);
11545                 tg3_full_unlock(tp);
11546         }
11547         return 0;
11548 }
11549
11550 static const struct ethtool_ops tg3_ethtool_ops = {
11551         .get_settings           = tg3_get_settings,
11552         .set_settings           = tg3_set_settings,
11553         .get_drvinfo            = tg3_get_drvinfo,
11554         .get_regs_len           = tg3_get_regs_len,
11555         .get_regs               = tg3_get_regs,
11556         .get_wol                = tg3_get_wol,
11557         .set_wol                = tg3_set_wol,
11558         .get_msglevel           = tg3_get_msglevel,
11559         .set_msglevel           = tg3_set_msglevel,
11560         .nway_reset             = tg3_nway_reset,
11561         .get_link               = ethtool_op_get_link,
11562         .get_eeprom_len         = tg3_get_eeprom_len,
11563         .get_eeprom             = tg3_get_eeprom,
11564         .set_eeprom             = tg3_set_eeprom,
11565         .get_ringparam          = tg3_get_ringparam,
11566         .set_ringparam          = tg3_set_ringparam,
11567         .get_pauseparam         = tg3_get_pauseparam,
11568         .set_pauseparam         = tg3_set_pauseparam,
11569         .self_test              = tg3_self_test,
11570         .get_strings            = tg3_get_strings,
11571         .set_phys_id            = tg3_set_phys_id,
11572         .get_ethtool_stats      = tg3_get_ethtool_stats,
11573         .get_coalesce           = tg3_get_coalesce,
11574         .set_coalesce           = tg3_set_coalesce,
11575         .get_sset_count         = tg3_get_sset_count,
11576 };
11577
11578 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11579 {
11580         u32 cursize, val, magic;
11581
11582         tp->nvram_size = EEPROM_CHIP_SIZE;
11583
11584         if (tg3_nvram_read(tp, 0, &magic) != 0)
11585                 return;
11586
11587         if ((magic != TG3_EEPROM_MAGIC) &&
11588             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11589             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11590                 return;
11591
11592         /*
11593          * Size the chip by reading offsets at increasing powers of two.
11594          * When we encounter our validation signature, we know the addressing
11595          * has wrapped around, and thus have our chip size.
11596          */
11597         cursize = 0x10;
11598
11599         while (cursize < tp->nvram_size) {
11600                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11601                         return;
11602
11603                 if (val == magic)
11604                         break;
11605
11606                 cursize <<= 1;
11607         }
11608
11609         tp->nvram_size = cursize;
11610 }
11611
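      /* Determine the NVRAM size.  Selfboot images are sized by probing
       * (tg3_get_eeprom_size); standard images carry their size, in
       * kbytes, in the 16-bit word at offset 0xf2, with zero meaning
       * the default 512KB.
       */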
11612 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11613 {
11614         u32 val;
11615
11616         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11617                 return;
11618
11619         /* Selfboot format */
11620         if (val != TG3_EEPROM_MAGIC) {
11621                 tg3_get_eeprom_size(tp);
11622                 return;
11623         }
11624
11625         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11626                 if (val != 0) {
11627                         /* This is confusing.  We want to operate on the
11628                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11629                          * call will read from NVRAM and byteswap the data
11630                          * according to the byteswapping settings for all
11631                          * other register accesses.  This ensures the data we
11632                          * want will always reside in the lower 16-bits.
11633                          * However, the data in NVRAM is in LE format, which
11634                          * means the data from the NVRAM read will always be
11635                          * opposite the endianness of the CPU.  The 16-bit
11636                          * byteswap then brings the data to CPU endianness.
11637                          */
11638                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11639                         return;
11640                 }
11641         }
11642         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11643 }
11644
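      /* Default NVRAM strapping decode, used for chips without a
       * dedicated handler: map the flash vendor bits in NVRAM_CFG1 to a
       * JEDEC id and page size on 5750/5780-class parts, and assume a
       * buffered Atmel part otherwise.
       */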
11645 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11646 {
11647         u32 nvcfg1;
11648
11649         nvcfg1 = tr32(NVRAM_CFG1);
11650         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11651                 tg3_flag_set(tp, FLASH);
11652         } else {
11653                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11654                 tw32(NVRAM_CFG1, nvcfg1);
11655         }
11656
11657         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11658             tg3_flag(tp, 5780_CLASS)) {
11659                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11660                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11661                         tp->nvram_jedecnum = JEDEC_ATMEL;
11662                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11663                         tg3_flag_set(tp, NVRAM_BUFFERED);
11664                         break;
11665                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11666                         tp->nvram_jedecnum = JEDEC_ATMEL;
11667                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11668                         break;
11669                 case FLASH_VENDOR_ATMEL_EEPROM:
11670                         tp->nvram_jedecnum = JEDEC_ATMEL;
11671                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11672                         tg3_flag_set(tp, NVRAM_BUFFERED);
11673                         break;
11674                 case FLASH_VENDOR_ST:
11675                         tp->nvram_jedecnum = JEDEC_ST;
11676                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11677                         tg3_flag_set(tp, NVRAM_BUFFERED);
11678                         break;
11679                 case FLASH_VENDOR_SAIFUN:
11680                         tp->nvram_jedecnum = JEDEC_SAIFUN;
11681                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11682                         break;
11683                 case FLASH_VENDOR_SST_SMALL:
11684                 case FLASH_VENDOR_SST_LARGE:
11685                         tp->nvram_jedecnum = JEDEC_SST;
11686                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11687                         break;
11688                 }
11689         } else {
11690                 tp->nvram_jedecnum = JEDEC_ATMEL;
11691                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11692                 tg3_flag_set(tp, NVRAM_BUFFERED);
11693         }
11694 }
11695
11696 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11697 {
11698         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11699         case FLASH_5752PAGE_SIZE_256:
11700                 tp->nvram_pagesize = 256;
11701                 break;
11702         case FLASH_5752PAGE_SIZE_512:
11703                 tp->nvram_pagesize = 512;
11704                 break;
11705         case FLASH_5752PAGE_SIZE_1K:
11706                 tp->nvram_pagesize = 1024;
11707                 break;
11708         case FLASH_5752PAGE_SIZE_2K:
11709                 tp->nvram_pagesize = 2048;
11710                 break;
11711         case FLASH_5752PAGE_SIZE_4K:
11712                 tp->nvram_pagesize = 4096;
11713                 break;
11714         case FLASH_5752PAGE_SIZE_264:
11715                 tp->nvram_pagesize = 264;
11716                 break;
11717         case FLASH_5752PAGE_SIZE_528:
11718                 tp->nvram_pagesize = 528;
11719                 break;
11720         }
11721 }
11722
11723 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11724 {
11725         u32 nvcfg1;
11726
11727         nvcfg1 = tr32(NVRAM_CFG1);
11728
11729         /* NVRAM protection for TPM */
11730         if (nvcfg1 & (1 << 27))
11731                 tg3_flag_set(tp, PROTECTED_NVRAM);
11732
11733         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11734         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11735         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11736                 tp->nvram_jedecnum = JEDEC_ATMEL;
11737                 tg3_flag_set(tp, NVRAM_BUFFERED);
11738                 break;
11739         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11740                 tp->nvram_jedecnum = JEDEC_ATMEL;
11741                 tg3_flag_set(tp, NVRAM_BUFFERED);
11742                 tg3_flag_set(tp, FLASH);
11743                 break;
11744         case FLASH_5752VENDOR_ST_M45PE10:
11745         case FLASH_5752VENDOR_ST_M45PE20:
11746         case FLASH_5752VENDOR_ST_M45PE40:
11747                 tp->nvram_jedecnum = JEDEC_ST;
11748                 tg3_flag_set(tp, NVRAM_BUFFERED);
11749                 tg3_flag_set(tp, FLASH);
11750                 break;
11751         }
11752
11753         if (tg3_flag(tp, FLASH)) {
11754                 tg3_nvram_get_pagesize(tp, nvcfg1);
11755         } else {
11756                 /* For eeprom, set pagesize to maximum eeprom size */
11757                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11758
11759                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11760                 tw32(NVRAM_CFG1, nvcfg1);
11761         }
11762 }
11763
11764 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11765 {
11766         u32 nvcfg1, protect = 0;
11767
11768         nvcfg1 = tr32(NVRAM_CFG1);
11769
11770         /* NVRAM protection for TPM */
11771         if (nvcfg1 & (1 << 27)) {
11772                 tg3_flag_set(tp, PROTECTED_NVRAM);
11773                 protect = 1;
11774         }
11775
11776         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11777         switch (nvcfg1) {
11778         case FLASH_5755VENDOR_ATMEL_FLASH_1:
11779         case FLASH_5755VENDOR_ATMEL_FLASH_2:
11780         case FLASH_5755VENDOR_ATMEL_FLASH_3:
11781         case FLASH_5755VENDOR_ATMEL_FLASH_5:
11782                 tp->nvram_jedecnum = JEDEC_ATMEL;
11783                 tg3_flag_set(tp, NVRAM_BUFFERED);
11784                 tg3_flag_set(tp, FLASH);
11785                 tp->nvram_pagesize = 264;
11786                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11787                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11788                         tp->nvram_size = (protect ? 0x3e200 :
11789                                           TG3_NVRAM_SIZE_512KB);
11790                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11791                         tp->nvram_size = (protect ? 0x1f200 :
11792                                           TG3_NVRAM_SIZE_256KB);
11793                 else
11794                         tp->nvram_size = (protect ? 0x1f200 :
11795                                           TG3_NVRAM_SIZE_128KB);
11796                 break;
11797         case FLASH_5752VENDOR_ST_M45PE10:
11798         case FLASH_5752VENDOR_ST_M45PE20:
11799         case FLASH_5752VENDOR_ST_M45PE40:
11800                 tp->nvram_jedecnum = JEDEC_ST;
11801                 tg3_flag_set(tp, NVRAM_BUFFERED);
11802                 tg3_flag_set(tp, FLASH);
11803                 tp->nvram_pagesize = 256;
11804                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11805                         tp->nvram_size = (protect ?
11806                                           TG3_NVRAM_SIZE_64KB :
11807                                           TG3_NVRAM_SIZE_128KB);
11808                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11809                         tp->nvram_size = (protect ?
11810                                           TG3_NVRAM_SIZE_64KB :
11811                                           TG3_NVRAM_SIZE_256KB);
11812                 else
11813                         tp->nvram_size = (protect ?
11814                                           TG3_NVRAM_SIZE_128KB :
11815                                           TG3_NVRAM_SIZE_512KB);
11816                 break;
11817         }
11818 }
11819
11820 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11821 {
11822         u32 nvcfg1;
11823
11824         nvcfg1 = tr32(NVRAM_CFG1);
11825
11826         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11827         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11828         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11829         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11830         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11831                 tp->nvram_jedecnum = JEDEC_ATMEL;
11832                 tg3_flag_set(tp, NVRAM_BUFFERED);
11833                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11834
11835                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11836                 tw32(NVRAM_CFG1, nvcfg1);
11837                 break;
11838         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11839         case FLASH_5755VENDOR_ATMEL_FLASH_1:
11840         case FLASH_5755VENDOR_ATMEL_FLASH_2:
11841         case FLASH_5755VENDOR_ATMEL_FLASH_3:
11842                 tp->nvram_jedecnum = JEDEC_ATMEL;
11843                 tg3_flag_set(tp, NVRAM_BUFFERED);
11844                 tg3_flag_set(tp, FLASH);
11845                 tp->nvram_pagesize = 264;
11846                 break;
11847         case FLASH_5752VENDOR_ST_M45PE10:
11848         case FLASH_5752VENDOR_ST_M45PE20:
11849         case FLASH_5752VENDOR_ST_M45PE40:
11850                 tp->nvram_jedecnum = JEDEC_ST;
11851                 tg3_flag_set(tp, NVRAM_BUFFERED);
11852                 tg3_flag_set(tp, FLASH);
11853                 tp->nvram_pagesize = 256;
11854                 break;
11855         }
11856 }
11857
11858 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11859 {
11860         u32 nvcfg1, protect = 0;
11861
11862         nvcfg1 = tr32(NVRAM_CFG1);
11863
11864         /* NVRAM protection for TPM */
11865         if (nvcfg1 & (1 << 27)) {
11866                 tg3_flag_set(tp, PROTECTED_NVRAM);
11867                 protect = 1;
11868         }
11869
11870         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11871         switch (nvcfg1) {
11872         case FLASH_5761VENDOR_ATMEL_ADB021D:
11873         case FLASH_5761VENDOR_ATMEL_ADB041D:
11874         case FLASH_5761VENDOR_ATMEL_ADB081D:
11875         case FLASH_5761VENDOR_ATMEL_ADB161D:
11876         case FLASH_5761VENDOR_ATMEL_MDB021D:
11877         case FLASH_5761VENDOR_ATMEL_MDB041D:
11878         case FLASH_5761VENDOR_ATMEL_MDB081D:
11879         case FLASH_5761VENDOR_ATMEL_MDB161D:
11880                 tp->nvram_jedecnum = JEDEC_ATMEL;
11881                 tg3_flag_set(tp, NVRAM_BUFFERED);
11882                 tg3_flag_set(tp, FLASH);
11883                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11884                 tp->nvram_pagesize = 256;
11885                 break;
11886         case FLASH_5761VENDOR_ST_A_M45PE20:
11887         case FLASH_5761VENDOR_ST_A_M45PE40:
11888         case FLASH_5761VENDOR_ST_A_M45PE80:
11889         case FLASH_5761VENDOR_ST_A_M45PE16:
11890         case FLASH_5761VENDOR_ST_M_M45PE20:
11891         case FLASH_5761VENDOR_ST_M_M45PE40:
11892         case FLASH_5761VENDOR_ST_M_M45PE80:
11893         case FLASH_5761VENDOR_ST_M_M45PE16:
11894                 tp->nvram_jedecnum = JEDEC_ST;
11895                 tg3_flag_set(tp, NVRAM_BUFFERED);
11896                 tg3_flag_set(tp, FLASH);
11897                 tp->nvram_pagesize = 256;
11898                 break;
11899         }
11900
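              /* When TPM protection is on, the usable size is taken from
               * the lockout register rather than implied by the part type.
               */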
11901         if (protect) {
11902                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11903         } else {
11904                 switch (nvcfg1) {
11905                 case FLASH_5761VENDOR_ATMEL_ADB161D:
11906                 case FLASH_5761VENDOR_ATMEL_MDB161D:
11907                 case FLASH_5761VENDOR_ST_A_M45PE16:
11908                 case FLASH_5761VENDOR_ST_M_M45PE16:
11909                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11910                         break;
11911                 case FLASH_5761VENDOR_ATMEL_ADB081D:
11912                 case FLASH_5761VENDOR_ATMEL_MDB081D:
11913                 case FLASH_5761VENDOR_ST_A_M45PE80:
11914                 case FLASH_5761VENDOR_ST_M_M45PE80:
11915                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11916                         break;
11917                 case FLASH_5761VENDOR_ATMEL_ADB041D:
11918                 case FLASH_5761VENDOR_ATMEL_MDB041D:
11919                 case FLASH_5761VENDOR_ST_A_M45PE40:
11920                 case FLASH_5761VENDOR_ST_M_M45PE40:
11921                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11922                         break;
11923                 case FLASH_5761VENDOR_ATMEL_ADB021D:
11924                 case FLASH_5761VENDOR_ATMEL_MDB021D:
11925                 case FLASH_5761VENDOR_ST_A_M45PE20:
11926                 case FLASH_5761VENDOR_ST_M_M45PE20:
11927                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11928                         break;
11929                 }
11930         }
11931 }
11932
11933 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11934 {
11935         tp->nvram_jedecnum = JEDEC_ATMEL;
11936         tg3_flag_set(tp, NVRAM_BUFFERED);
11937         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11938 }
11939
11940 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11941 {
11942         u32 nvcfg1;
11943
11944         nvcfg1 = tr32(NVRAM_CFG1);
11945
11946         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11947         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11948         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11949                 tp->nvram_jedecnum = JEDEC_ATMEL;
11950                 tg3_flag_set(tp, NVRAM_BUFFERED);
11951                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11952
11953                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11954                 tw32(NVRAM_CFG1, nvcfg1);
11955                 return;
11956         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11957         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11958         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11959         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11960         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11961         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11962         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11963                 tp->nvram_jedecnum = JEDEC_ATMEL;
11964                 tg3_flag_set(tp, NVRAM_BUFFERED);
11965                 tg3_flag_set(tp, FLASH);
11966
11967                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11968                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11969                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11970                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11971                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11972                         break;
11973                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11974                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11975                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11976                         break;
11977                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11978                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11979                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11980                         break;
11981                 }
11982                 break;
11983         case FLASH_5752VENDOR_ST_M45PE10:
11984         case FLASH_5752VENDOR_ST_M45PE20:
11985         case FLASH_5752VENDOR_ST_M45PE40:
11986                 tp->nvram_jedecnum = JEDEC_ST;
11987                 tg3_flag_set(tp, NVRAM_BUFFERED);
11988                 tg3_flag_set(tp, FLASH);
11989
11990                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11991                 case FLASH_5752VENDOR_ST_M45PE10:
11992                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11993                         break;
11994                 case FLASH_5752VENDOR_ST_M45PE20:
11995                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11996                         break;
11997                 case FLASH_5752VENDOR_ST_M45PE40:
11998                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11999                         break;
12000                 }
12001                 break;
12002         default:
12003                 tg3_flag_set(tp, NO_NVRAM);
12004                 return;
12005         }
12006
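              /* Only the 264/528-byte (Atmel DataFlash style) page sizes
               * need page-based address translation; power-of-two pages
               * map NVRAM offsets 1:1.
               */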
12007         tg3_nvram_get_pagesize(tp, nvcfg1);
12008         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12009                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12010 }
12011
12013 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12014 {
12015         u32 nvcfg1;
12016
12017         nvcfg1 = tr32(NVRAM_CFG1);
12018
12019         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12020         case FLASH_5717VENDOR_ATMEL_EEPROM:
12021         case FLASH_5717VENDOR_MICRO_EEPROM:
12022                 tp->nvram_jedecnum = JEDEC_ATMEL;
12023                 tg3_flag_set(tp, NVRAM_BUFFERED);
12024                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12025
12026                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12027                 tw32(NVRAM_CFG1, nvcfg1);
12028                 return;
12029         case FLASH_5717VENDOR_ATMEL_MDB011D:
12030         case FLASH_5717VENDOR_ATMEL_ADB011B:
12031         case FLASH_5717VENDOR_ATMEL_ADB011D:
12032         case FLASH_5717VENDOR_ATMEL_MDB021D:
12033         case FLASH_5717VENDOR_ATMEL_ADB021B:
12034         case FLASH_5717VENDOR_ATMEL_ADB021D:
12035         case FLASH_5717VENDOR_ATMEL_45USPT:
12036                 tp->nvram_jedecnum = JEDEC_ATMEL;
12037                 tg3_flag_set(tp, NVRAM_BUFFERED);
12038                 tg3_flag_set(tp, FLASH);
12039
12040                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12041                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12042                         /* Detect size with tg3_get_nvram_size() */
12043                         break;
12044                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12045                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12046                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12047                         break;
12048                 default:
12049                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12050                         break;
12051                 }
12052                 break;
12053         case FLASH_5717VENDOR_ST_M_M25PE10:
12054         case FLASH_5717VENDOR_ST_A_M25PE10:
12055         case FLASH_5717VENDOR_ST_M_M45PE10:
12056         case FLASH_5717VENDOR_ST_A_M45PE10:
12057         case FLASH_5717VENDOR_ST_M_M25PE20:
12058         case FLASH_5717VENDOR_ST_A_M25PE20:
12059         case FLASH_5717VENDOR_ST_M_M45PE20:
12060         case FLASH_5717VENDOR_ST_A_M45PE20:
12061         case FLASH_5717VENDOR_ST_25USPT:
12062         case FLASH_5717VENDOR_ST_45USPT:
12063                 tp->nvram_jedecnum = JEDEC_ST;
12064                 tg3_flag_set(tp, NVRAM_BUFFERED);
12065                 tg3_flag_set(tp, FLASH);
12066
12067                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12068                 case FLASH_5717VENDOR_ST_M_M25PE20:
12069                 case FLASH_5717VENDOR_ST_M_M45PE20:
12070                         /* Detect size with tg3_get_nvram_size() */
12071                         break;
12072                 case FLASH_5717VENDOR_ST_A_M25PE20:
12073                 case FLASH_5717VENDOR_ST_A_M45PE20:
12074                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12075                         break;
12076                 default:
12077                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12078                         break;
12079                 }
12080                 break;
12081         default:
12082                 tg3_flag_set(tp, NO_NVRAM);
12083                 return;
12084         }
12085
12086         tg3_nvram_get_pagesize(tp, nvcfg1);
12087         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12088                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12089 }
12090
12091 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12092 {
12093         u32 nvcfg1, nvmpinstrp;
12094
12095         nvcfg1 = tr32(NVRAM_CFG1);
12096         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12097
12098         switch (nvmpinstrp) {
12099         case FLASH_5720_EEPROM_HD:
12100         case FLASH_5720_EEPROM_LD:
12101                 tp->nvram_jedecnum = JEDEC_ATMEL;
12102                 tg3_flag_set(tp, NVRAM_BUFFERED);
12103
12104                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12105                 tw32(NVRAM_CFG1, nvcfg1);
12106                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12107                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12108                 else
12109                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12110                 return;
12111         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12112         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12113         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12114         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12115         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12116         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12117         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12118         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12119         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12120         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12121         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12122         case FLASH_5720VENDOR_ATMEL_45USPT:
12123                 tp->nvram_jedecnum = JEDEC_ATMEL;
12124                 tg3_flag_set(tp, NVRAM_BUFFERED);
12125                 tg3_flag_set(tp, FLASH);
12126
12127                 switch (nvmpinstrp) {
12128                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12129                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12130                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12131                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12132                         break;
12133                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12134                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12135                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12136                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12137                         break;
12138                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12139                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12140                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12141                         break;
12142                 default:
12143                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12144                         break;
12145                 }
12146                 break;
12147         case FLASH_5720VENDOR_M_ST_M25PE10:
12148         case FLASH_5720VENDOR_M_ST_M45PE10:
12149         case FLASH_5720VENDOR_A_ST_M25PE10:
12150         case FLASH_5720VENDOR_A_ST_M45PE10:
12151         case FLASH_5720VENDOR_M_ST_M25PE20:
12152         case FLASH_5720VENDOR_M_ST_M45PE20:
12153         case FLASH_5720VENDOR_A_ST_M25PE20:
12154         case FLASH_5720VENDOR_A_ST_M45PE20:
12155         case FLASH_5720VENDOR_M_ST_M25PE40:
12156         case FLASH_5720VENDOR_M_ST_M45PE40:
12157         case FLASH_5720VENDOR_A_ST_M25PE40:
12158         case FLASH_5720VENDOR_A_ST_M45PE40:
12159         case FLASH_5720VENDOR_M_ST_M25PE80:
12160         case FLASH_5720VENDOR_M_ST_M45PE80:
12161         case FLASH_5720VENDOR_A_ST_M25PE80:
12162         case FLASH_5720VENDOR_A_ST_M45PE80:
12163         case FLASH_5720VENDOR_ST_25USPT:
12164         case FLASH_5720VENDOR_ST_45USPT:
12165                 tp->nvram_jedecnum = JEDEC_ST;
12166                 tg3_flag_set(tp, NVRAM_BUFFERED);
12167                 tg3_flag_set(tp, FLASH);
12168
12169                 switch (nvmpinstrp) {
12170                 case FLASH_5720VENDOR_M_ST_M25PE20:
12171                 case FLASH_5720VENDOR_M_ST_M45PE20:
12172                 case FLASH_5720VENDOR_A_ST_M25PE20:
12173                 case FLASH_5720VENDOR_A_ST_M45PE20:
12174                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12175                         break;
12176                 case FLASH_5720VENDOR_M_ST_M25PE40:
12177                 case FLASH_5720VENDOR_M_ST_M45PE40:
12178                 case FLASH_5720VENDOR_A_ST_M25PE40:
12179                 case FLASH_5720VENDOR_A_ST_M45PE40:
12180                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12181                         break;
12182                 case FLASH_5720VENDOR_M_ST_M25PE80:
12183                 case FLASH_5720VENDOR_M_ST_M45PE80:
12184                 case FLASH_5720VENDOR_A_ST_M25PE80:
12185                 case FLASH_5720VENDOR_A_ST_M45PE80:
12186                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12187                         break;
12188                 default:
12189                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12190                         break;
12191                 }
12192                 break;
12193         default:
12194                 tg3_flag_set(tp, NO_NVRAM);
12195                 return;
12196         }
12197
12198         tg3_nvram_get_pagesize(tp, nvcfg1);
12199         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12200                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12201 }
12202
12203 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12204 static void __devinit tg3_nvram_init(struct tg3 *tp)
12205 {
12206         tw32_f(GRC_EEPROM_ADDR,
12207              (EEPROM_ADDR_FSM_RESET |
12208               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12209                EEPROM_ADDR_CLKPERD_SHIFT)));
12210
12211         msleep(1);
12212
12213         /* Enable seeprom accesses. */
12214         tw32_f(GRC_LOCAL_CTRL,
12215              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12216         udelay(100);
12217
12218         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12219             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12220                 tg3_flag_set(tp, NVRAM);
12221
12222                 if (tg3_nvram_lock(tp)) {
12223                         netdev_warn(tp->dev,
12224                                     "Cannot get nvram lock, %s failed\n",
12225                                     __func__);
12226                         return;
12227                 }
12228                 tg3_enable_nvram_access(tp);
12229
12230                 tp->nvram_size = 0;
12231
12232                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12233                         tg3_get_5752_nvram_info(tp);
12234                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12235                         tg3_get_5755_nvram_info(tp);
12236                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12237                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12238                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12239                         tg3_get_5787_nvram_info(tp);
12240                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12241                         tg3_get_5761_nvram_info(tp);
12242                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12243                         tg3_get_5906_nvram_info(tp);
12244                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12245                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12246                         tg3_get_57780_nvram_info(tp);
12247                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12248                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12249                         tg3_get_5717_nvram_info(tp);
12250                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12251                         tg3_get_5720_nvram_info(tp);
12252                 else
12253                         tg3_get_nvram_info(tp);
12254
12255                 if (tp->nvram_size == 0)
12256                         tg3_get_nvram_size(tp);
12257
12258                 tg3_disable_nvram_access(tp);
12259                 tg3_nvram_unlock(tp);
12260
12261         } else {
12262                 tg3_flag_clear(tp, NVRAM);
12263                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12264
12265                 tg3_get_eeprom_size(tp);
12266         }
12267 }
12268
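      /* Write a block through the legacy SEEPROM interface one 32-bit
       * word at a time, polling EEPROM_ADDR_COMPLETE for up to ~1s per
       * word.
       */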
12269 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12270                                     u32 offset, u32 len, u8 *buf)
12271 {
12272         int i, j, rc = 0;
12273         u32 val;
12274
12275         for (i = 0; i < len; i += 4) {
12276                 u32 addr;
12277                 __be32 data;
12278
12279                 addr = offset + i;
12280
12281                 memcpy(&data, buf + i, 4);
12282
12283                 /*
12284                  * The SEEPROM interface expects the data to always be opposite
12285                  * the native endian format.  We accomplish this by reversing
12286                  * all the operations that would have been performed on the
12287                  * data from a call to tg3_nvram_read_be32().
12288                  */
12289                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12290
12291                 val = tr32(GRC_EEPROM_ADDR);
12292                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12293
12294                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12295                         EEPROM_ADDR_READ);
12296                 tw32(GRC_EEPROM_ADDR, val |
12297                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12298                         (addr & EEPROM_ADDR_ADDR_MASK) |
12299                         EEPROM_ADDR_START |
12300                         EEPROM_ADDR_WRITE);
12301
12302                 for (j = 0; j < 1000; j++) {
12303                         val = tr32(GRC_EEPROM_ADDR);
12304
12305                         if (val & EEPROM_ADDR_COMPLETE)
12306                                 break;
12307                         msleep(1);
12308                 }
12309                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12310                         rc = -EBUSY;
12311                         break;
12312                 }
12313         }
12314
12315         return rc;
12316 }
12317
12318 /* offset and length are dword aligned */
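      /* Unbuffered flash needs a read-modify-write of whole pages: read
       * the page into a bounce buffer, merge in the new bytes, erase
       * the page, then rewrite it word by word.
       */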
12319 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12320                 u8 *buf)
12321 {
12322         int ret = 0;
12323         u32 pagesize = tp->nvram_pagesize;
12324         u32 pagemask = pagesize - 1;
12325         u32 nvram_cmd;
12326         u8 *tmp;
12327
12328         tmp = kmalloc(pagesize, GFP_KERNEL);
12329         if (tmp == NULL)
12330                 return -ENOMEM;
12331
12332         while (len) {
12333                 int j;
12334                 u32 phy_addr, page_off, size;
12335
12336                 phy_addr = offset & ~pagemask;
12337
12338                 for (j = 0; j < pagesize; j += 4) {
12339                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12340                                                   (__be32 *) (tmp + j));
12341                         if (ret)
12342                                 break;
12343                 }
12344                 if (ret)
12345                         break;
12346
12347                 page_off = offset & pagemask;
12348                 size = pagesize;
12349                 if (len < size)
12350                         size = len;
12351
12352                 len -= size;
12353
12354                 memcpy(tmp + page_off, buf, size);
12355
12356                 offset = offset + (pagesize - page_off);
12357
12358                 tg3_enable_nvram_access(tp);
12359
12360                 /*
12361                  * Before we can erase the flash page, we need
12362                  * to issue a special "write enable" command.
12363                  */
12364                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12365
12366                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12367                         break;
12368
12369                 /* Erase the target page */
12370                 tw32(NVRAM_ADDR, phy_addr);
12371
12372                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12373                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12374
12375                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12376                         break;
12377
12378                 /* Issue another write enable to start the write. */
12379                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12380
12381                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12382                         break;
12383
12384                 for (j = 0; j < pagesize; j += 4) {
12385                         __be32 data;
12386
12387                         data = *((__be32 *) (tmp + j));
12388
12389                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12390
12391                         tw32(NVRAM_ADDR, phy_addr + j);
12392
12393                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12394                                 NVRAM_CMD_WR;
12395
12396                         if (j == 0)
12397                                 nvram_cmd |= NVRAM_CMD_FIRST;
12398                         else if (j == (pagesize - 4))
12399                                 nvram_cmd |= NVRAM_CMD_LAST;
12400
                        ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
                        if (ret)
                                break;
12403                 }
12404                 if (ret)
12405                         break;
12406         }
12407
12408         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12409         tg3_nvram_exec_cmd(tp, nvram_cmd);
12410
12411         kfree(tmp);
12412
12413         return ret;
12414 }
12415
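/* Buffered flash and eeprom-like parts accept single-dword programming
 * cycles, so the helper below streams the buffer one dword at a time,
 * tagging page boundaries (and the final dword) with the
 * NVRAM_CMD_FIRST/NVRAM_CMD_LAST framing bits.  ST-flavored parts
 * additionally need an explicit write-enable at the start of each page.
 */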
12416 /* offset and length are dword aligned */
12417 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12418                 u8 *buf)
12419 {
12420         int i, ret = 0;
12421
12422         for (i = 0; i < len; i += 4, offset += 4) {
12423                 u32 page_off, phy_addr, nvram_cmd;
12424                 __be32 data;
12425
12426                 memcpy(&data, buf + i, 4);
12427                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12428
12429                 page_off = offset % tp->nvram_pagesize;
12430
12431                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12432
12433                 tw32(NVRAM_ADDR, phy_addr);
12434
12435                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12436
12437                 if (page_off == 0 || i == 0)
12438                         nvram_cmd |= NVRAM_CMD_FIRST;
12439                 if (page_off == (tp->nvram_pagesize - 4))
12440                         nvram_cmd |= NVRAM_CMD_LAST;
12441
12442                 if (i == (len - 4))
12443                         nvram_cmd |= NVRAM_CMD_LAST;
12444
12445                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12446                     !tg3_flag(tp, 5755_PLUS) &&
12447                     (tp->nvram_jedecnum == JEDEC_ST) &&
12448                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12449
                        ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_WREN |
                                                 NVRAM_CMD_GO | NVRAM_CMD_DONE);
                        if (ret)
                                break;
12455                 }
12456                 if (!tg3_flag(tp, FLASH)) {
12457                         /* We always do complete word writes to eeprom. */
12458                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12459                 }
12460
                ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
                if (ret)
                        break;
12463         }
12464         return ret;
12465 }
12466
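/* Top-level NVRAM write dispatcher.  It temporarily drops the external
 * write-protect GPIO when EEPROM_WRITE_PROT is set, then routes the
 * request to the eeprom, buffered, or unbuffered helper.  A sketch of a
 * hypothetical caller (names assumed, not from this excerpt):
 *
 *	err = tg3_nvram_write_block(tp, eeprom->offset, eeprom->len, buf);
 *
 * Offset and length must be dword aligned, as noted below.
 */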
12467 /* offset and length are dword aligned */
12468 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12469 {
12470         int ret;
12471
12472         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12473                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12474                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12475                 udelay(40);
12476         }
12477
12478         if (!tg3_flag(tp, NVRAM)) {
12479                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12480         } else {
12481                 u32 grc_mode;
12482
12483                 ret = tg3_nvram_lock(tp);
12484                 if (ret)
12485                         return ret;
12486
12487                 tg3_enable_nvram_access(tp);
12488                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12489                         tw32(NVRAM_WRITE1, 0x406);
12490
12491                 grc_mode = tr32(GRC_MODE);
12492                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12493
12494                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12495                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12496                                 buf);
12497                 } else {
12498                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12499                                 buf);
12500                 }
12501
12502                 grc_mode = tr32(GRC_MODE);
12503                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12504
12505                 tg3_disable_nvram_access(tp);
12506                 tg3_nvram_unlock(tp);
12507         }
12508
12509         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12510                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12511                 udelay(40);
12512         }
12513
12514         return ret;
12515 }
12516
12517 struct subsys_tbl_ent {
12518         u16 subsys_vendor, subsys_devid;
12519         u32 phy_id;
12520 };
12521
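/* Fallback mapping from PCI subsystem IDs to PHY IDs, used by
 * tg3_phy_probe() when neither the MII ID registers nor the eeprom
 * yield a usable PHY ID.  A phy_id of 0 marks serdes-only boards.
 */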
12522 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12523         /* Broadcom boards. */
12524         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12525           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12526         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12527           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12528         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12529           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12530         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12531           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12532         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12533           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12534         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12535           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12536         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12537           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12538         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12539           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12540         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12541           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12542         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12543           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12544         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12545           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12546
12547         /* 3com boards. */
12548         { TG3PCI_SUBVENDOR_ID_3COM,
12549           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12550         { TG3PCI_SUBVENDOR_ID_3COM,
12551           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12552         { TG3PCI_SUBVENDOR_ID_3COM,
12553           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12554         { TG3PCI_SUBVENDOR_ID_3COM,
12555           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12556         { TG3PCI_SUBVENDOR_ID_3COM,
12557           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12558
12559         /* DELL boards. */
12560         { TG3PCI_SUBVENDOR_ID_DELL,
12561           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12562         { TG3PCI_SUBVENDOR_ID_DELL,
12563           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12564         { TG3PCI_SUBVENDOR_ID_DELL,
12565           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12566         { TG3PCI_SUBVENDOR_ID_DELL,
12567           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12568
12569         /* Compaq boards. */
12570         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12571           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12572         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12573           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12574         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12575           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12576         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12577           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12578         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12579           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12580
12581         /* IBM boards. */
12582         { TG3PCI_SUBVENDOR_ID_IBM,
12583           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12584 };
12585
12586 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12587 {
12588         int i;
12589
12590         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12591                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12592                      tp->pdev->subsystem_vendor) &&
12593                     (subsys_id_to_phy_id[i].subsys_devid ==
12594                      tp->pdev->subsystem_device))
12595                         return &subsys_id_to_phy_id[i];
12596         }
12597         return NULL;
12598 }
12599
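/* Pull the hardware configuration out of NIC SRAM (or, on 5906, the
 * VCPU shadow registers): PHY ID, LED mode, WOL/ASF/APE enables, and
 * assorted workaround flags.  The device is forced into D0 first since
 * SRAM may not be accessible from D3hot on early chips.
 */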
12600 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12601 {
12602         u32 val;
12603         u16 pmcsr;
12604
12605         /* On some early chips the SRAM cannot be accessed in D3hot state,
         * so we need to make sure we're in D0.
12607          */
12608         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12609         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12610         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12611         msleep(1);
12612
12613         /* Make sure register accesses (indirect or otherwise)
12614          * will function correctly.
12615          */
12616         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12617                                tp->misc_host_ctrl);
12618
12619         /* The memory arbiter has to be enabled in order for SRAM accesses
12620          * to succeed.  Normally on powerup the tg3 chip firmware will make
12621          * sure it is enabled, but other entities such as system netboot
12622          * code might disable it.
12623          */
12624         val = tr32(MEMARB_MODE);
12625         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12626
12627         tp->phy_id = TG3_PHY_ID_INVALID;
12628         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12629
12630         /* Assume an onboard device and WOL capable by default.  */
12631         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12632         tg3_flag_set(tp, WOL_CAP);
12633
12634         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12635                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12636                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12637                         tg3_flag_set(tp, IS_NIC);
12638                 }
12639                 val = tr32(VCPU_CFGSHDW);
12640                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12641                         tg3_flag_set(tp, ASPM_WORKAROUND);
12642                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12643                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12644                         tg3_flag_set(tp, WOL_ENABLE);
12645                         device_set_wakeup_enable(&tp->pdev->dev, true);
12646                 }
12647                 goto done;
12648         }
12649
12650         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12651         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12652                 u32 nic_cfg, led_cfg;
12653                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12654                 int eeprom_phy_serdes = 0;
12655
12656                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12657                 tp->nic_sram_data_cfg = nic_cfg;
12658
12659                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12660                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12661                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12662                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12663                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12664                     (ver > 0) && (ver < 0x100))
12665                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12666
12667                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12668                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12669
12670                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12671                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12672                         eeprom_phy_serdes = 1;
12673
12674                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12675                 if (nic_phy_id != 0) {
12676                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12677                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12678
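                        /* Pack the two SRAM ID words into the driver's
                         * internal phy_id layout; the same packing is
                         * applied to MII_PHYSID1/2 in tg3_phy_probe().
                         */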
12679                         eeprom_phy_id  = (id1 >> 16) << 10;
12680                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
12681                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
12682                 } else
12683                         eeprom_phy_id = 0;
12684
12685                 tp->phy_id = eeprom_phy_id;
12686                 if (eeprom_phy_serdes) {
12687                         if (!tg3_flag(tp, 5705_PLUS))
12688                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12689                         else
12690                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12691                 }
12692
12693                 if (tg3_flag(tp, 5750_PLUS))
12694                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12695                                     SHASTA_EXT_LED_MODE_MASK);
12696                 else
12697                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12698
12699                 switch (led_cfg) {
12700                 default:
12701                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12702                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12703                         break;
12704
12705                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12706                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12707                         break;
12708
12709                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12710                         tp->led_ctrl = LED_CTRL_MODE_MAC;
12711
12712                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12713                          * read on some older 5700/5701 bootcode.
12714                          */
12715                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12716                             ASIC_REV_5700 ||
12717                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
12718                             ASIC_REV_5701)
12719                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12720
12721                         break;
12722
12723                 case SHASTA_EXT_LED_SHARED:
12724                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
12725                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12726                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12727                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12728                                                  LED_CTRL_MODE_PHY_2);
12729                         break;
12730
12731                 case SHASTA_EXT_LED_MAC:
12732                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12733                         break;
12734
12735                 case SHASTA_EXT_LED_COMBO:
12736                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
12737                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12738                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12739                                                  LED_CTRL_MODE_PHY_2);
12740                         break;
12741
12742                 }
12743
12744                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12745                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12746                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12747                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12748
12749                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12750                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12751
12752                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12753                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12754                         if ((tp->pdev->subsystem_vendor ==
12755                              PCI_VENDOR_ID_ARIMA) &&
12756                             (tp->pdev->subsystem_device == 0x205a ||
12757                              tp->pdev->subsystem_device == 0x2063))
12758                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12759                 } else {
12760                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12761                         tg3_flag_set(tp, IS_NIC);
12762                 }
12763
12764                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12765                         tg3_flag_set(tp, ENABLE_ASF);
12766                         if (tg3_flag(tp, 5750_PLUS))
12767                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12768                 }
12769
12770                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12771                     tg3_flag(tp, 5750_PLUS))
12772                         tg3_flag_set(tp, ENABLE_APE);
12773
12774                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12775                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12776                         tg3_flag_clear(tp, WOL_CAP);
12777
12778                 if (tg3_flag(tp, WOL_CAP) &&
12779                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12780                         tg3_flag_set(tp, WOL_ENABLE);
12781                         device_set_wakeup_enable(&tp->pdev->dev, true);
12782                 }
12783
12784                 if (cfg2 & (1 << 17))
12785                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12786
                /* SerDes signal pre-emphasis in register 0x590 is set by
                 * the bootcode if bit 18 is set.
                 */
12789                 if (cfg2 & (1 << 18))
12790                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12791
12792                 if ((tg3_flag(tp, 57765_PLUS) ||
12793                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12794                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12795                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12796                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12797
12798                 if (tg3_flag(tp, PCI_EXPRESS) &&
12799                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12800                     !tg3_flag(tp, 57765_PLUS)) {
12801                         u32 cfg3;
12802
12803                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12804                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12805                                 tg3_flag_set(tp, ASPM_WORKAROUND);
12806                 }
12807
12808                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12809                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12810                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12811                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12812                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12813                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12814         }
12815 done:
12816         if (tg3_flag(tp, WOL_CAP))
12817                 device_set_wakeup_enable(&tp->pdev->dev,
12818                                          tg3_flag(tp, WOL_ENABLE));
12819         else
12820                 device_set_wakeup_capable(&tp->pdev->dev, false);
12821 }
12822
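/* Kick a command into the OTP controller and poll OTP_STATUS for
 * completion, giving the hardware up to 1 ms (100 polls at 10 us).
 */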
12823 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12824 {
12825         int i;
12826         u32 val;
12827
12828         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12829         tw32(OTP_CTRL, cmd);
12830
12831         /* Wait for up to 1 ms for command to execute. */
12832         for (i = 0; i < 100; i++) {
12833                 val = tr32(OTP_STATUS);
12834                 if (val & OTP_STATUS_CMD_DONE)
12835                         break;
12836                 udelay(10);
12837         }
12838
12839         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12840 }
12841
12842 /* Read the gphy configuration from the OTP region of the chip.  The gphy
12843  * configuration is a 32-bit value that straddles the alignment boundary.
12844  * We do two 32-bit reads and then shift and merge the results.
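 * For example, if the read at OTP_ADDRESS_MAGIC1 returns 0x1111AAAA and
 * the read at OTP_ADDRESS_MAGIC2 returns 0xBBBB2222, the merged value
 * is 0xAAAABBBB.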
12845  */
12846 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12847 {
12848         u32 bhalf_otp, thalf_otp;
12849
12850         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12851
12852         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12853                 return 0;
12854
12855         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12856
12857         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12858                 return 0;
12859
12860         thalf_otp = tr32(OTP_READ_DATA);
12861
12862         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12863
12864         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12865                 return 0;
12866
12867         bhalf_otp = tr32(OTP_READ_DATA);
12868
12869         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12870 }
12871
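/* Seed the link configuration with everything the PHY could advertise:
 * autoneg plus pause, 1000BASE-T unless the PHY is 10/100-only, and
 * either the full TP speed menu or fibre for serdes parts.
 */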
12872 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12873 {
12874         u32 adv = ADVERTISED_Autoneg |
12875                   ADVERTISED_Pause;
12876
12877         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12878                 adv |= ADVERTISED_1000baseT_Half |
12879                        ADVERTISED_1000baseT_Full;
12880
12881         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12882                 adv |= ADVERTISED_100baseT_Half |
12883                        ADVERTISED_100baseT_Full |
12884                        ADVERTISED_10baseT_Half |
12885                        ADVERTISED_10baseT_Full |
12886                        ADVERTISED_TP;
12887         else
12888                 adv |= ADVERTISED_FIBRE;
12889
12890         tp->link_config.advertising = adv;
12891         tp->link_config.speed = SPEED_INVALID;
12892         tp->link_config.duplex = DUPLEX_INVALID;
12893         tp->link_config.autoneg = AUTONEG_ENABLE;
12894         tp->link_config.active_speed = SPEED_INVALID;
12895         tp->link_config.active_duplex = DUPLEX_INVALID;
12896         tp->link_config.orig_speed = SPEED_INVALID;
12897         tp->link_config.orig_duplex = DUPLEX_INVALID;
12898         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12899 }
12900
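/* Determine the PHY ID, trying sources in decreasing order of trust:
 * phylib (if USE_PHYLIB), the MII ID registers (unless ASF/APE firmware
 * owns the PHY), the value cached from the eeprom, and finally the
 * subsystem-ID table above.
 */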
12901 static int __devinit tg3_phy_probe(struct tg3 *tp)
12902 {
12903         u32 hw_phy_id_1, hw_phy_id_2;
12904         u32 hw_phy_id, hw_phy_id_masked;
12905         int err;
12906
12907         /* flow control autonegotiation is default behavior */
12908         tg3_flag_set(tp, PAUSE_AUTONEG);
12909         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12910
12911         if (tg3_flag(tp, USE_PHYLIB))
12912                 return tg3_phy_init(tp);
12913
12914         /* Reading the PHY ID register can conflict with ASF
12915          * firmware access to the PHY hardware.
12916          */
12917         err = 0;
12918         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
12919                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12920         } else {
                /* Now read the physical PHY_ID from the chip and verify
                 * that it is sane.  If it doesn't look good, we fall back
                 * to the PHY ID recorded in the eeprom area, and failing
                 * that, the hard-coded subsystem-ID table.
                 */
12926                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12927                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12928
12929                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
12930                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12931                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
12932
12933                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12934         }
12935
12936         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12937                 tp->phy_id = hw_phy_id;
12938                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12939                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12940                 else
12941                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12942         } else {
12943                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12944                         /* Do nothing, phy ID already set up in
12945                          * tg3_get_eeprom_hw_cfg().
12946                          */
12947                 } else {
12948                         struct subsys_tbl_ent *p;
12949
12950                         /* No eeprom signature?  Try the hardcoded
12951                          * subsys device table.
12952                          */
12953                         p = tg3_lookup_by_subsys(tp);
12954                         if (!p)
12955                                 return -ENODEV;
12956
12957                         tp->phy_id = p->phy_id;
12958                         if (!tp->phy_id ||
12959                             tp->phy_id == TG3_PHY_ID_BCM8002)
12960                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12961                 }
12962         }
12963
12964         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12965             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
12966              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
12967              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
12968               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
12969              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12970               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12971                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12972
12973         tg3_phy_init_link_config(tp);
12974
12975         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12976             !tg3_flag(tp, ENABLE_APE) &&
12977             !tg3_flag(tp, ENABLE_ASF)) {
12978                 u32 bmsr, mask;
12979
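                /* MII_BMSR latches link-down events; read it twice so
                 * the second read reflects the current link state.
                 */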
12980                 tg3_readphy(tp, MII_BMSR, &bmsr);
12981                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12982                     (bmsr & BMSR_LSTATUS))
12983                         goto skip_phy_reset;
12984
12985                 err = tg3_phy_reset(tp);
12986                 if (err)
12987                         return err;
12988
12989                 tg3_phy_set_wirespeed(tp);
12990
12991                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12992                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12993                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12994                 if (!tg3_copper_is_advertising_all(tp, mask)) {
12995                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
12996                                             tp->link_config.flowctrl);
12997
12998                         tg3_writephy(tp, MII_BMCR,
12999                                      BMCR_ANENABLE | BMCR_ANRESTART);
13000                 }
13001         }
13002
13003 skip_phy_reset:
13004         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13005                 err = tg3_init_5401phy_dsp(tp);
13006                 if (err)
13007                         return err;
13008
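                /* Note: the second call appears to be a deliberate retry
                 * for the 5401 DSP initialization quirk (an assumption
                 * based on the surrounding code, not documented here).
                 */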
13009                 err = tg3_init_5401phy_dsp(tp);
13010         }
13011
13012         return err;
13013 }
13014
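/* Extract the board part number (and, on Dell boards, a firmware
 * version prefix) from the VPD read-only section.  If VPD is absent or
 * unparseable, fall back to a part number guessed from the PCI device
 * ID.
 */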
13015 static void __devinit tg3_read_vpd(struct tg3 *tp)
13016 {
13017         u8 *vpd_data;
13018         unsigned int block_end, rosize, len;
13019         int j, i = 0;
13020
13021         vpd_data = (u8 *)tg3_vpd_readblock(tp);
13022         if (!vpd_data)
13023                 goto out_no_vpd;
13024
13025         i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13026                              PCI_VPD_LRDT_RO_DATA);
13027         if (i < 0)
13028                 goto out_not_found;
13029
13030         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13031         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13032         i += PCI_VPD_LRDT_TAG_SIZE;
13033
13034         if (block_end > TG3_NVM_VPD_LEN)
13035                 goto out_not_found;
13036
13037         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13038                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13039         if (j > 0) {
13040                 len = pci_vpd_info_field_size(&vpd_data[j]);
13041
13042                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13043                 if (j + len > block_end || len != 4 ||
13044                     memcmp(&vpd_data[j], "1028", 4))
13045                         goto partno;
13046
13047                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13048                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13049                 if (j < 0)
13050                         goto partno;
13051
13052                 len = pci_vpd_info_field_size(&vpd_data[j]);
13053
13054                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
                len = pci_vpd_info_field_size(&vpd_data[j]);

                j += PCI_VPD_INFO_FLD_HDR_SIZE;
                if (j + len > block_end ||
                    len >= sizeof(tp->fw_ver))
                        goto partno;

                memcpy(tp->fw_ver, &vpd_data[j], len);
                strncat(tp->fw_ver, " bc ", sizeof(tp->fw_ver) - len - 1);
13060         }
13061
13062 partno:
13063         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13064                                       PCI_VPD_RO_KEYWORD_PARTNO);
13065         if (i < 0)
13066                 goto out_not_found;
13067
13068         len = pci_vpd_info_field_size(&vpd_data[i]);
13069
13070         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13071         if (len > TG3_BPN_SIZE ||
13072             (len + i) > TG3_NVM_VPD_LEN)
13073                 goto out_not_found;
13074
13075         memcpy(tp->board_part_number, &vpd_data[i], len);
13076
13077 out_not_found:
13078         kfree(vpd_data);
13079         if (tp->board_part_number[0])
13080                 return;
13081
13082 out_no_vpd:
13083         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13084                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13085                         strcpy(tp->board_part_number, "BCM5717");
13086                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13087                         strcpy(tp->board_part_number, "BCM5718");
13088                 else
13089                         goto nomatch;
13090         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13091                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13092                         strcpy(tp->board_part_number, "BCM57780");
13093                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13094                         strcpy(tp->board_part_number, "BCM57760");
13095                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13096                         strcpy(tp->board_part_number, "BCM57790");
13097                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13098                         strcpy(tp->board_part_number, "BCM57788");
13099                 else
13100                         goto nomatch;
13101         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13102                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13103                         strcpy(tp->board_part_number, "BCM57761");
13104                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13105                         strcpy(tp->board_part_number, "BCM57765");
13106                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13107                         strcpy(tp->board_part_number, "BCM57781");
13108                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13109                         strcpy(tp->board_part_number, "BCM57785");
13110                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13111                         strcpy(tp->board_part_number, "BCM57791");
13112                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13113                         strcpy(tp->board_part_number, "BCM57795");
13114                 else
13115                         goto nomatch;
13116         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13117                 strcpy(tp->board_part_number, "BCM95906");
13118         } else {
13119 nomatch:
13120                 strcpy(tp->board_part_number, "none");
13121         }
13122 }
13123
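/* Sanity-check a firmware image header: the first dword must carry the
 * 0x0c000000 signature in its top six bits and the second dword must be
 * zero.
 */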
13124 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13125 {
13126         u32 val;
13127
13128         if (tg3_nvram_read(tp, offset, &val) ||
13129             (val & 0xfc000000) != 0x0c000000 ||
13130             tg3_nvram_read(tp, offset + 4, &val) ||
13131             val != 0)
13132                 return 0;
13133
13134         return 1;
13135 }
13136
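/* Append the bootcode version to tp->fw_ver.  Newer images carry a
 * 16-byte version string located via a pointer at offset + 8; older
 * images only encode major/minor in the NVM directory entry.
 */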
13137 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13138 {
13139         u32 val, offset, start, ver_offset;
13140         int i, dst_off;
13141         bool newver = false;
13142
13143         if (tg3_nvram_read(tp, 0xc, &offset) ||
13144             tg3_nvram_read(tp, 0x4, &start))
13145                 return;
13146
13147         offset = tg3_nvram_logical_addr(tp, offset);
13148
13149         if (tg3_nvram_read(tp, offset, &val))
13150                 return;
13151
13152         if ((val & 0xfc000000) == 0x0c000000) {
13153                 if (tg3_nvram_read(tp, offset + 4, &val))
13154                         return;
13155
13156                 if (val == 0)
13157                         newver = true;
13158         }
13159
13160         dst_off = strlen(tp->fw_ver);
13161
13162         if (newver) {
13163                 if (TG3_VER_SIZE - dst_off < 16 ||
13164                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13165                         return;
13166
13167                 offset = offset + ver_offset - start;
13168                 for (i = 0; i < 16; i += 4) {
13169                         __be32 v;
13170                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13171                                 return;
13172
13173                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13174                 }
13175         } else {
13176                 u32 major, minor;
13177
13178                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13179                         return;
13180
13181                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13182                         TG3_NVM_BCVER_MAJSFT;
13183                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13184                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13185                          "v%d.%02d", major, minor);
13186         }
13187 }
13188
13189 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13190 {
13191         u32 val, major, minor;
13192
13193         /* Use native endian representation */
13194         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13195                 return;
13196
13197         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13198                 TG3_NVM_HWSB_CFG1_MAJSFT;
13199         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13200                 TG3_NVM_HWSB_CFG1_MINSFT;
13201
        snprintf(tp->fw_ver, TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13203 }
13204
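/* Decode a format-1 selfboot image version.  The EDH word's offset
 * varies with the image revision; major/minor are printed as v%d.%02d
 * and a nonzero build number is appended as a letter suffix ('a' for
 * build 1).
 */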
13205 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13206 {
13207         u32 offset, major, minor, build;
13208
13209         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13210
13211         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13212                 return;
13213
13214         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13215         case TG3_EEPROM_SB_REVISION_0:
13216                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13217                 break;
13218         case TG3_EEPROM_SB_REVISION_2:
13219                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13220                 break;
13221         case TG3_EEPROM_SB_REVISION_3:
13222                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13223                 break;
13224         case TG3_EEPROM_SB_REVISION_4:
13225                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13226                 break;
13227         case TG3_EEPROM_SB_REVISION_5:
13228                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13229                 break;
13230         case TG3_EEPROM_SB_REVISION_6:
13231                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13232                 break;
13233         default:
13234                 return;
13235         }
13236
13237         if (tg3_nvram_read(tp, offset, &val))
13238                 return;
13239
13240         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13241                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13242         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13243                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13244         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13245
13246         if (minor > 99 || build > 26)
13247                 return;
13248
13249         offset = strlen(tp->fw_ver);
13250         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13251                  " v%d.%02d", major, minor);
13252
13253         if (build > 0) {
13254                 offset = strlen(tp->fw_ver);
13255                 if (offset < TG3_VER_SIZE - 1)
13256                         tp->fw_ver[offset] = 'a' + build - 1;
13257         }
13258 }
13259
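/* Locate the ASF initialization entry in the NVM directory, validate
 * the firmware image it points at, and append the 16-byte management
 * firmware version string to tp->fw_ver.
 */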
13260 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13261 {
13262         u32 val, offset, start;
13263         int i, vlen;
13264
13265         for (offset = TG3_NVM_DIR_START;
13266              offset < TG3_NVM_DIR_END;
13267              offset += TG3_NVM_DIRENT_SIZE) {
13268                 if (tg3_nvram_read(tp, offset, &val))
13269                         return;
13270
13271                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13272                         break;
13273         }
13274
13275         if (offset == TG3_NVM_DIR_END)
13276                 return;
13277
13278         if (!tg3_flag(tp, 5705_PLUS))
13279                 start = 0x08000000;
13280         else if (tg3_nvram_read(tp, offset - 4, &start))
13281                 return;
13282
13283         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13284             !tg3_fw_img_is_valid(tp, offset) ||
13285             tg3_nvram_read(tp, offset + 8, &val))
13286                 return;
13287
13288         offset += val - start;
13289
13290         vlen = strlen(tp->fw_ver);
13291
13292         tp->fw_ver[vlen++] = ',';
13293         tp->fw_ver[vlen++] = ' ';
13294
13295         for (i = 0; i < 4; i++) {
13296                 __be32 v;
13297                 if (tg3_nvram_read_be32(tp, offset, &v))
13298                         return;
13299
13300                 offset += sizeof(v);
13301
13302                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13303                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13304                         break;
13305                 }
13306
13307                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13308                 vlen += sizeof(v);
13309         }
13310 }
13311
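/* Query the APE for the DASH (or NCSI) firmware version.  This only
 * applies when both APE and ASF are enabled and the APE firmware
 * reports itself ready.
 */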
13312 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13313 {
13314         int vlen;
13315         u32 apedata;
13316         char *fwtype;
13317
13318         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13319                 return;
13320
13321         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13322         if (apedata != APE_SEG_SIG_MAGIC)
13323                 return;
13324
13325         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13326         if (!(apedata & APE_FW_STATUS_READY))
13327                 return;
13328
13329         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13330
13331         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13332                 tg3_flag_set(tp, APE_HAS_NCSI);
13333                 fwtype = "NCSI";
13334         } else {
13335                 fwtype = "DASH";
13336         }
13337
13338         vlen = strlen(tp->fw_ver);
13339
13340         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13341                  fwtype,
13342                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13343                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13344                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13345                  (apedata & APE_FW_VERSION_BLDMSK));
13346 }
13347
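/* Top-level firmware version assembly: dispatch on the NVRAM signature
 * (bootcode, selfboot, or hardware selfboot formats) and optionally
 * append the management firmware version.
 */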
13348 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13349 {
13350         u32 val;
13351         bool vpd_vers = false;
13352
13353         if (tp->fw_ver[0] != 0)
13354                 vpd_vers = true;
13355
13356         if (tg3_flag(tp, NO_NVRAM)) {
13357                 strcat(tp->fw_ver, "sb");
13358                 return;
13359         }
13360
13361         if (tg3_nvram_read(tp, 0, &val))
13362                 return;
13363
13364         if (val == TG3_EEPROM_MAGIC)
13365                 tg3_read_bc_ver(tp);
13366         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13367                 tg3_read_sb_ver(tp, val);
13368         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13369                 tg3_read_hwsb_ver(tp);
13370         else
13371                 return;
13372
13373         if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
13374                 goto done;
13375
13376         tg3_read_mgmtfw_ver(tp);
13377
13378 done:
13379         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13380 }
13381
13382 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13383
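/* RX return ring size depends on device capabilities: 5717-class parts
 * support large production rings, and jumbo-capable (non-5780-class)
 * parts use the larger 5700 sizing.
 */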
13384 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13385 {
13386         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13387                 return TG3_RX_RET_MAX_SIZE_5717;
13388         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13389                 return TG3_RX_RET_MAX_SIZE_5700;
13390         else
13391                 return TG3_RX_RET_MAX_SIZE_5705;
13392 }
13393
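/* Host bridges known to reorder posted writes; the presence of one of
 * these is used later in the driver (beyond this excerpt) to enable a
 * write-reordering workaround.
 */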
13394 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13395         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13396         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13397         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13398         { },
13399 };
13400
13401 static int __devinit tg3_get_invariants(struct tg3 *tp)
13402 {
13403         u32 misc_ctrl_reg;
13404         u32 pci_state_reg, grc_misc_cfg;
13405         u32 val;
13406         u16 pci_cmd;
13407         int err;
13408
13409         /* Force memory write invalidate off.  If we leave it on,
13410          * then on 5700_BX chips we have to enable a workaround.
13411          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
         * to match the cacheline size.  The Broadcom driver has this
         * workaround but turns MWI off all the time and so never uses
13414          * it.  This seems to suggest that the workaround is insufficient.
13415          */
13416         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13417         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13418         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13419
13420         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13421          * has the register indirect write enable bit set before
13422          * we try to access any of the MMIO registers.  It is also
13423          * critical that the PCI-X hw workaround situation is decided
13424          * before that as well.
13425          */
13426         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13427                               &misc_ctrl_reg);
13428
13429         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13430                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13431         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13432                 u32 prod_id_asic_rev;
13433
13434                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13435                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13436                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13437                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13438                         pci_read_config_dword(tp->pdev,
13439                                               TG3PCI_GEN2_PRODID_ASICREV,
13440                                               &prod_id_asic_rev);
13441                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13442                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13443                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13444                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13445                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13446                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13447                         pci_read_config_dword(tp->pdev,
13448                                               TG3PCI_GEN15_PRODID_ASICREV,
13449                                               &prod_id_asic_rev);
13450                 else
13451                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13452                                               &prod_id_asic_rev);
13453
13454                 tp->pci_chip_rev_id = prod_id_asic_rev;
13455         }
13456
13457         /* Wrong chip ID in 5752 A0. This code can be removed later
13458          * as A0 is not in production.
13459          */
13460         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13461                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13462
13463         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13464          * we need to disable memory and use config. cycles
13465          * only to access all registers. The 5702/03 chips
13466          * can mistakenly decode the special cycles from the
13467          * ICH chipsets as memory write cycles, causing corruption
13468          * of register and memory space. Only certain ICH bridges
13469          * will drive special cycles with non-zero data during the
13470          * address phase which can fall within the 5703's address
13471          * range. This is not an ICH bug as the PCI spec allows
13472          * non-zero address during special cycles. However, only
13473          * these ICH bridges are known to drive non-zero addresses
13474          * during special cycles.
13475          *
13476          * Since special cycles do not cross PCI bridges, we only
13477          * enable this workaround if the 5703 is on the secondary
13478          * bus of these ICH bridges.
13479          */
13480         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13481             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13482                 static struct tg3_dev_id {
13483                         u32     vendor;
13484                         u32     device;
13485                         u32     rev;
13486                 } ich_chipsets[] = {
13487                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13488                           PCI_ANY_ID },
13489                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13490                           PCI_ANY_ID },
13491                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13492                           0xa },
13493                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13494                           PCI_ANY_ID },
13495                         { },
13496                 };
13497                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13498                 struct pci_dev *bridge = NULL;
13499
13500                 while (pci_id->vendor != 0) {
13501                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13502                                                 bridge);
13503                         if (!bridge) {
13504                                 pci_id++;
13505                                 continue;
13506                         }
13507                         if (pci_id->rev != PCI_ANY_ID) {
13508                                 if (bridge->revision > pci_id->rev)
13509                                         continue;
13510                         }
13511                         if (bridge->subordinate &&
13512                             (bridge->subordinate->number ==
13513                              tp->pdev->bus->number)) {
13514                                 tg3_flag_set(tp, ICH_WORKAROUND);
13515                                 pci_dev_put(bridge);
13516                                 break;
13517                         }
13518                 }
13519         }
13520
13521         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13522                 static struct tg3_dev_id {
13523                         u32     vendor;
13524                         u32     device;
13525                 } bridge_chipsets[] = {
13526                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13527                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13528                         { },
13529                 };
13530                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13531                 struct pci_dev *bridge = NULL;
13532
13533                 while (pci_id->vendor != 0) {
13534                         bridge = pci_get_device(pci_id->vendor,
13535                                                 pci_id->device,
13536                                                 bridge);
13537                         if (!bridge) {
13538                                 pci_id++;
13539                                 continue;
13540                         }
13541                         if (bridge->subordinate &&
13542                             (bridge->subordinate->number <=
13543                              tp->pdev->bus->number) &&
13544                             (bridge->subordinate->subordinate >=
13545                              tp->pdev->bus->number)) {
13546                                 tg3_flag_set(tp, 5701_DMA_BUG);
13547                                 pci_dev_put(bridge);
13548                                 break;
13549                         }
13550                 }
13551         }
13552
13553         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
         * DMA addresses wider than 40 bits.  The bridge may also have
         * additional 57xx devices behind it, e.g. in some 4-port NIC designs.
13556          * Any tg3 device found behind the bridge will also need the 40-bit
13557          * DMA workaround.
13558          */
13559         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13560             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13561                 tg3_flag_set(tp, 5780_CLASS);
13562                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13563                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13564         } else {
13565                 struct pci_dev *bridge = NULL;
13566
13567                 do {
13568                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13569                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13570                                                 bridge);
13571                         if (bridge && bridge->subordinate &&
13572                             (bridge->subordinate->number <=
13573                              tp->pdev->bus->number) &&
13574                             (bridge->subordinate->subordinate >=
13575                              tp->pdev->bus->number)) {
13576                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13577                                 pci_dev_put(bridge);
13578                                 break;
13579                         }
13580                 } while (bridge);
13581         }
13582
13583         /* Initialize misc host control in PCI block. */
13584         tp->misc_host_ctrl |= (misc_ctrl_reg &
13585                                MISC_HOST_CTRL_CHIPREV);
13586         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13587                                tp->misc_host_ctrl);
13588
13589         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13590             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13591             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13592             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13593                 tp->pdev_peer = tg3_find_peer(tp);
13594
13595         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13596             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13597             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13598                 tg3_flag_set(tp, 5717_PLUS);
13599
13600         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13601             tg3_flag(tp, 5717_PLUS))
13602                 tg3_flag_set(tp, 57765_PLUS);
13603
13604         /* Intentionally exclude ASIC_REV_5906 */
13605         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13606             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13607             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13608             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13609             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13610             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13611             tg3_flag(tp, 57765_PLUS))
13612                 tg3_flag_set(tp, 5755_PLUS);
13613
13614         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13615             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13616             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13617             tg3_flag(tp, 5755_PLUS) ||
13618             tg3_flag(tp, 5780_CLASS))
13619                 tg3_flag_set(tp, 5750_PLUS);
13620
13621         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13622             tg3_flag(tp, 5750_PLUS))
13623                 tg3_flag_set(tp, 5705_PLUS);
13624
13625         /* Determine TSO capabilities */
13626         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13627                 ; /* Do nothing. HW bug. */
13628         else if (tg3_flag(tp, 57765_PLUS))
13629                 tg3_flag_set(tp, HW_TSO_3);
13630         else if (tg3_flag(tp, 5755_PLUS) ||
13631                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13632                 tg3_flag_set(tp, HW_TSO_2);
13633         else if (tg3_flag(tp, 5750_PLUS)) {
13634                 tg3_flag_set(tp, HW_TSO_1);
13635                 tg3_flag_set(tp, TSO_BUG);
13636                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13637                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13638                         tg3_flag_clear(tp, TSO_BUG);
13639         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13640                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13641                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
                tg3_flag_set(tp, TSO_BUG);
13643                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13644                         tp->fw_needed = FIRMWARE_TG3TSO5;
13645                 else
13646                         tp->fw_needed = FIRMWARE_TG3TSO;
13647         }
13648
13649         /* Selectively allow TSO based on operating conditions */
13650         if (tg3_flag(tp, HW_TSO_1) ||
13651             tg3_flag(tp, HW_TSO_2) ||
13652             tg3_flag(tp, HW_TSO_3) ||
13653             tp->fw_needed) {
13654                 /* For firmware TSO, assume ASF is disabled.
13655                  * We'll disable TSO later if we discover ASF
13656                  * is enabled in tg3_get_eeprom_hw_cfg().
13657                  */
13658                 tg3_flag_set(tp, TSO_CAPABLE);
13659         } else {
13660                 tg3_flag_clear(tp, TSO_CAPABLE);
13661                 tg3_flag_clear(tp, TSO_BUG);
13662                 tp->fw_needed = NULL;
13663         }
13664
13665         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13666                 tp->fw_needed = FIRMWARE_TG3;
13667
13668         tp->irq_max = 1;
13669
13670         if (tg3_flag(tp, 5750_PLUS)) {
13671                 tg3_flag_set(tp, SUPPORT_MSI);
13672                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13673                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13674                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13675                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13676                      tp->pdev_peer == tp->pdev))
13677                         tg3_flag_clear(tp, SUPPORT_MSI);
13678
13679                 if (tg3_flag(tp, 5755_PLUS) ||
13680                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13681                         tg3_flag_set(tp, 1SHOT_MSI);
13682                 }
13683
13684                 if (tg3_flag(tp, 57765_PLUS)) {
13685                         tg3_flag_set(tp, SUPPORT_MSIX);
13686                         tp->irq_max = TG3_IRQ_MAX_VECS;
13687                 }
13688         }
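              /* MSI is advertised for all 5750_PLUS parts, then revoked for
               * 5750 AX/BX steppings and for 5714s at revision A2 or older
               * that have no peer function; only 57765_PLUS parts also get
               * MSI-X, with up to TG3_IRQ_MAX_VECS vectors.  (Presumably
               * the revocations reflect MSI errata on those early
               * revisions, though the code does not say.)
               */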
13689
13690         /* All chips can get confused if TX buffers
13691          * straddle the 4GB address boundary.
13692          */
13693         tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
13694
13695         if (tg3_flag(tp, 5755_PLUS) ||
13696             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13697                 tg3_flag_set(tp, SHORT_DMA_BUG);
13698         else
13699                 tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
13700
13701         if (tg3_flag(tp, 5717_PLUS))
13702                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13703
13704         if (tg3_flag(tp, 57765_PLUS) &&
13705             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13706                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13707
13708         if (!tg3_flag(tp, 5705_PLUS) ||
13709             tg3_flag(tp, 5780_CLASS) ||
13710             tg3_flag(tp, USE_JUMBO_BDFLAG))
13711                 tg3_flag_set(tp, JUMBO_CAPABLE);
13712
13713         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13714                               &pci_state_reg);
13715
13716         tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13717         if (tp->pcie_cap != 0) {
13718                 u16 lnkctl;
13719
13720                 tg3_flag_set(tp, PCI_EXPRESS);
13721
13722                 tp->pcie_readrq = 4096;
13723                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13724                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13725                         tp->pcie_readrq = 2048;
13726
13727                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13728
13729                 pci_read_config_word(tp->pdev,
13730                                      tp->pcie_cap + PCI_EXP_LNKCTL,
13731                                      &lnkctl);
13732                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13733                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13734                             ASIC_REV_5906) {
13735                                 tg3_flag_clear(tp, HW_TSO_2);
13736                                 tg3_flag_clear(tp, TSO_CAPABLE);
13737                         }
13738                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13739                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13740                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13741                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13742                                 tg3_flag_set(tp, CLKREQ_BUG);
13743                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13744                         tg3_flag_set(tp, L1PLLPD_EN);
13745                 }
13746         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13747                 tg3_flag_set(tp, PCI_EXPRESS);
13748         } else if (!tg3_flag(tp, 5705_PLUS) ||
13749                    tg3_flag(tp, 5780_CLASS)) {
13750                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13751                 if (!tp->pcix_cap) {
13752                         dev_err(&tp->pdev->dev,
13753                                 "Cannot find PCI-X capability, aborting\n");
13754                         return -EIO;
13755                 }
13756
13757                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13758                         tg3_flag_set(tp, PCIX_MODE);
13759         }
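              /* Bus type is now settled: PCI Express if the PCIe capability
               * is present (or implied, for the 5785); otherwise PCI-X mode
               * is detected from TG3PCI_PCISTATE on pre-5705 and 5780-class
               * devices, and plain PCI is assumed for everything else.
               */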
13760
13761         /* If we have an AMD 762 or VIA K8T800 chipset, reordering of
13762          * mailbox register writes by the host controller can cause
13763          * serious problems.  We read back from every mailbox register
13764          * write to force the writes to be posted to the chip in
13765          * order.
13766          */
13767         if (pci_dev_present(tg3_write_reorder_chipsets) &&
13768             !tg3_flag(tp, PCI_EXPRESS))
13769                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13770
13771         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13772                              &tp->pci_cacheline_sz);
13773         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13774                              &tp->pci_lat_timer);
13775         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13776             tp->pci_lat_timer < 64) {
13777                 tp->pci_lat_timer = 64;
13778                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13779                                       tp->pci_lat_timer);
13780         }
13781
13782         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13783                 /* 5700 BX chips need to have their TX producer index
13784                  * mailboxes written twice to work around a bug.
13785                  */
13786                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13787
13788                 /* If we are in PCI-X mode, enable register write workaround.
13789                  *
13790                  * The workaround is to use indirect register accesses
13791                  * for all chip writes not to mailbox registers.
13792                  */
13793                 if (tg3_flag(tp, PCIX_MODE)) {
13794                         u32 pm_reg;
13795
13796                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13797
13798                         /* The chip can have its power management PCI config
13799                          * space registers clobbered due to this bug.
13800                          * So explicitly force the chip into D0 here.
13801                          */
13802                         pci_read_config_dword(tp->pdev,
13803                                               tp->pm_cap + PCI_PM_CTRL,
13804                                               &pm_reg);
13805                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13806                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13807                         pci_write_config_dword(tp->pdev,
13808                                                tp->pm_cap + PCI_PM_CTRL,
13809                                                pm_reg);
13810
13811                         /* Also, force SERR#/PERR# in PCI command. */
13812                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13813                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13814                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13815                 }
13816         }
13817
13818         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13819                 tg3_flag_set(tp, PCI_HIGH_SPEED);
13820         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13821                 tg3_flag_set(tp, PCI_32BIT);
13822
13823         /* Chip-specific fixup from Broadcom driver */
13824         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13825             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13826                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13827                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13828         }
13829
13830         /* Default fast path register access methods */
13831         tp->read32 = tg3_read32;
13832         tp->write32 = tg3_write32;
13833         tp->read32_mbox = tg3_read32;
13834         tp->write32_mbox = tg3_write32;
13835         tp->write32_tx_mbox = tg3_write32;
13836         tp->write32_rx_mbox = tg3_write32;
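              /* The defaults above are plain memory-mapped accessors; the
               * blocks that follow swap in flushing or indirect variants
               * for chips and bus configurations with known access bugs.
               */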
13837
13838         /* Various workaround register access methods */
13839         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13840                 tp->write32 = tg3_write_indirect_reg32;
13841         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13842                  (tg3_flag(tp, PCI_EXPRESS) &&
13843                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13844                 /*
13845                  * Back-to-back register writes can cause problems on these
13846                  * chips; the workaround is to read back all register writes
13847                  * except those to the mailbox registers.
13848                  *
13849                  * See tg3_write_indirect_reg32().
13850                  */
13851                 tp->write32 = tg3_write_flush_reg32;
13852         }
13853
13854         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13855                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13856                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13857                         tp->write32_rx_mbox = tg3_write_flush_reg32;
13858         }
13859
13860         if (tg3_flag(tp, ICH_WORKAROUND)) {
13861                 tp->read32 = tg3_read_indirect_reg32;
13862                 tp->write32 = tg3_write_indirect_reg32;
13863                 tp->read32_mbox = tg3_read_indirect_mbox;
13864                 tp->write32_mbox = tg3_write_indirect_mbox;
13865                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13866                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13867
13868                 iounmap(tp->regs);
13869                 tp->regs = NULL;
13870
13871                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13872                 pci_cmd &= ~PCI_COMMAND_MEMORY;
13873                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13874         }
13875         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13876                 tp->read32_mbox = tg3_read32_mbox_5906;
13877                 tp->write32_mbox = tg3_write32_mbox_5906;
13878                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13879                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13880         }
13881
13882         if (tp->write32 == tg3_write_indirect_reg32 ||
13883             (tg3_flag(tp, PCIX_MODE) &&
13884              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13885               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13886                 tg3_flag_set(tp, SRAM_USE_CONFIG);
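              /* With SRAM_USE_CONFIG set, SRAM accesses are expected to go
               * through the PCI config-space memory window (the
               * TG3PCI_MEM_WIN_* registers) rather than the mapped BAR --
               * an inference from the flag name and the indirect accessors
               * chosen above.
               */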
13887
13888         /* Get eeprom hw config before calling tg3_power_up().
13889          * In particular, the TG3_FLAG_IS_NIC flag must be
13890          * determined before calling tg3_power_up() so that
13891          * we know whether or not to switch out of Vaux power.
13892          * When the flag is set, it means that GPIO1 is used for eeprom
13893          * write protect and also implies that it is a LOM where GPIOs
13894          * are not used to switch power.
13895          */
13896         tg3_get_eeprom_hw_cfg(tp);
13897
13898         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
13899                 tg3_flag_clear(tp, TSO_CAPABLE);
13900                 tg3_flag_clear(tp, TSO_BUG);
13901                 tp->fw_needed = NULL;
13902         }
13903
13904         if (tg3_flag(tp, ENABLE_APE)) {
13905                 /* Allow reads and writes to the
13906                  * APE register and memory space.
13907                  */
13908                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13909                                  PCISTATE_ALLOW_APE_SHMEM_WR |
13910                                  PCISTATE_ALLOW_APE_PSPACE_WR;
13911                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13912                                        pci_state_reg);
13913         }
13914
13915         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13916             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13917             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13918             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13919             tg3_flag(tp, 57765_PLUS))
13920                 tg3_flag_set(tp, CPMU_PRESENT);
13921
13922         /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13923          * GPIO1 driven high will bring 5700's external PHY out of reset.
13924          * It is also used as eeprom write protect on LOMs.
13925          */
13926         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13927         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13928             tg3_flag(tp, EEPROM_WRITE_PROT))
13929                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13930                                        GRC_LCLCTRL_GPIO_OUTPUT1);
13931         /* Unused GPIO3 must be driven as output on 5752 because there
13932          * are no pull-up resistors on unused GPIO pins.
13933          */
13934         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13935                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13936
13937         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13938             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13939             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13940                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13941
13942         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13943             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13944                 /* Turn off the debug UART. */
13945                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13946                 if (tg3_flag(tp, IS_NIC))
13947                         /* Keep VMain power. */
13948                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13949                                               GRC_LCLCTRL_GPIO_OUTPUT0;
13950         }
13951
13952         /* Force the chip into D0. */
13953         err = tg3_power_up(tp);
13954         if (err) {
13955                 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13956                 return err;
13957         }
13958
13959         /* Derive initial jumbo mode from MTU assigned in
13960          * ether_setup() via the alloc_etherdev() call.
13961          */
13962         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
13963                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13964
13965         /* Determine WakeOnLan speed to use. */
13966         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13967             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13968             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13969             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13970                 tg3_flag_clear(tp, WOL_SPEED_100MB);
13971         } else {
13972                 tg3_flag_set(tp, WOL_SPEED_100MB);
13973         }
13974
13975         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13976                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13977
13978         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
13979         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13980             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13981              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13982              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13983             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13984             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13985                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13986
13987         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13988             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13989                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13990         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13991                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13992
13993         if (tg3_flag(tp, 5705_PLUS) &&
13994             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13995             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13996             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13997             !tg3_flag(tp, 57765_PLUS)) {
13998                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13999                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14000                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14001                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14002                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14003                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14004                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14005                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14006                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14007                 } else
14008                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14009         }
14010
14011         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14012             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14013                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14014                 if (tp->phy_otp == 0)
14015                         tp->phy_otp = TG3_OTP_DEFAULT;
14016         }
14017
14018         if (tg3_flag(tp, CPMU_PRESENT))
14019                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14020         else
14021                 tp->mi_mode = MAC_MI_MODE_BASE;
14022
14023         tp->coalesce_mode = 0;
14024         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14025             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14026                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14027
14028         /* Set these bits to enable the statistics workaround. */
14029         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14030             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14031             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14032                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14033                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14034         }
14035
14036         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14037             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14038                 tg3_flag_set(tp, USE_PHYLIB);
14039
14040         err = tg3_mdio_init(tp);
14041         if (err)
14042                 return err;
14043
14044         /* Initialize data/descriptor byte/word swapping. */
14045         val = tr32(GRC_MODE);
14046         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14047                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14048                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14049                         GRC_MODE_B2HRX_ENABLE |
14050                         GRC_MODE_HTX2B_ENABLE |
14051                         GRC_MODE_HOST_STACKUP);
14052         else
14053                 val &= GRC_MODE_HOST_STACKUP;
14054
14055         tw32(GRC_MODE, val | tp->grc_mode);
14056
14057         tg3_switch_clocks(tp);
14058
14059         /* Clear this out for sanity. */
14060         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14061
14062         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14063                               &pci_state_reg);
14064         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14065             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14066                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14067
14068                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14069                     chiprevid == CHIPREV_ID_5701_B0 ||
14070                     chiprevid == CHIPREV_ID_5701_B2 ||
14071                     chiprevid == CHIPREV_ID_5701_B5) {
14072                         void __iomem *sram_base;
14073
14074                         /* Write some dummy words into the SRAM status block
14075                          * area and see if they read back correctly.  If the
14076                          * readback is bad, force-enable the PCIX workaround.
14077                          */
14078                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14079
14080                         writel(0x00000000, sram_base);
14081                         writel(0x00000000, sram_base + 4);
14082                         writel(0xffffffff, sram_base + 4);
14083                         if (readl(sram_base) != 0x00000000)
14084                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14085                 }
14086         }
14087
14088         udelay(50);
14089         tg3_nvram_init(tp);
14090
14091         grc_misc_cfg = tr32(GRC_MISC_CFG);
14092         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14093
14094         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14095             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14096              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14097                 tg3_flag_set(tp, IS_5788);
14098
14099         if (!tg3_flag(tp, IS_5788) &&
14100             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14101                 tg3_flag_set(tp, TAGGED_STATUS);
14102         if (tg3_flag(tp, TAGGED_STATUS)) {
14103                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14104                                       HOSTCC_MODE_CLRTICK_TXBD);
14105
14106                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14107                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14108                                        tp->misc_host_ctrl);
14109         }
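              /* Tagged status mode makes the chip attach a tag to each
               * status block update; echoing that tag back when re-enabling
               * interrupts lets the hardware tell whether the driver has
               * seen the latest update (a general description of the
               * feature, not spelled out here).
               */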
14110
14111         /* Preserve the APE MAC_MODE bits */
14112         if (tg3_flag(tp, ENABLE_APE))
14113                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14114         else
14115                 tp->mac_mode = TG3_DEF_MAC_MODE;
14116
14117         /* these are limited to 10/100 only */
14118         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14119              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14120             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14121              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14122              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14123               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14124               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14125             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14126              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14127               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14128               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14129             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14130             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14131             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14132             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14133                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14134
14135         err = tg3_phy_probe(tp);
14136         if (err) {
14137                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14138                 /* ... but do not return immediately ... */
14139                 tg3_mdio_fini(tp);
14140         }
14141
14142         tg3_read_vpd(tp);
14143         tg3_read_fw_ver(tp);
14144
14145         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14146                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14147         } else {
14148                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14149                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14150                 else
14151                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14152         }
14153
14154         /* 5700 {AX,BX} chips have a broken status block link
14155          * change bit implementation, so we must use the
14156          * status register in those cases.
14157          */
14158         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14159                 tg3_flag_set(tp, USE_LINKCHG_REG);
14160         else
14161                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14162
14163         /* The led_ctrl is set during tg3_phy_probe; here we might
14164          * have to force the link status polling mechanism based
14165          * upon subsystem IDs.
14166          */
14167         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14168             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14169             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14170                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14171                 tg3_flag_set(tp, USE_LINKCHG_REG);
14172         }
14173
14174         /* For all SERDES we poll the MAC status register. */
14175         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14176                 tg3_flag_set(tp, POLL_SERDES);
14177         else
14178                 tg3_flag_clear(tp, POLL_SERDES);
14179
14180         tp->rx_offset = NET_IP_ALIGN;
14181         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14182         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14183             tg3_flag(tp, PCIX_MODE)) {
14184                 tp->rx_offset = 0;
14185 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14186                 tp->rx_copy_thresh = ~(u16)0;
14187 #endif
14188         }
14189
14190         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14191         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14192         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
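              /* These masks only work if the ring sizes are powers of two;
               * the producer/consumer indices can then wrap with a simple
               * AND, e.g. next = (idx + 1) & tp->rx_std_ring_mask.
               */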
14193
14194         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14195
14196         /* Increment the rx prod index on the rx std ring by at most
14197          * 8 for these chips to work around hw errata.
14198          */
14199         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14200             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14201             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14202                 tp->rx_std_max_post = 8;
14203
14204         if (tg3_flag(tp, ASPM_WORKAROUND))
14205                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14206                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14207
14208         return err;
14209 }
14210
14211 #ifdef CONFIG_SPARC
14212 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14213 {
14214         struct net_device *dev = tp->dev;
14215         struct pci_dev *pdev = tp->pdev;
14216         struct device_node *dp = pci_device_to_OF_node(pdev);
14217         const unsigned char *addr;
14218         int len;
14219
14220         addr = of_get_property(dp, "local-mac-address", &len);
14221         if (addr && len == 6) {
14222                 memcpy(dev->dev_addr, addr, 6);
14223                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14224                 return 0;
14225         }
14226         return -ENODEV;
14227 }
14228
14229 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14230 {
14231         struct net_device *dev = tp->dev;
14232
14233         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14234         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14235         return 0;
14236 }
14237 #endif
14238
14239 static int __devinit tg3_get_device_address(struct tg3 *tp)
14240 {
14241         struct net_device *dev = tp->dev;
14242         u32 hi, lo, mac_offset;
14243         int addr_ok = 0;
14244
14245 #ifdef CONFIG_SPARC
14246         if (!tg3_get_macaddr_sparc(tp))
14247                 return 0;
14248 #endif
14249
14250         mac_offset = 0x7c;
14251         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14252             tg3_flag(tp, 5780_CLASS)) {
14253                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14254                         mac_offset = 0xcc;
14255                 if (tg3_nvram_lock(tp))
14256                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14257                 else
14258                         tg3_nvram_unlock(tp);
14259         } else if (tg3_flag(tp, 5717_PLUS)) {
14260                 if (PCI_FUNC(tp->pdev->devfn) & 1)
14261                         mac_offset = 0xcc;
14262                 if (PCI_FUNC(tp->pdev->devfn) > 1)
14263                         mac_offset += 0x18c;
14264         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14265                 mac_offset = 0x10;
14266
14267         /* First try to get it from MAC address mailbox. */
14268         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
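              /* 0x484b is ASCII "HK", evidently the bootcode's signature
               * marking a valid MAC address in the mailbox.
               */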
14269         if ((hi >> 16) == 0x484b) {
14270                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14271                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14272
14273                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14274                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14275                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14276                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14277                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14278
14279                 /* Some old bootcode may report a 0 MAC address in SRAM */
14280                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14281         }
14282         if (!addr_ok) {
14283                 /* Next, try NVRAM. */
14284                 if (!tg3_flag(tp, NO_NVRAM) &&
14285                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14286                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14287                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14288                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14289                 }
14290                 /* Finally just fetch it out of the MAC control regs. */
14291                 else {
14292                         hi = tr32(MAC_ADDR_0_HIGH);
14293                         lo = tr32(MAC_ADDR_0_LOW);
14294
14295                         dev->dev_addr[5] = lo & 0xff;
14296                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14297                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14298                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14299                         dev->dev_addr[1] = hi & 0xff;
14300                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14301                 }
14302         }
14303
14304         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14305 #ifdef CONFIG_SPARC
14306                 if (!tg3_get_default_macaddr_sparc(tp))
14307                         return 0;
14308 #endif
14309                 return -EINVAL;
14310         }
14311         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14312         return 0;
14313 }
14314
14315 #define BOUNDARY_SINGLE_CACHELINE       1
14316 #define BOUNDARY_MULTI_CACHELINE        2
14317
14318 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14319 {
14320         int cacheline_size;
14321         u8 byte;
14322         int goal;
14323
14324         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14325         if (byte == 0)
14326                 cacheline_size = 1024;
14327         else
14328                 cacheline_size = (int) byte * 4;
14329
14330         /* On 5703 and later chips, the boundary bits have no
14331          * effect.
14332          */
14333         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14334             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14335             !tg3_flag(tp, PCI_EXPRESS))
14336                 goto out;
14337
14338 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14339         goal = BOUNDARY_MULTI_CACHELINE;
14340 #else
14341 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14342         goal = BOUNDARY_SINGLE_CACHELINE;
14343 #else
14344         goal = 0;
14345 #endif
14346 #endif
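              /* Per the comment below, PCI controllers on many RISC hosts
               * disconnect on bursts that cross a cache line: PPC64, IA64
               * and PARISC ask for multi-cacheline boundaries, SPARC64 and
               * ALPHA for single-cacheline ones, and other architectures
               * leave the boundary bits alone.
               */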
14347
14348         if (tg3_flag(tp, 57765_PLUS)) {
14349                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14350                 goto out;
14351         }
14352
14353         if (!goal)
14354                 goto out;
14355
14356         /* PCI controllers on most RISC systems tend to disconnect
14357          * when a device tries to burst across a cache-line boundary.
14358          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14359          *
14360          * Unfortunately, for PCI-E there are only limited
14361          * write-side controls for this, and thus for reads
14362          * we will still get the disconnects.  We'll also waste
14363          * these PCI cycles for both read and write for chips
14364          * other than 5700 and 5701 which do not implement the
14365          * boundary bits.
14366          */
14367         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14368                 switch (cacheline_size) {
14369                 case 16:
14370                 case 32:
14371                 case 64:
14372                 case 128:
14373                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14374                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14375                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14376                         } else {
14377                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14378                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14379                         }
14380                         break;
14381
14382                 case 256:
14383                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14384                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14385                         break;
14386
14387                 default:
14388                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14389                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14390                         break;
14391                 }
14392         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14393                 switch (cacheline_size) {
14394                 case 16:
14395                 case 32:
14396                 case 64:
14397                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14398                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14399                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14400                                 break;
14401                         }
14402                         /* fallthrough */
14403                 case 128:
14404                 default:
14405                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14406                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14407                         break;
14408                 }
14409         } else {
14410                 switch (cacheline_size) {
14411                 case 16:
14412                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14413                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14414                                         DMA_RWCTRL_WRITE_BNDRY_16);
14415                                 break;
14416                         }
14417                         /* fallthrough */
14418                 case 32:
14419                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14420                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14421                                         DMA_RWCTRL_WRITE_BNDRY_32);
14422                                 break;
14423                         }
14424                         /* fallthrough */
14425                 case 64:
14426                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14427                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14428                                         DMA_RWCTRL_WRITE_BNDRY_64);
14429                                 break;
14430                         }
14431                         /* fallthrough */
14432                 case 128:
14433                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14434                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14435                                         DMA_RWCTRL_WRITE_BNDRY_128);
14436                                 break;
14437                         }
14438                         /* fallthrough */
14439                 case 256:
14440                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14441                                 DMA_RWCTRL_WRITE_BNDRY_256);
14442                         break;
14443                 case 512:
14444                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14445                                 DMA_RWCTRL_WRITE_BNDRY_512);
14446                         break;
14447                 case 1024:
14448                 default:
14449                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14450                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14451                         break;
14452                 }
14453         }
14454
14455 out:
14456         return val;
14457 }
14458
14459 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14460 {
14461         struct tg3_internal_buffer_desc test_desc;
14462         u32 sram_dma_descs;
14463         int i, ret;
14464
14465         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14466
14467         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14468         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14469         tw32(RDMAC_STATUS, 0);
14470         tw32(WDMAC_STATUS, 0);
14471
14472         tw32(BUFMGR_MODE, 0);
14473         tw32(FTQ_RESET, 0);
14474
14475         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14476         test_desc.addr_lo = buf_dma & 0xffffffff;
14477         test_desc.nic_mbuf = 0x00002100;
14478         test_desc.len = size;
14479
14480         /*
14481          * HP ZX1 systems were seeing test failures for 5701 cards running
14482          * at 33MHz the *second* time the tg3 driver was loaded after an
14483          * initial scan.
14484          *
14485          * Broadcom tells me:
14486          *   ...the DMA engine is connected to the GRC block and a DMA
14487          *   reset may affect the GRC block in some unpredictable way...
14488          *   The behavior of resets to individual blocks has not been tested.
14489          *
14490          * Broadcom noted the GRC reset will also reset all sub-components.
14491          */
14492         if (to_device) {
14493                 test_desc.cqid_sqid = (13 << 8) | 2;
14494
14495                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14496                 udelay(40);
14497         } else {
14498                 test_desc.cqid_sqid = (16 << 8) | 7;
14499
14500                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14501                 udelay(40);
14502         }
14503         test_desc.flags = 0x00000005;
14504
14505         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14506                 u32 val;
14507
14508                 val = *(((u32 *)&test_desc) + i);
14509                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14510                                        sram_dma_descs + (i * sizeof(u32)));
14511                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14512         }
14513         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
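              /* The descriptor has been copied word-by-word into NIC SRAM
               * through the PCI config-space memory window; kicking the
               * appropriate FTQ below makes the DMA engine execute it.
               */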
14514
14515         if (to_device)
14516                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14517         else
14518                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14519
14520         ret = -ENODEV;
14521         for (i = 0; i < 40; i++) {
14522                 u32 val;
14523
14524                 if (to_device)
14525                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14526                 else
14527                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14528                 if ((val & 0xffff) == sram_dma_descs) {
14529                         ret = 0;
14530                         break;
14531                 }
14532
14533                 udelay(100);
14534         }
14535
14536         return ret;
14537 }
14538
14539 #define TEST_BUFFER_SIZE        0x2000
14540
14541 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14542         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14543         { },
14544 };
14545
14546 static int __devinit tg3_test_dma(struct tg3 *tp)
14547 {
14548         dma_addr_t buf_dma;
14549         u32 *buf, saved_dma_rwctrl;
14550         int ret = 0;
14551
14552         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14553                                  &buf_dma, GFP_KERNEL);
14554         if (!buf) {
14555                 ret = -ENOMEM;
14556                 goto out_nofree;
14557         }
14558
14559         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14560                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14561
14562         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14563
14564         if (tg3_flag(tp, 57765_PLUS))
14565                 goto out;
14566
14567         if (tg3_flag(tp, PCI_EXPRESS)) {
14568                 /* DMA read watermark not used on PCIE */
14569                 tp->dma_rwctrl |= 0x00180000;
14570         } else if (!tg3_flag(tp, PCIX_MODE)) {
14571                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14572                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14573                         tp->dma_rwctrl |= 0x003f0000;
14574                 else
14575                         tp->dma_rwctrl |= 0x003f000f;
14576         } else {
14577                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14578                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14579                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14580                         u32 read_water = 0x7;
14581
14582                         /* If the 5704 is behind the EPB bridge, we can
14583                          * do the less restrictive ONE_DMA workaround for
14584                          * better performance.
14585                          */
14586                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14587                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14588                                 tp->dma_rwctrl |= 0x8000;
14589                         else if (ccval == 0x6 || ccval == 0x7)
14590                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14591
14592                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14593                                 read_water = 4;
14594                         /* Set bit 23 to enable PCIX hw bug fix */
14595                         tp->dma_rwctrl |=
14596                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14597                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14598                                 (1 << 23);
14599                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14600                         /* 5780 always in PCIX mode */
14601                         tp->dma_rwctrl |= 0x00144000;
14602                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14603                         /* 5714 always in PCIX mode */
14604                         tp->dma_rwctrl |= 0x00148000;
14605                 } else {
14606                         tp->dma_rwctrl |= 0x001b000f;
14607                 }
14608         }
14609
14610         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14611             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14612                 tp->dma_rwctrl &= 0xfffffff0;
14613
14614         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14615             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14616                 /* Remove this if it causes problems for some boards. */
14617                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14618
14619                 /* On 5700/5701 chips, we need to set this bit.
14620                  * Otherwise the chip will issue cacheline transactions
14621                  * to streamable DMA memory without all of the byte
14622                  * enables turned on.  This is an error on several
14623                  * RISC PCI controllers, in particular sparc64.
14624                  *
14625                  * On 5703/5704 chips, this bit has been reassigned
14626                  * a different meaning.  In particular, it is used
14627                  * on those chips to enable a PCI-X workaround.
14628                  */
14629                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14630         }
14631
14632         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14633
14634 #if 0
14635         /* Unneeded, already done by tg3_get_invariants.  */
14636         tg3_switch_clocks(tp);
14637 #endif
14638
14639         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14640             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14641                 goto out;
14642
14643         /* It is best to perform the DMA test with the maximum write burst
14644          * size to expose the 5700/5701 write DMA bug.
14645          */
14646         saved_dma_rwctrl = tp->dma_rwctrl;
14647         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14648         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14649
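              /* Test loop: fill the buffer with a known pattern, DMA it to
               * the card and back, and verify it.  On a mismatch, retry
               * once with the conservative 16-byte write boundary before
               * giving up.
               */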
14650         while (1) {
14651                 u32 *p = buf, i;
14652
14653                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14654                         p[i] = i;
14655
14656                 /* Send the buffer to the chip. */
14657                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14658                 if (ret) {
14659                         dev_err(&tp->pdev->dev,
14660                                 "%s: Buffer write failed. err = %d\n",
14661                                 __func__, ret);
14662                         break;
14663                 }
14664
14665 #if 0
14666                 /* validate data reached card RAM correctly. */
14667                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14668                         u32 val;
14669                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
14670                         if (le32_to_cpu(val) != p[i]) {
14671                                 dev_err(&tp->pdev->dev,
14672                                         "%s: Buffer corrupted on device! "
14673                                         "(%d != %d)\n", __func__, val, i);
14674                                 /* ret = -ENODEV here? */
14675                         }
14676                         p[i] = 0;
14677                 }
14678 #endif
14679                 /* Now read it back. */
14680                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14681                 if (ret) {
14682                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14683                                 "err = %d\n", __func__, ret);
14684                         break;
14685                 }
14686
14687                 /* Verify it. */
14688                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14689                         if (p[i] == i)
14690                                 continue;
14691
14692                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14693                             DMA_RWCTRL_WRITE_BNDRY_16) {
14694                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14695                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14696                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14697                                 break;
14698                         } else {
14699                                 dev_err(&tp->pdev->dev,
14700                                         "%s: Buffer corrupted on read back! "
14701                                         "(%d != %d)\n", __func__, p[i], i);
14702                                 ret = -ENODEV;
14703                                 goto out;
14704                         }
14705                 }
14706
14707                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14708                         /* Success. */
14709                         ret = 0;
14710                         break;
14711                 }
14712         }
14713         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14714             DMA_RWCTRL_WRITE_BNDRY_16) {
14715                 /* DMA test passed without adjusting the DMA boundary;
14716                  * now look for chipsets that are known to expose the
14717                  * DMA bug without failing the test.
14718                  */
14719                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14720                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14721                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14722                 } else {
14723                         /* Safe to use the calculated DMA boundary. */
14724                         tp->dma_rwctrl = saved_dma_rwctrl;
14725                 }
14726
14727                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14728         }
14729
14730 out:
14731         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14732 out_nofree:
14733         return ret;
14734 }
14735
14736 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14737 {
14738         if (tg3_flag(tp, 57765_PLUS)) {
14739                 tp->bufmgr_config.mbuf_read_dma_low_water =
14740                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14741                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14742                         DEFAULT_MB_MACRX_LOW_WATER_57765;
14743                 tp->bufmgr_config.mbuf_high_water =
14744                         DEFAULT_MB_HIGH_WATER_57765;
14745
14746                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14747                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14748                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14749                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14750                 tp->bufmgr_config.mbuf_high_water_jumbo =
14751                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14752         } else if (tg3_flag(tp, 5705_PLUS)) {
14753                 tp->bufmgr_config.mbuf_read_dma_low_water =
14754                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14755                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14756                         DEFAULT_MB_MACRX_LOW_WATER_5705;
14757                 tp->bufmgr_config.mbuf_high_water =
14758                         DEFAULT_MB_HIGH_WATER_5705;
14759                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14760                         tp->bufmgr_config.mbuf_mac_rx_low_water =
14761                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
14762                         tp->bufmgr_config.mbuf_high_water =
14763                                 DEFAULT_MB_HIGH_WATER_5906;
14764                 }
14765
14766                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14767                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14768                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14769                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14770                 tp->bufmgr_config.mbuf_high_water_jumbo =
14771                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14772         } else {
14773                 tp->bufmgr_config.mbuf_read_dma_low_water =
14774                         DEFAULT_MB_RDMA_LOW_WATER;
14775                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14776                         DEFAULT_MB_MACRX_LOW_WATER;
14777                 tp->bufmgr_config.mbuf_high_water =
14778                         DEFAULT_MB_HIGH_WATER;
14779
14780                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14781                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14782                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14783                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14784                 tp->bufmgr_config.mbuf_high_water_jumbo =
14785                         DEFAULT_MB_HIGH_WATER_JUMBO;
14786         }
14787
14788         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14789         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14790 }
14791
14792 static char * __devinit tg3_phy_string(struct tg3 *tp)
14793 {
14794         switch (tp->phy_id & TG3_PHY_ID_MASK) {
14795         case TG3_PHY_ID_BCM5400:        return "5400";
14796         case TG3_PHY_ID_BCM5401:        return "5401";
14797         case TG3_PHY_ID_BCM5411:        return "5411";
14798         case TG3_PHY_ID_BCM5701:        return "5701";
14799         case TG3_PHY_ID_BCM5703:        return "5703";
14800         case TG3_PHY_ID_BCM5704:        return "5704";
14801         case TG3_PHY_ID_BCM5705:        return "5705";
14802         case TG3_PHY_ID_BCM5750:        return "5750";
14803         case TG3_PHY_ID_BCM5752:        return "5752";
14804         case TG3_PHY_ID_BCM5714:        return "5714";
14805         case TG3_PHY_ID_BCM5780:        return "5780";
14806         case TG3_PHY_ID_BCM5755:        return "5755";
14807         case TG3_PHY_ID_BCM5787:        return "5787";
14808         case TG3_PHY_ID_BCM5784:        return "5784";
14809         case TG3_PHY_ID_BCM5756:        return "5722/5756";
14810         case TG3_PHY_ID_BCM5906:        return "5906";
14811         case TG3_PHY_ID_BCM5761:        return "5761";
14812         case TG3_PHY_ID_BCM5718C:       return "5718C";
14813         case TG3_PHY_ID_BCM5718S:       return "5718S";
14814         case TG3_PHY_ID_BCM57765:       return "57765";
14815         case TG3_PHY_ID_BCM5719C:       return "5719C";
14816         case TG3_PHY_ID_BCM5720C:       return "5720C";
14817         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
14818         case 0:                 return "serdes";
14819         default:                return "unknown";
14820         }
14821 }
14822
14823 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14824 {
14825         if (tg3_flag(tp, PCI_EXPRESS)) {
14826                 strcpy(str, "PCI Express");
14827                 return str;
14828         } else if (tg3_flag(tp, PCIX_MODE)) {
14829                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14830
14831                 strcpy(str, "PCIX:");
14832
14833                 if ((clock_ctrl == 7) ||
14834                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14835                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14836                         strcat(str, "133MHz");
14837                 else if (clock_ctrl == 0)
14838                         strcat(str, "33MHz");
14839                 else if (clock_ctrl == 2)
14840                         strcat(str, "50MHz");
14841                 else if (clock_ctrl == 4)
14842                         strcat(str, "66MHz");
14843                 else if (clock_ctrl == 6)
14844                         strcat(str, "100MHz");
14845         } else {
14846                 strcpy(str, "PCI:");
14847                 if (tg3_flag(tp, PCI_HIGH_SPEED))
14848                         strcat(str, "66MHz");
14849                 else
14850                         strcat(str, "33MHz");
14851         }
14852         if (tg3_flag(tp, PCI_32BIT))
14853                 strcat(str, ":32-bit");
14854         else
14855                 strcat(str, ":64-bit");
14856         return str;
14857 }
14858
14859 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14860 {
14861         struct pci_dev *peer;
14862         unsigned int func, devnr = tp->pdev->devfn & ~7;
14863
14864         for (func = 0; func < 8; func++) {
14865                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14866                 if (peer && peer != tp->pdev)
14867                         break;
14868                 pci_dev_put(peer);
14869         }
14870         /* The 5704 can be configured in single-port mode; set peer to
14871          * tp->pdev in that case.
14872          */
14873         if (!peer) {
14874                 peer = tp->pdev;
14875                 return peer;
14876         }
14877
14878         /*
14879          * We don't need to keep the refcount elevated; there's no way
14880          * to remove one half of this device without removing the other.
14881          */
14882         pci_dev_put(peer);
14883
14884         return peer;
14885 }
14886
14887 static void __devinit tg3_init_coal(struct tg3 *tp)
14888 {
14889         struct ethtool_coalesce *ec = &tp->coal;
14890
14891         memset(ec, 0, sizeof(*ec));
14892         ec->cmd = ETHTOOL_GCOALESCE;
14893         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14894         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14895         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14896         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14897         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14898         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14899         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14900         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14901         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14902
14903         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14904                                  HOSTCC_MODE_CLRTICK_TXBD)) {
14905                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14906                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14907                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14908                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14909         }
14910
14911         if (tg3_flag(tp, 5705_PLUS)) {
14912                 ec->rx_coalesce_usecs_irq = 0;
14913                 ec->tx_coalesce_usecs_irq = 0;
14914                 ec->stats_block_coalesce_usecs = 0;
14915         }
14916 }
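/* Illustrative usage (not driver code): the defaults seeded above are
 * what "ethtool -c <iface>" reports once the device is registered, and
 * they can be tuned at runtime, e.g.:
 *
 *   ethtool -C eth0 rx-usecs 20 rx-frames 5
 *
 * The driver's ethtool set_coalesce handler validates such requests
 * against the chip limits before programming the HOSTCC registers.
 */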
14917
14918 static const struct net_device_ops tg3_netdev_ops = {
14919         .ndo_open               = tg3_open,
14920         .ndo_stop               = tg3_close,
14921         .ndo_start_xmit         = tg3_start_xmit,
14922         .ndo_get_stats64        = tg3_get_stats64,
14923         .ndo_validate_addr      = eth_validate_addr,
14924         .ndo_set_multicast_list = tg3_set_rx_mode,
14925         .ndo_set_mac_address    = tg3_set_mac_addr,
14926         .ndo_do_ioctl           = tg3_ioctl,
14927         .ndo_tx_timeout         = tg3_tx_timeout,
14928         .ndo_change_mtu         = tg3_change_mtu,
14929         .ndo_fix_features       = tg3_fix_features,
14930         .ndo_set_features       = tg3_set_features,
14931 #ifdef CONFIG_NET_POLL_CONTROLLER
14932         .ndo_poll_controller    = tg3_poll_controller,
14933 #endif
14934 };
14935
14936 static int __devinit tg3_init_one(struct pci_dev *pdev,
14937                                   const struct pci_device_id *ent)
14938 {
14939         struct net_device *dev;
14940         struct tg3 *tp;
14941         int i, err, pm_cap;
14942         u32 sndmbx, rcvmbx, intmbx;
14943         char str[40];
14944         u64 dma_mask, persist_dma_mask;
14945         u32 features = 0;
14946
14947         printk_once(KERN_INFO "%s\n", version);
14948
14949         err = pci_enable_device(pdev);
14950         if (err) {
14951                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14952                 return err;
14953         }
14954
14955         err = pci_request_regions(pdev, DRV_MODULE_NAME);
14956         if (err) {
14957                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14958                 goto err_out_disable_pdev;
14959         }
14960
14961         pci_set_master(pdev);
14962
14963         /* Find power-management capability. */
14964         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14965         if (pm_cap == 0) {
14966                 dev_err(&pdev->dev,
14967                         "Cannot find Power Management capability, aborting\n");
14968                 err = -EIO;
14969                 goto err_out_free_res;
14970         }
14971
14972         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14973         if (!dev) {
14974                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14975                 err = -ENOMEM;
14976                 goto err_out_free_res;
14977         }
14978
14979         SET_NETDEV_DEV(dev, &pdev->dev);
14980
14981         tp = netdev_priv(dev);
14982         tp->pdev = pdev;
14983         tp->dev = dev;
14984         tp->pm_cap = pm_cap;
14985         tp->rx_mode = TG3_DEF_RX_MODE;
14986         tp->tx_mode = TG3_DEF_TX_MODE;
14987         tp->irq_sync = 1;
14988
14989         if (tg3_debug > 0)
14990                 tp->msg_enable = tg3_debug;
14991         else
14992                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14993
14994         /* The word/byte swap controls set here govern register access
14995          * byte swapping.  DMA data byte swapping is controlled by the
14996          * GRC_MODE setting below.
14997          */
14998         tp->misc_host_ctrl =
14999                 MISC_HOST_CTRL_MASK_PCI_INT |
15000                 MISC_HOST_CTRL_WORD_SWAP |
15001                 MISC_HOST_CTRL_INDIR_ACCESS |
15002                 MISC_HOST_CTRL_PCISTATE_RW;
15003
15004         /* The NONFRM (non-frame) byte/word swap controls take effect
15005          * on descriptor entries, i.e. anything that isn't packet data.
15006          *
15007          * The StrongARM chips on the board (one for tx, one for rx)
15008          * are running in big-endian mode.
15009          */
15010         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15011                         GRC_MODE_WSWAP_NONFRM_DATA);
15012 #ifdef __BIG_ENDIAN
15013         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15014 #endif
15015         spin_lock_init(&tp->lock);
15016         spin_lock_init(&tp->indirect_lock);
15017         INIT_WORK(&tp->reset_task, tg3_reset_task);
15018
15019         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15020         if (!tp->regs) {
15021                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15022                 err = -ENOMEM;
15023                 goto err_out_free_dev;
15024         }
15025
15026         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15027         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15028
15029         dev->ethtool_ops = &tg3_ethtool_ops;
15030         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15031         dev->netdev_ops = &tg3_netdev_ops;
15032         dev->irq = pdev->irq;
15033
15034         err = tg3_get_invariants(tp);
15035         if (err) {
15036                 dev_err(&pdev->dev,
15037                         "Problem fetching invariants of chip, aborting\n");
15038                 goto err_out_iounmap;
15039         }
15040
15041         /* The EPB bridge inside the 5714, 5715, and 5780, and any
15042          * device behind the EPB, cannot support DMA addresses > 40-bit.
15043          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15044          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15045          * do DMA address check in tg3_start_xmit().
15046          */
15047         if (tg3_flag(tp, IS_5788))
15048                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15049         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15050                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15051 #ifdef CONFIG_HIGHMEM
15052                 dma_mask = DMA_BIT_MASK(64);
15053 #endif
15054         } else
15055                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15056
15057         /* Configure DMA attributes. */
15058         if (dma_mask > DMA_BIT_MASK(32)) {
15059                 err = pci_set_dma_mask(pdev, dma_mask);
15060                 if (!err) {
15061                         features |= NETIF_F_HIGHDMA;
15062                         err = pci_set_consistent_dma_mask(pdev,
15063                                                           persist_dma_mask);
15064                         if (err < 0) {
15065                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15066                                         "DMA for consistent allocations\n");
15067                                 goto err_out_iounmap;
15068                         }
15069                 }
15070         }
15071         if (err || dma_mask == DMA_BIT_MASK(32)) {
15072                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15073                 if (err) {
15074                         dev_err(&pdev->dev,
15075                                 "No usable DMA configuration, aborting\n");
15076                         goto err_out_iounmap;
15077                 }
15078         }
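        /* Aside (a sketch, not this driver's code): later kernels provide
         * dma_set_mask_and_coherent(), with which a simple fallback reads:
         *
         *   if (dma_set_mask_and_coherent(&pdev->dev, dma_mask))
         *           err = dma_set_mask_and_coherent(&pdev->dev,
         *                                           DMA_BIT_MASK(32));
         *
         * That helper keeps both masks equal, so it would not cover the
         * 40BIT_DMA_BUG + HIGHMEM case above, where the streaming and
         * coherent masks legitimately differ.
         */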
15079
15080         tg3_init_bufmgr_config(tp);
15081
15082         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15083
15084         /* 5700 B0 chips do not support checksumming correctly due
15085          * to hardware bugs.
15086          */
15087         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15088                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15089
15090                 if (tg3_flag(tp, 5755_PLUS))
15091                         features |= NETIF_F_IPV6_CSUM;
15092         }
15093
15094         /* TSO is on by default on chips that support hardware TSO.
15095          * Firmware TSO on older chips gives lower performance, so it
15096          * is off by default, but can be enabled using ethtool.
15097          */
15098         if ((tg3_flag(tp, HW_TSO_1) ||
15099              tg3_flag(tp, HW_TSO_2) ||
15100              tg3_flag(tp, HW_TSO_3)) &&
15101             (features & NETIF_F_IP_CSUM))
15102                 features |= NETIF_F_TSO;
15103         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15104                 if (features & NETIF_F_IPV6_CSUM)
15105                         features |= NETIF_F_TSO6;
15106                 if (tg3_flag(tp, HW_TSO_3) ||
15107                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15108                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15109                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15110                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15111                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15112                         features |= NETIF_F_TSO_ECN;
15113         }
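        /* Illustrative note: per the comment above, chips limited to
         * firmware TSO leave the feature off here, but an administrator
         * can still opt in from userspace, e.g. (hypothetical interface
         * name):
         *
         *   ethtool -K eth0 tso on
         */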
15114
15115         dev->features |= features;
15116         dev->vlan_features |= features;
15117
15118         /*
15119          * Add loopback capability only for a subset of devices that support
15120          * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
15121          * INT-PHY loopback for the remaining devices.
15122          */
15123         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15124             !tg3_flag(tp, CPMU_PRESENT))
15125                 /* Add the loopback capability */
15126                 features |= NETIF_F_LOOPBACK;
15127
15128         dev->hw_features |= features;
15129
15130         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15131             !tg3_flag(tp, TSO_CAPABLE) &&
15132             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15133                 tg3_flag_set(tp, MAX_RXPEND_64);
15134                 tp->rx_pending = 63;
15135         }
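        /* With MAX_RXPEND_64 set, the standard rx ring is capped at 64
         * descriptors on this slow-bus 5705_A1 configuration; posting 63
         * rather than 64 presumably leaves one slot of headroom between
         * the producer and consumer indices.
         */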
15136
15137         err = tg3_get_device_address(tp);
15138         if (err) {
15139                 dev_err(&pdev->dev,
15140                         "Could not obtain valid ethernet address, aborting\n");
15141                 goto err_out_iounmap;
15142         }
15143
15144         if (tg3_flag(tp, ENABLE_APE)) {
15145                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15146                 if (!tp->aperegs) {
15147                         dev_err(&pdev->dev,
15148                                 "Cannot map APE registers, aborting\n");
15149                         err = -ENOMEM;
15150                         goto err_out_iounmap;
15151                 }
15152
15153                 tg3_ape_lock_init(tp);
15154
15155                 if (tg3_flag(tp, ENABLE_ASF))
15156                         tg3_read_dash_ver(tp);
15157         }
15158
15159         /*
15160          * Reset chip in case UNDI or EFI driver did not shut it down.
15161          * Otherwise the DMA self test will enable WDMAC and we'll see
15162          * (spurious) pending DMA on the PCI bus at that point.
15163          */
15164         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15165             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15166                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15167                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15168         }
15169
15170         err = tg3_test_dma(tp);
15171         if (err) {
15172                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15173                 goto err_out_apeunmap;
15174         }
15175
15176         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15177         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15178         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15179         for (i = 0; i < tp->irq_max; i++) {
15180                 struct tg3_napi *tnapi = &tp->napi[i];
15181
15182                 tnapi->tp = tp;
15183                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15184
15185                 tnapi->int_mbox = intmbx;
15186                 if (i < 4)
15187                         intmbx += 0x8;
15188                 else
15189                         intmbx += 0x4;
15190
15191                 tnapi->consmbox = rcvmbx;
15192                 tnapi->prodmbox = sndmbx;
15193
15194                 if (i)
15195                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15196                 else
15197                         tnapi->coal_now = HOSTCC_MODE_NOW;
15198
15199                 if (!tg3_flag(tp, SUPPORT_MSIX))
15200                         break;
15201
15202                 /*
15203                  * If we support MSIX, we'll be using RSS.  If we're using
15204                  * RSS, the first vector only handles link interrupts and the
15205                  * remaining vectors handle rx and tx interrupts.  Reuse the
15206                  * mailbox values for the next iteration.  The values we set up
15207                  * above are still useful for the single-vector mode.
15208                  */
15209                 if (!i)
15210                         continue;
15211
15212                 rcvmbx += 0x8;
15213
15214                 if (sndmbx & 0x4)
15215                         sndmbx -= 0x4;
15216                 else
15217                         sndmbx += 0xc;
15218         }
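        /* Worked example of the zig-zag above, assuming TG3_64BIT_REG_LOW
         * is 0x4 (the low half of each 8-byte mailbox): relative to
         * MAILBOX_SNDHOST_PROD_IDX_0, vectors 0 and 1 share offset +0x4,
         * and vectors 2, 3, 4, 5 get +0x0, +0xc, +0x8, +0x14, i.e.
         * alternating 32-bit halves of successive 64-bit mailboxes.
         */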
15219
15220         tg3_init_coal(tp);
15221
15222         pci_set_drvdata(pdev, dev);
15223
15224         err = register_netdev(dev);
15225         if (err) {
15226                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15227                 goto err_out_apeunmap;
15228         }
15229
15230         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15231                     tp->board_part_number,
15232                     tp->pci_chip_rev_id,
15233                     tg3_bus_string(tp, str),
15234                     dev->dev_addr);
15235
15236         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15237                 struct phy_device *phydev;
15238                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15239                 netdev_info(dev,
15240                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15241                             phydev->drv->name, dev_name(&phydev->dev));
15242         } else {
15243                 char *ethtype;
15244
15245                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15246                         ethtype = "10/100Base-TX";
15247                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15248                         ethtype = "1000Base-SX";
15249                 else
15250                         ethtype = "10/100/1000Base-T";
15251
15252                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15253                             "(WireSpeed[%d], EEE[%d])\n",
15254                             tg3_phy_string(tp), ethtype,
15255                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15256                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15257         }
15258
15259         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15260                     (dev->features & NETIF_F_RXCSUM) != 0,
15261                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15262                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15263                     tg3_flag(tp, ENABLE_ASF) != 0,
15264                     tg3_flag(tp, TSO_CAPABLE) != 0);
15265         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15266                     tp->dma_rwctrl,
15267                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15268                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15269
15270         pci_save_state(pdev);
15271
15272         return 0;
15273
15274 err_out_apeunmap:
15275         if (tp->aperegs) {
15276                 iounmap(tp->aperegs);
15277                 tp->aperegs = NULL;
15278         }
15279
15280 err_out_iounmap:
15281         if (tp->regs) {
15282                 iounmap(tp->regs);
15283                 tp->regs = NULL;
15284         }
15285
15286 err_out_free_dev:
15287         free_netdev(dev);
15288
15289 err_out_free_res:
15290         pci_release_regions(pdev);
15291
15292 err_out_disable_pdev:
15293         pci_disable_device(pdev);
15294         pci_set_drvdata(pdev, NULL);
15295         return err;
15296 }
15297
15298 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15299 {
15300         struct net_device *dev = pci_get_drvdata(pdev);
15301
15302         if (dev) {
15303                 struct tg3 *tp = netdev_priv(dev);
15304
15305                 if (tp->fw)
15306                         release_firmware(tp->fw);
15307
15308                 cancel_work_sync(&tp->reset_task);
15309
15310                 if (tg3_flag(tp, USE_PHYLIB)) {
15311                         tg3_phy_fini(tp);
15312                         tg3_mdio_fini(tp);
15313                 }
15314
15315                 unregister_netdev(dev);
15316                 if (tp->aperegs) {
15317                         iounmap(tp->aperegs);
15318                         tp->aperegs = NULL;
15319                 }
15320                 if (tp->regs) {
15321                         iounmap(tp->regs);
15322                         tp->regs = NULL;
15323                 }
15324                 free_netdev(dev);
15325                 pci_release_regions(pdev);
15326                 pci_disable_device(pdev);
15327                 pci_set_drvdata(pdev, NULL);
15328         }
15329 }
15330
15331 #ifdef CONFIG_PM_SLEEP
15332 static int tg3_suspend(struct device *device)
15333 {
15334         struct pci_dev *pdev = to_pci_dev(device);
15335         struct net_device *dev = pci_get_drvdata(pdev);
15336         struct tg3 *tp = netdev_priv(dev);
15337         int err;
15338
15339         if (!netif_running(dev))
15340                 return 0;
15341
15342         flush_work_sync(&tp->reset_task);
15343         tg3_phy_stop(tp);
15344         tg3_netif_stop(tp);
15345
15346         del_timer_sync(&tp->timer);
15347
15348         tg3_full_lock(tp, 1);
15349         tg3_disable_ints(tp);
15350         tg3_full_unlock(tp);
15351
15352         netif_device_detach(dev);
15353
15354         tg3_full_lock(tp, 0);
15355         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15356         tg3_flag_clear(tp, INIT_COMPLETE);
15357         tg3_full_unlock(tp);
15358
15359         err = tg3_power_down_prepare(tp);
15360         if (err) {
15361                 int err2;
15362
15363                 tg3_full_lock(tp, 0);
15364
15365                 tg3_flag_set(tp, INIT_COMPLETE);
15366                 err2 = tg3_restart_hw(tp, 1);
15367                 if (err2)
15368                         goto out;
15369
15370                 tp->timer.expires = jiffies + tp->timer_offset;
15371                 add_timer(&tp->timer);
15372
15373                 netif_device_attach(dev);
15374                 tg3_netif_start(tp);
15375
15376 out:
15377                 tg3_full_unlock(tp);
15378
15379                 if (!err2)
15380                         tg3_phy_start(tp);
15381         }
15382
15383         return err;
15384 }
15385
15386 static int tg3_resume(struct device *device)
15387 {
15388         struct pci_dev *pdev = to_pci_dev(device);
15389         struct net_device *dev = pci_get_drvdata(pdev);
15390         struct tg3 *tp = netdev_priv(dev);
15391         int err;
15392
15393         if (!netif_running(dev))
15394                 return 0;
15395
15396         netif_device_attach(dev);
15397
15398         tg3_full_lock(tp, 0);
15399
15400         tg3_flag_set(tp, INIT_COMPLETE);
15401         err = tg3_restart_hw(tp, 1);
15402         if (err)
15403                 goto out;
15404
15405         tp->timer.expires = jiffies + tp->timer_offset;
15406         add_timer(&tp->timer);
15407
15408         tg3_netif_start(tp);
15409
15410 out:
15411         tg3_full_unlock(tp);
15412
15413         if (!err)
15414                 tg3_phy_start(tp);
15415
15416         return err;
15417 }
15418
15419 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15420 #define TG3_PM_OPS (&tg3_pm_ops)
15421
15422 #else
15423
15424 #define TG3_PM_OPS NULL
15425
15426 #endif /* CONFIG_PM_SLEEP */
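/* SIMPLE_DEV_PM_OPS() expands to a struct dev_pm_ops that reuses the two
 * callbacks above for all system-sleep transitions (suspend/resume,
 * freeze/thaw, poweroff/restore); TG3_PM_OPS is wired into the PCI
 * driver via the .driver.pm field below.
 */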
15427
15428 /**
15429  * tg3_io_error_detected - called when PCI error is detected
15430  * @pdev: Pointer to PCI device
15431  * @state: The current pci connection state
15432  *
15433  * This function is called after a PCI bus error affecting
15434  * this device has been detected.
15435  */
15436 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15437                                               pci_channel_state_t state)
15438 {
15439         struct net_device *netdev = pci_get_drvdata(pdev);
15440         struct tg3 *tp = netdev_priv(netdev);
15441         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15442
15443         netdev_info(netdev, "PCI I/O error detected\n");
15444
15445         rtnl_lock();
15446
15447         if (!netif_running(netdev))
15448                 goto done;
15449
15450         tg3_phy_stop(tp);
15451
15452         tg3_netif_stop(tp);
15453
15454         del_timer_sync(&tp->timer);
15455         tg3_flag_clear(tp, RESTART_TIMER);
15456
15457         /* Make sure that the reset task doesn't run */
15458         cancel_work_sync(&tp->reset_task);
15459         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15460         tg3_flag_clear(tp, RESTART_TIMER);
15461
15462         netif_device_detach(netdev);
15463
15464         /* Clean up software state, even if MMIO is blocked */
15465         tg3_full_lock(tp, 0);
15466         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15467         tg3_full_unlock(tp);
15468
15469 done:
15470         if (state == pci_channel_io_perm_failure)
15471                 err = PCI_ERS_RESULT_DISCONNECT;
15472         else
15473                 pci_disable_device(pdev);
15474
15475         rtnl_unlock();
15476
15477         return err;
15478 }
15479
15480 /**
15481  * tg3_io_slot_reset - called after the pci bus has been reset.
15482  * @pdev: Pointer to PCI device
15483  *
15484  * Restart the card from scratch, as if from a cold-boot.
15485  * At this point, the card has experienced a hard reset,
15486  * followed by fixups by BIOS, and has its config space
15487  * set up identically to what it was at cold boot.
15488  */
15489 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15490 {
15491         struct net_device *netdev = pci_get_drvdata(pdev);
15492         struct tg3 *tp = netdev_priv(netdev);
15493         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15494         int err;
15495
15496         rtnl_lock();
15497
15498         if (pci_enable_device(pdev)) {
15499                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15500                 goto done;
15501         }
15502
15503         pci_set_master(pdev);
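        /* Restore the config space snapshot taken at probe time, then
         * re-save it immediately so that a later recovery pass still has
         * a valid snapshot to restore.
         */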
15504         pci_restore_state(pdev);
15505         pci_save_state(pdev);
15506
15507         if (!netif_running(netdev)) {
15508                 rc = PCI_ERS_RESULT_RECOVERED;
15509                 goto done;
15510         }
15511
15512         err = tg3_power_up(tp);
15513         if (err) {
15514                 netdev_err(netdev, "Failed to restore register access.\n");
15515                 goto done;
15516         }
15517
15518         rc = PCI_ERS_RESULT_RECOVERED;
15519
15520 done:
15521         rtnl_unlock();
15522
15523         return rc;
15524 }
15525
15526 /**
15527  * tg3_io_resume - called when traffic can start flowing again.
15528  * @pdev: Pointer to PCI device
15529  *
15530  * This callback is called when the error recovery driver tells
15531  * us that it's OK to resume normal operation.
15532  */
15533 static void tg3_io_resume(struct pci_dev *pdev)
15534 {
15535         struct net_device *netdev = pci_get_drvdata(pdev);
15536         struct tg3 *tp = netdev_priv(netdev);
15537         int err;
15538
15539         rtnl_lock();
15540
15541         if (!netif_running(netdev))
15542                 goto done;
15543
15544         tg3_full_lock(tp, 0);
15545         tg3_flag_set(tp, INIT_COMPLETE);
15546         err = tg3_restart_hw(tp, 1);
15547         tg3_full_unlock(tp);
15548         if (err) {
15549                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15550                 goto done;
15551         }
15552
15553         netif_device_attach(netdev);
15554
15555         tp->timer.expires = jiffies + tp->timer_offset;
15556         add_timer(&tp->timer);
15557
15558         tg3_netif_start(tp);
15559
15560         tg3_phy_start(tp);
15561
15562 done:
15563         rtnl_unlock();
15564 }
15565
15566 static struct pci_error_handlers tg3_err_handler = {
15567         .error_detected = tg3_io_error_detected,
15568         .slot_reset     = tg3_io_slot_reset,
15569         .resume         = tg3_io_resume
15570 };
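/* The PCI error-recovery core invokes these in order: error_detected()
 * quiesces the device, slot_reset() runs after the link or bus reset,
 * and resume() restarts traffic once recovery succeeds.  See
 * Documentation/PCI/pci-error-recovery.txt for the full state machine.
 */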
15571
15572 static struct pci_driver tg3_driver = {
15573         .name           = DRV_MODULE_NAME,
15574         .id_table       = tg3_pci_tbl,
15575         .probe          = tg3_init_one,
15576         .remove         = __devexit_p(tg3_remove_one),
15577         .err_handler    = &tg3_err_handler,
15578         .driver.pm      = TG3_PM_OPS,
15579 };
15580
15581 static int __init tg3_init(void)
15582 {
15583         return pci_register_driver(&tg3_driver);
15584 }
15585
15586 static void __exit tg3_cleanup(void)
15587 {
15588         pci_unregister_driver(&tg3_driver);
15589 }
15590
15591 module_init(tg3_init);
15592 module_exit(tg3_cleanup);