/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
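/* Usage note: callers pass the flag name without the TG3_FLAG_ prefix;
 * the macros paste it on, e.g. tg3_flag(tp, ENABLE_APE) tests
 * TG3_FLAG_ENABLE_APE in tp->tg3_flags.
 */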

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     123
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "March 21, 2012"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
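/* Since TG3_TX_RING_SIZE is a power of two, the mask above is equivalent
 * to (N + 1) % TG3_TX_RING_SIZE, e.g. NEXT_TX(511) wraps back to 0.
 */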

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

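/* Indirect register access: the target offset is written to the
 * TG3PCI_REG_BASE_ADDR window in PCI config space and the data is then
 * moved through TG3PCI_REG_DATA; indirect_lock serializes users of the
 * shared window.
 */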
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

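/* Register access funnels through these wrappers; tp->write32, tp->read32
 * and the mailbox variants are function pointers, so the rest of the driver
 * can stay agnostic to per-chip quirks (cf. the PCIX_TARGET_HWBUG and
 * ICH_WORKAROUND handling above).
 */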
#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver isn't holding any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
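                /* else: fall through */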
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

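/* Release a lock previously taken with tg3_ape_lock(); every successful
 * tg3_ape_lock() call is expected to be balanced by a matching unlock.
 */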
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
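                /* else: fall through */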
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* NCSI does not support APE events */
        if (tg3_flag(tp, APE_HAS_NCSI))
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

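/* MII register access goes through MAC_MI_COM: a frame is composed from
 * the PHY and register addresses plus a read or write command, and
 * MI_COM_BUSY is then polled in 10 usec steps, so PHY_BUSY_LOOPS bounds
 * the wait at roughly 50 ms.
 */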
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

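/* Helpers that toggle the PHY's SMDSP block via the shadow aux control
 * register; DSP accesses through MII_TG3_DSP_ADDRESS/MII_TG3_DSP_RW_PORT
 * above are intended to be bracketed by an enable/disable pair.
 */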
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
1355         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1356                 tg3_bmcr_reset(tp);
1357
1358         i = mdiobus_register(tp->mdio_bus);
1359         if (i) {
1360                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1361                 mdiobus_free(tp->mdio_bus);
1362                 return i;
1363         }
1364
1365         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1366
1367         if (!phydev || !phydev->drv) {
1368                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1369                 mdiobus_unregister(tp->mdio_bus);
1370                 mdiobus_free(tp->mdio_bus);
1371                 return -ENODEV;
1372         }
1373
1374         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1375         case PHY_ID_BCM57780:
1376                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1377                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1378                 break;
1379         case PHY_ID_BCM50610:
1380         case PHY_ID_BCM50610M:
1381                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1382                                      PHY_BRCM_RX_REFCLK_UNUSED |
1383                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1384                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1385                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1386                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1387                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1388                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1389                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1390                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1391                 /* fallthru */
1392         case PHY_ID_RTL8211C:
1393                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1394                 break;
1395         case PHY_ID_RTL8201E:
1396         case PHY_ID_BCMAC131:
1397                 phydev->interface = PHY_INTERFACE_MODE_MII;
1398                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1399                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1400                 break;
1401         }
1402
1403         tg3_flag_set(tp, MDIOBUS_INITED);
1404
1405         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1406                 tg3_mdio_config_5785(tp);
1407
1408         return 0;
1409 }
1410
1411 static void tg3_mdio_fini(struct tg3 *tp)
1412 {
1413         if (tg3_flag(tp, MDIOBUS_INITED)) {
1414                 tg3_flag_clear(tp, MDIOBUS_INITED);
1415                 mdiobus_unregister(tp->mdio_bus);
1416                 mdiobus_free(tp->mdio_bus);
1417         }
1418 }
1419
1420 /* tp->lock is held. */
1421 static inline void tg3_generate_fw_event(struct tg3 *tp)
1422 {
1423         u32 val;
1424
1425         val = tr32(GRC_RX_CPU_EVENT);
1426         val |= GRC_RX_CPU_DRIVER_EVENT;
1427         tw32_f(GRC_RX_CPU_EVENT, val);
1428
1429         tp->last_event_jiffies = jiffies;
1430 }
1431
1432 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1433
1434 /* tp->lock is held. */
1435 static void tg3_wait_for_event_ack(struct tg3 *tp)
1436 {
1437         int i;
1438         unsigned int delay_cnt;
1439         long time_remain;
1440
1441         /* If enough time has passed, no wait is necessary. */
1442         time_remain = (long)(tp->last_event_jiffies + 1 +
1443                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1444                       (long)jiffies;
1445         if (time_remain < 0)
1446                 return;
1447
1448         /* Check if we can shorten the wait time. */
1449         delay_cnt = jiffies_to_usecs(time_remain);
1450         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1451                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1452         delay_cnt = (delay_cnt >> 3) + 1;
1453
1454         for (i = 0; i < delay_cnt; i++) {
1455                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1456                         break;
1457                 udelay(8);
1458         }
1459 }
1460
1461 /* tp->lock is held. */
1462 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1463 {
1464         u32 reg, val;
1465
1466         val = 0;
1467         if (!tg3_readphy(tp, MII_BMCR, &reg))
1468                 val = reg << 16;
1469         if (!tg3_readphy(tp, MII_BMSR, &reg))
1470                 val |= (reg & 0xffff);
1471         *data++ = val;
1472
1473         val = 0;
1474         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1475                 val = reg << 16;
1476         if (!tg3_readphy(tp, MII_LPA, &reg))
1477                 val |= (reg & 0xffff);
1478         *data++ = val;
1479
1480         val = 0;
1481         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1482                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1483                         val = reg << 16;
1484                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1485                         val |= (reg & 0xffff);
1486         }
1487         *data++ = val;
1488
1489         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1490                 val = reg << 16;
1491         else
1492                 val = 0;
1493         *data++ = val;
1494 }
1495
1496 /* tp->lock is held. */
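/* Report the current link state to the UMP/ASF management firmware by
 * writing the gathered PHY registers into the NIC SRAM command mailbox
 * and raising a driver event.
 */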
1497 static void tg3_ump_link_report(struct tg3 *tp)
1498 {
1499         u32 data[4];
1500
1501         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1502                 return;
1503
1504         tg3_phy_gather_ump_data(tp, data);
1505
1506         tg3_wait_for_event_ack(tp);
1507
1508         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1509         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1510         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1511         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1512         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1513         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1514
1515         tg3_generate_fw_event(tp);
1516 }
1517
1518 /* tp->lock is held. */
1519 static void tg3_stop_fw(struct tg3 *tp)
1520 {
1521         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1522                 /* Wait for RX CPU to ACK the previous event. */
1523                 tg3_wait_for_event_ack(tp);
1524
1525                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1526
1527                 tg3_generate_fw_event(tp);
1528
1529                 /* Wait for RX CPU to ACK this event. */
1530                 tg3_wait_for_event_ack(tp);
1531         }
1532 }
1533
1534 /* tp->lock is held. */
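/* Advertise the impending reset to the firmware by writing the magic
 * handshake value and, with the newer ASF handshake, a DRV_STATE_*
 * code describing the kind of reset.
 */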
1535 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1536 {
1537         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1538                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1539
1540         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1541                 switch (kind) {
1542                 case RESET_KIND_INIT:
1543                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1544                                       DRV_STATE_START);
1545                         break;
1546
1547                 case RESET_KIND_SHUTDOWN:
1548                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1549                                       DRV_STATE_UNLOAD);
1550                         break;
1551
1552                 case RESET_KIND_SUSPEND:
1553                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1554                                       DRV_STATE_SUSPEND);
1555                         break;
1556
1557                 default:
1558                         break;
1559                 }
1560         }
1561
1562         if (kind == RESET_KIND_INIT ||
1563             kind == RESET_KIND_SUSPEND)
1564                 tg3_ape_driver_state_change(tp, kind);
1565 }
1566
1567 /* tp->lock is held. */
1568 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1569 {
1570         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1571                 switch (kind) {
1572                 case RESET_KIND_INIT:
1573                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1574                                       DRV_STATE_START_DONE);
1575                         break;
1576
1577                 case RESET_KIND_SHUTDOWN:
1578                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1579                                       DRV_STATE_UNLOAD_DONE);
1580                         break;
1581
1582                 default:
1583                         break;
1584                 }
1585         }
1586
1587         if (kind == RESET_KIND_SHUTDOWN)
1588                 tg3_ape_driver_state_change(tp, kind);
1589 }
1590
1591 /* tp->lock is held. */
1592 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1593 {
1594         if (tg3_flag(tp, ENABLE_ASF)) {
1595                 switch (kind) {
1596                 case RESET_KIND_INIT:
1597                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1598                                       DRV_STATE_START);
1599                         break;
1600
1601                 case RESET_KIND_SHUTDOWN:
1602                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1603                                       DRV_STATE_UNLOAD);
1604                         break;
1605
1606                 case RESET_KIND_SUSPEND:
1607                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1608                                       DRV_STATE_SUSPEND);
1609                         break;
1610
1611                 default:
1612                         break;
1613                 }
1614         }
1615 }
1616
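/* Wait for the bootcode to finish after a reset.
 * tg3_write_sig_pre_reset() deposits MAGIC1 in the firmware mailbox;
 * the bootcode acknowledges by writing back its one's complement.
 */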
1617 static int tg3_poll_fw(struct tg3 *tp)
1618 {
1619         int i;
1620         u32 val;
1621
1622         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1623                 /* Wait up to 20ms for init done. */
1624                 for (i = 0; i < 200; i++) {
1625                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1626                                 return 0;
1627                         udelay(100);
1628                 }
1629                 return -ENODEV;
1630         }
1631
1632         /* Wait for firmware initialization to complete. */
1633         for (i = 0; i < 100000; i++) {
1634                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1635                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1636                         break;
1637                 udelay(10);
1638         }
1639
1640         /* Chip might not be fitted with firmware.  Some Sun onboard
1641          * parts are configured like that.  So don't signal the timeout
1642          * of the above loop as an error, but do report the lack of
1643          * running firmware once.
1644          */
1645         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1646                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1647
1648                 netdev_info(tp->dev, "No firmware running\n");
1649         }
1650
1651         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1652                 /* The 57765 A0 needs a little more
1653                  * time to do some important work.
1654                  */
1655                 mdelay(10);
1656         }
1657
1658         return 0;
1659 }
1660
1661 static void tg3_link_report(struct tg3 *tp)
1662 {
1663         if (!netif_carrier_ok(tp->dev)) {
1664                 netif_info(tp, link, tp->dev, "Link is down\n");
1665                 tg3_ump_link_report(tp);
1666         } else if (netif_msg_link(tp)) {
1667                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1668                             (tp->link_config.active_speed == SPEED_1000 ?
1669                              1000 :
1670                              (tp->link_config.active_speed == SPEED_100 ?
1671                               100 : 10)),
1672                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1673                              "full" : "half"));
1674
1675                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1676                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1677                             "on" : "off",
1678                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1679                             "on" : "off");
1680
1681                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1682                         netdev_info(tp->dev, "EEE is %s\n",
1683                                     tp->setlpicnt ? "enabled" : "disabled");
1684
1685                 tg3_ump_link_report(tp);
1686         }
1687 }
1688
1689 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1690 {
1691         u16 miireg;
1692
1693         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1694                 miireg = ADVERTISE_1000XPAUSE;
1695         else if (flow_ctrl & FLOW_CTRL_TX)
1696                 miireg = ADVERTISE_1000XPSE_ASYM;
1697         else if (flow_ctrl & FLOW_CTRL_RX)
1698                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1699         else
1700                 miireg = 0;
1701
1702         return miireg;
1703 }
1704
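/* Resolve the negotiated 1000BASE-X pause configuration.  Symmetric
 * pause advertised by both sides enables flow control in both
 * directions; a shared asymmetric-pause bit enables it in one
 * direction, chosen by which side also advertised symmetric pause.
 */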
1705 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1706 {
1707         u8 cap = 0;
1708
1709         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1710                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1711         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1712                 if (lcladv & ADVERTISE_1000XPAUSE)
1713                         cap = FLOW_CTRL_RX;
1714                 if (rmtadv & ADVERTISE_1000XPAUSE)
1715                         cap = FLOW_CTRL_TX;
1716         }
1717
1718         return cap;
1719 }
1720
1721 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1722 {
1723         u8 autoneg;
1724         u8 flowctrl = 0;
1725         u32 old_rx_mode = tp->rx_mode;
1726         u32 old_tx_mode = tp->tx_mode;
1727
1728         if (tg3_flag(tp, USE_PHYLIB))
1729                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1730         else
1731                 autoneg = tp->link_config.autoneg;
1732
1733         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1734                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1735                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1736                 else
1737                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1738         } else
1739                 flowctrl = tp->link_config.flowctrl;
1740
1741         tp->link_config.active_flowctrl = flowctrl;
1742
1743         if (flowctrl & FLOW_CTRL_RX)
1744                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1745         else
1746                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1747
1748         if (old_rx_mode != tp->rx_mode)
1749                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1750
1751         if (flowctrl & FLOW_CTRL_TX)
1752                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1753         else
1754                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1755
1756         if (old_tx_mode != tp->tx_mode)
1757                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1758 }
1759
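/* phylib link-change callback: mirror the negotiated speed, duplex and
 * pause settings into the MAC registers and log a link report whenever
 * any of them changed.
 */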
1760 static void tg3_adjust_link(struct net_device *dev)
1761 {
1762         u8 oldflowctrl, linkmesg = 0;
1763         u32 mac_mode, lcl_adv, rmt_adv;
1764         struct tg3 *tp = netdev_priv(dev);
1765         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1766
1767         spin_lock_bh(&tp->lock);
1768
1769         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1770                                     MAC_MODE_HALF_DUPLEX);
1771
1772         oldflowctrl = tp->link_config.active_flowctrl;
1773
1774         if (phydev->link) {
1775                 lcl_adv = 0;
1776                 rmt_adv = 0;
1777
1778                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1779                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1780                 else if (phydev->speed == SPEED_1000 ||
1781                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1782                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1783                 else
1784                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1785
1786                 if (phydev->duplex == DUPLEX_HALF)
1787                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1788                 else {
1789                         lcl_adv = mii_advertise_flowctrl(
1790                                   tp->link_config.flowctrl);
1791
1792                         if (phydev->pause)
1793                                 rmt_adv = LPA_PAUSE_CAP;
1794                         if (phydev->asym_pause)
1795                                 rmt_adv |= LPA_PAUSE_ASYM;
1796                 }
1797
1798                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1799         } else
1800                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1801
1802         if (mac_mode != tp->mac_mode) {
1803                 tp->mac_mode = mac_mode;
1804                 tw32_f(MAC_MODE, tp->mac_mode);
1805                 udelay(40);
1806         }
1807
1808         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1809                 if (phydev->speed == SPEED_10)
1810                         tw32(MAC_MI_STAT,
1811                              MAC_MI_STAT_10MBPS_MODE |
1812                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1813                 else
1814                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1815         }
1816
1817         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1818                 tw32(MAC_TX_LENGTHS,
1819                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1820                       (6 << TX_LENGTHS_IPG_SHIFT) |
1821                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1822         else
1823                 tw32(MAC_TX_LENGTHS,
1824                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1825                       (6 << TX_LENGTHS_IPG_SHIFT) |
1826                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1827
1828         if (phydev->link != tp->old_link ||
1829             phydev->speed != tp->link_config.active_speed ||
1830             phydev->duplex != tp->link_config.active_duplex ||
1831             oldflowctrl != tp->link_config.active_flowctrl)
1832                 linkmesg = 1;
1833
1834         tp->old_link = phydev->link;
1835         tp->link_config.active_speed = phydev->speed;
1836         tp->link_config.active_duplex = phydev->duplex;
1837
1838         spin_unlock_bh(&tp->lock);
1839
1840         if (linkmesg)
1841                 tg3_link_report(tp);
1842 }
1843
1844 static int tg3_phy_init(struct tg3 *tp)
1845 {
1846         struct phy_device *phydev;
1847
1848         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1849                 return 0;
1850
1851         /* Bring the PHY back to a known state. */
1852         tg3_bmcr_reset(tp);
1853
1854         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1855
1856         /* Attach the MAC to the PHY. */
1857         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1858                              phydev->dev_flags, phydev->interface);
1859         if (IS_ERR(phydev)) {
1860                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1861                 return PTR_ERR(phydev);
1862         }
1863
1864         /* Mask with MAC supported features. */
1865         switch (phydev->interface) {
1866         case PHY_INTERFACE_MODE_GMII:
1867         case PHY_INTERFACE_MODE_RGMII:
1868                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1869                         phydev->supported &= (PHY_GBIT_FEATURES |
1870                                               SUPPORTED_Pause |
1871                                               SUPPORTED_Asym_Pause);
1872                         break;
1873                 }
1874                 /* fallthru */
1875         case PHY_INTERFACE_MODE_MII:
1876                 phydev->supported &= (PHY_BASIC_FEATURES |
1877                                       SUPPORTED_Pause |
1878                                       SUPPORTED_Asym_Pause);
1879                 break;
1880         default:
1881                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1882                 return -EINVAL;
1883         }
1884
1885         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1886
1887         phydev->advertising = phydev->supported;
1888
1889         return 0;
1890 }
1891
1892 static void tg3_phy_start(struct tg3 *tp)
1893 {
1894         struct phy_device *phydev;
1895
1896         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1897                 return;
1898
1899         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1900
1901         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1902                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1903                 phydev->speed = tp->link_config.speed;
1904                 phydev->duplex = tp->link_config.duplex;
1905                 phydev->autoneg = tp->link_config.autoneg;
1906                 phydev->advertising = tp->link_config.advertising;
1907         }
1908
1909         phy_start(phydev);
1910
1911         phy_start_aneg(phydev);
1912 }
1913
1914 static void tg3_phy_stop(struct tg3 *tp)
1915 {
1916         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1917                 return;
1918
1919         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1920 }
1921
1922 static void tg3_phy_fini(struct tg3 *tp)
1923 {
1924         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1925                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1926                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1927         }
1928 }
1929
1930 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1931 {
1932         int err;
1933         u32 val;
1934
1935         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1936                 return 0;
1937
1938         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1939                 /* Cannot do read-modify-write on 5401 */
1940                 err = tg3_phy_auxctl_write(tp,
1941                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1942                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1943                                            0x4c20);
1944                 goto done;
1945         }
1946
1947         err = tg3_phy_auxctl_read(tp,
1948                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1949         if (err)
1950                 return err;
1951
1952         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1953         err = tg3_phy_auxctl_write(tp,
1954                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1955
1956 done:
1957         return err;
1958 }
1959
1960 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1961 {
1962         u32 phytest;
1963
1964         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1965                 u32 phy;
1966
1967                 tg3_writephy(tp, MII_TG3_FET_TEST,
1968                              phytest | MII_TG3_FET_SHADOW_EN);
1969                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1970                         if (enable)
1971                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1972                         else
1973                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1974                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1975                 }
1976                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1977         }
1978 }
1979
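/* Enable or disable the PHY's auto power-down (APD) feature through
 * the misc shadow registers; FET-style PHYs use the separate shadow
 * access sequence above.
 */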
1980 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1981 {
1982         u32 reg;
1983
1984         if (!tg3_flag(tp, 5705_PLUS) ||
1985             (tg3_flag(tp, 5717_PLUS) &&
1986              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1987                 return;
1988
1989         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1990                 tg3_phy_fet_toggle_apd(tp, enable);
1991                 return;
1992         }
1993
1994         reg = MII_TG3_MISC_SHDW_WREN |
1995               MII_TG3_MISC_SHDW_SCR5_SEL |
1996               MII_TG3_MISC_SHDW_SCR5_LPED |
1997               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1998               MII_TG3_MISC_SHDW_SCR5_SDTL |
1999               MII_TG3_MISC_SHDW_SCR5_C125OE;
2000         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2001                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2002
2003         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2004
2005
2006         reg = MII_TG3_MISC_SHDW_WREN |
2007               MII_TG3_MISC_SHDW_APD_SEL |
2008               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2009         if (enable)
2010                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2011
2012         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2013 }
2014
2015 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2016 {
2017         u32 phy;
2018
2019         if (!tg3_flag(tp, 5705_PLUS) ||
2020             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2021                 return;
2022
2023         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2024                 u32 ephy;
2025
2026                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2027                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2028
2029                         tg3_writephy(tp, MII_TG3_FET_TEST,
2030                                      ephy | MII_TG3_FET_SHADOW_EN);
2031                         if (!tg3_readphy(tp, reg, &phy)) {
2032                                 if (enable)
2033                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2034                                 else
2035                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2036                                 tg3_writephy(tp, reg, phy);
2037                         }
2038                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2039                 }
2040         } else {
2041                 int ret;
2042
2043                 ret = tg3_phy_auxctl_read(tp,
2044                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2045                 if (!ret) {
2046                         if (enable)
2047                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2048                         else
2049                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2050                         tg3_phy_auxctl_write(tp,
2051                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2052                 }
2053         }
2054 }
2055
2056 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2057 {
2058         int ret;
2059         u32 val;
2060
2061         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2062                 return;
2063
2064         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2065         if (!ret)
2066                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2067                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2068 }
2069
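/* Unpack the factory OTP calibration value cached in tp->phy_otp into
 * the individual PHY DSP tap registers.
 */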
2070 static void tg3_phy_apply_otp(struct tg3 *tp)
2071 {
2072         u32 otp, phy;
2073
2074         if (!tp->phy_otp)
2075                 return;
2076
2077         otp = tp->phy_otp;
2078
2079         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2080                 return;
2081
2082         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2083         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2084         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2085
2086         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2087               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2088         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2089
2090         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2091         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2092         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2093
2094         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2095         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2096
2097         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2098         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2099
2100         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2101               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2102         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2103
2104         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2105 }
2106
2107 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2108 {
2109         u32 val;
2110
2111         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2112                 return;
2113
2114         tp->setlpicnt = 0;
2115
2116         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2117             current_link_up == 1 &&
2118             tp->link_config.active_duplex == DUPLEX_FULL &&
2119             (tp->link_config.active_speed == SPEED_100 ||
2120              tp->link_config.active_speed == SPEED_1000)) {
2121                 u32 eeectl;
2122
2123                 if (tp->link_config.active_speed == SPEED_1000)
2124                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2125                 else
2126                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2127
2128                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2129
2130                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2131                                   TG3_CL45_D7_EEERES_STAT, &val);
2132
2133                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2134                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2135                         tp->setlpicnt = 2;
2136         }
2137
2138         if (!tp->setlpicnt) {
2139                 if (current_link_up == 1 &&
2140                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2141                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2142                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2143                 }
2144
2145                 val = tr32(TG3_CPMU_EEE_MODE);
2146                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2147         }
2148 }
2149
2150 static void tg3_phy_eee_enable(struct tg3 *tp)
2151 {
2152         u32 val;
2153
2154         if (tp->link_config.active_speed == SPEED_1000 &&
2155             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2156              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2157              tg3_flag(tp, 57765_CLASS)) &&
2158             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2159                 val = MII_TG3_DSP_TAP26_ALNOKO |
2160                       MII_TG3_DSP_TAP26_RMRXSTO;
2161                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2162                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2163         }
2164
2165         val = tr32(TG3_CPMU_EEE_MODE);
2166         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2167 }
2168
2169 static int tg3_wait_macro_done(struct tg3 *tp)
2170 {
2171         int limit = 100;
2172
2173         while (limit--) {
2174                 u32 tmp32;
2175
2176                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2177                         if ((tmp32 & 0x1000) == 0)
2178                                 break;  /* macro no longer busy */
2179                 }
2180         }
2181         if (limit < 0)
2182                 return -EBUSY;
2183
2184         return 0;
2185 }
2186
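/* Write a known test pattern into each of the four DSP channels and
 * read it back.  Any mismatch or macro timeout sets *resetp so the
 * caller knows to reset the PHY and retry.
 */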
2187 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2188 {
2189         static const u32 test_pat[4][6] = {
2190         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2191         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2192         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2193         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2194         };
2195         int chan;
2196
2197         for (chan = 0; chan < 4; chan++) {
2198                 int i;
2199
2200                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2201                              (chan * 0x2000) | 0x0200);
2202                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2203
2204                 for (i = 0; i < 6; i++)
2205                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2206                                      test_pat[chan][i]);
2207
2208                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2209                 if (tg3_wait_macro_done(tp)) {
2210                         *resetp = 1;
2211                         return -EBUSY;
2212                 }
2213
2214                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2215                              (chan * 0x2000) | 0x0200);
2216                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2217                 if (tg3_wait_macro_done(tp)) {
2218                         *resetp = 1;
2219                         return -EBUSY;
2220                 }
2221
2222                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2223                 if (tg3_wait_macro_done(tp)) {
2224                         *resetp = 1;
2225                         return -EBUSY;
2226                 }
2227
2228                 for (i = 0; i < 6; i += 2) {
2229                         u32 low, high;
2230
2231                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2232                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2233                             tg3_wait_macro_done(tp)) {
2234                                 *resetp = 1;
2235                                 return -EBUSY;
2236                         }
2237                         low &= 0x7fff;
2238                         high &= 0x000f;
2239                         if (low != test_pat[chan][i] ||
2240                             high != test_pat[chan][i+1]) {
2241                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2242                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2243                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2244
2245                                 return -EBUSY;
2246                         }
2247                 }
2248         }
2249
2250         return 0;
2251 }
2252
2253 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2254 {
2255         int chan;
2256
2257         for (chan = 0; chan < 4; chan++) {
2258                 int i;
2259
2260                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2261                              (chan * 0x2000) | 0x0200);
2262                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2263                 for (i = 0; i < 6; i++)
2264                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2265                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2266                 if (tg3_wait_macro_done(tp))
2267                         return -EBUSY;
2268         }
2269
2270         return 0;
2271 }
2272
2273 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2274 {
2275         u32 reg32, phy9_orig;
2276         int retries, do_phy_reset, err;
2277
2278         retries = 10;
2279         do_phy_reset = 1;
2280         do {
2281                 if (do_phy_reset) {
2282                         err = tg3_bmcr_reset(tp);
2283                         if (err)
2284                                 return err;
2285                         do_phy_reset = 0;
2286                 }
2287
2288                 /* Disable transmitter and interrupt.  */
2289                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2290                         continue;
2291
2292                 reg32 |= 0x3000;
2293                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2294
2295                 /* Set full-duplex, 1000 Mbps.  */
2296                 tg3_writephy(tp, MII_BMCR,
2297                              BMCR_FULLDPLX | BMCR_SPEED1000);
2298
2299                 /* Set to master mode.  */
2300                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2301                         continue;
2302
2303                 tg3_writephy(tp, MII_CTRL1000,
2304                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2305
2306                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2307                 if (err)
2308                         return err;
2309
2310                 /* Block the PHY control access.  */
2311                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2312
2313                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2314                 if (!err)
2315                         break;
2316         } while (--retries);
2317
2318         err = tg3_phy_reset_chanpat(tp);
2319         if (err)
2320                 return err;
2321
2322         tg3_phydsp_write(tp, 0x8005, 0x0000);
2323
2324         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2325         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2326
2327         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2328
2329         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2330
2331         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2332                 reg32 &= ~0x3000;
2333                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2334         } else if (!err)
2335                 err = -EBUSY;
2336
2337         return err;
2338 }
2339
2340 /* Reset the tigon3 PHY and reapply the chip-specific PHY
2341  * workarounds before returning.
2342  */
2343 static int tg3_phy_reset(struct tg3 *tp)
2344 {
2345         u32 val, cpmuctrl;
2346         int err;
2347
2348         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2349                 val = tr32(GRC_MISC_CFG);
2350                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2351                 udelay(40);
2352         }
2353         err  = tg3_readphy(tp, MII_BMSR, &val);
2354         err |= tg3_readphy(tp, MII_BMSR, &val);
2355         if (err != 0)
2356                 return -EBUSY;
2357
2358         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2359                 netif_carrier_off(tp->dev);
2360                 tg3_link_report(tp);
2361         }
2362
2363         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2364             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2365             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2366                 err = tg3_phy_reset_5703_4_5(tp);
2367                 if (err)
2368                         return err;
2369                 goto out;
2370         }
2371
2372         cpmuctrl = 0;
2373         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2374             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2375                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2376                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2377                         tw32(TG3_CPMU_CTRL,
2378                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2379         }
2380
2381         err = tg3_bmcr_reset(tp);
2382         if (err)
2383                 return err;
2384
2385         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2386                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2387                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2388
2389                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2390         }
2391
2392         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2393             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2394                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2395                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2396                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2397                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2398                         udelay(40);
2399                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2400                 }
2401         }
2402
2403         if (tg3_flag(tp, 5717_PLUS) &&
2404             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2405                 return 0;
2406
2407         tg3_phy_apply_otp(tp);
2408
2409         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2410                 tg3_phy_toggle_apd(tp, true);
2411         else
2412                 tg3_phy_toggle_apd(tp, false);
2413
2414 out:
2415         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2416             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2417                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2418                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2419                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2420         }
2421
2422         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2423                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2424                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2425         }
2426
2427         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2428                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2429                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2430                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2431                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2432                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2433                 }
2434         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2435                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2436                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2437                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2438                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2439                                 tg3_writephy(tp, MII_TG3_TEST1,
2440                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2441                         } else
2442                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2443
2444                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2445                 }
2446         }
2447
2448         /* Set the Extended packet length bit (bit 14) on all chips
2449          * that support jumbo frames.  */
2450         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2451                 /* Cannot do read-modify-write on 5401 */
2452                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2453         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2454                 /* Set bit 14 with read-modify-write to preserve other bits */
2455                 err = tg3_phy_auxctl_read(tp,
2456                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2457                 if (!err)
2458                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2459                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2460         }
2461
2462         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2463          * jumbo frame transmission.
2464          */
2465         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2466                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2467                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2468                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2469         }
2470
2471         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2472                 /* adjust output voltage */
2473                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2474         }
2475
2476         tg3_phy_toggle_automdix(tp, 1);
2477         tg3_phy_set_wirespeed(tp);
2478         return 0;
2479 }
2480
2481 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2482 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2483 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2484                                           TG3_GPIO_MSG_NEED_VAUX)
2485 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2486         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2487          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2488          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2489          (TG3_GPIO_MSG_DRVR_PRES << 12))
2490
2491 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2492         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2493          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2494          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2495          (TG3_GPIO_MSG_NEED_VAUX << 12))
2496
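/* Each PCI function owns a 4-bit slot in the shared GPIO message
 * register (TG3_APE_GPIO_MSG on 5717/5719, TG3_CPMU_DRV_STATUS
 * elsewhere).  Replace this function's slot with newstat and return
 * the whole bitmap so the caller can inspect the other functions.
 */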
2497 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2498 {
2499         u32 status, shift;
2500
2501         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2502             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2503                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2504         else
2505                 status = tr32(TG3_CPMU_DRV_STATUS);
2506
2507         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2508         status &= ~(TG3_GPIO_MSG_MASK << shift);
2509         status |= (newstat << shift);
2510
2511         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2512             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2513                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2514         else
2515                 tw32(TG3_CPMU_DRV_STATUS, status);
2516
2517         return status >> TG3_APE_GPIO_MSG_SHIFT;
2518 }
2519
2520 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2521 {
2522         if (!tg3_flag(tp, IS_NIC))
2523                 return 0;
2524
2525         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2526             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2527             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2528                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2529                         return -EIO;
2530
2531                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2532
2533                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2534                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2535
2536                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2537         } else {
2538                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2539                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2540         }
2541
2542         return 0;
2543 }
2544
2545 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2546 {
2547         u32 grc_local_ctrl;
2548
2549         if (!tg3_flag(tp, IS_NIC) ||
2550             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2551             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2552                 return;
2553
2554         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2555
2556         tw32_wait_f(GRC_LOCAL_CTRL,
2557                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2558                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2559
2560         tw32_wait_f(GRC_LOCAL_CTRL,
2561                     grc_local_ctrl,
2562                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2563
2564         tw32_wait_f(GRC_LOCAL_CTRL,
2565                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2566                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2567 }
2568
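/* Switch the board to its auxiliary power source by stepping the GRC
 * local-control GPIOs through the appropriate board-specific sequence.
 */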
2569 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2570 {
2571         if (!tg3_flag(tp, IS_NIC))
2572                 return;
2573
2574         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2575             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2576                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2577                             (GRC_LCLCTRL_GPIO_OE0 |
2578                              GRC_LCLCTRL_GPIO_OE1 |
2579                              GRC_LCLCTRL_GPIO_OE2 |
2580                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2581                              GRC_LCLCTRL_GPIO_OUTPUT1),
2582                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2583         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2584                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2585                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2586                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2587                                      GRC_LCLCTRL_GPIO_OE1 |
2588                                      GRC_LCLCTRL_GPIO_OE2 |
2589                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2590                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2591                                      tp->grc_local_ctrl;
2592                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2593                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2594
2595                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2596                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2597                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2598
2599                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2600                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2601                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2602         } else {
2603                 u32 no_gpio2;
2604                 u32 grc_local_ctrl = 0;
2605
2606                 /* Workaround to keep the part from drawing too much current. */
2607                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2608                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2609                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2610                                     grc_local_ctrl,
2611                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2612                 }
2613
2614                 /* On 5753 and variants, GPIO2 cannot be used. */
2615                 no_gpio2 = tp->nic_sram_data_cfg &
2616                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2617
2618                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2619                                   GRC_LCLCTRL_GPIO_OE1 |
2620                                   GRC_LCLCTRL_GPIO_OE2 |
2621                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2622                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2623                 if (no_gpio2) {
2624                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2625                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2626                 }
2627                 tw32_wait_f(GRC_LOCAL_CTRL,
2628                             tp->grc_local_ctrl | grc_local_ctrl,
2629                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2630
2631                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2632
2633                 tw32_wait_f(GRC_LOCAL_CTRL,
2634                             tp->grc_local_ctrl | grc_local_ctrl,
2635                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2636
2637                 if (!no_gpio2) {
2638                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2639                         tw32_wait_f(GRC_LOCAL_CTRL,
2640                                     tp->grc_local_ctrl | grc_local_ctrl,
2641                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2642                 }
2643         }
2644 }
2645
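/* Arbitrate the power source on 5717-class parts.  Each function posts
 * whether it needs Vaux; if any other function still has a driver
 * present the power source is left alone, otherwise the board moves to
 * Vaux if anyone asked for it and back to Vmain if not.
 */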
2646 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2647 {
2648         u32 msg = 0;
2649
2650         /* Serialize power state transitions */
2651         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2652                 return;
2653
2654         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2655                 msg = TG3_GPIO_MSG_NEED_VAUX;
2656
2657         msg = tg3_set_function_status(tp, msg);
2658
2659         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2660                 goto done;
2661
2662         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2663                 tg3_pwrsrc_switch_to_vaux(tp);
2664         else
2665                 tg3_pwrsrc_die_with_vmain(tp);
2666
2667 done:
2668         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2669 }
2670
2671 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2672 {
2673         bool need_vaux = false;
2674
2675         /* The GPIOs do something completely different on 57765. */
2676         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2677                 return;
2678
2679         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2680             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2681             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2682                 tg3_frob_aux_power_5717(tp, include_wol ?
2683                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2684                 return;
2685         }
2686
2687         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2688                 struct net_device *dev_peer;
2689
2690                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2691
2692                 /* remove_one() may have been run on the peer. */
2693                 if (dev_peer) {
2694                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2695
2696                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2697                                 return;
2698
2699                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2700                             tg3_flag(tp_peer, ENABLE_ASF))
2701                                 need_vaux = true;
2702                 }
2703         }
2704
2705         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2706             tg3_flag(tp, ENABLE_ASF))
2707                 need_vaux = true;
2708
2709         if (need_vaux)
2710                 tg3_pwrsrc_switch_to_vaux(tp);
2711         else
2712                 tg3_pwrsrc_die_with_vmain(tp);
2713 }
2714
2715 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2716 {
2717         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2718                 return 1;
2719         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2720                 if (speed != SPEED_10)
2721                         return 1;
2722         } else if (speed == SPEED_10)
2723                 return 1;
2724
2725         return 0;
2726 }
2727
2728 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2729 {
2730         u32 val;
2731
2732         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2733                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2734                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2735                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2736
2737                         sg_dig_ctrl |=
2738                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2739                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2740                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2741                 }
2742                 return;
2743         }
2744
2745         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2746                 tg3_bmcr_reset(tp);
2747                 val = tr32(GRC_MISC_CFG);
2748                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2749                 udelay(40);
2750                 return;
2751         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2752                 u32 phytest;
2753                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2754                         u32 phy;
2755
2756                         tg3_writephy(tp, MII_ADVERTISE, 0);
2757                         tg3_writephy(tp, MII_BMCR,
2758                                      BMCR_ANENABLE | BMCR_ANRESTART);
2759
2760                         tg3_writephy(tp, MII_TG3_FET_TEST,
2761                                      phytest | MII_TG3_FET_SHADOW_EN);
2762                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2763                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2764                                 tg3_writephy(tp,
2765                                              MII_TG3_FET_SHDW_AUXMODE4,
2766                                              phy);
2767                         }
2768                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2769                 }
2770                 return;
2771         } else if (do_low_power) {
2772                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2773                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2774
2775                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2776                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2777                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2778                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2779         }
2780
2781         /* The PHY should not be powered down on some chips because
2782          * of bugs.
2783          */
2784         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2785             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2786             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2787              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2788             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2789              !tp->pci_fn))
2790                 return;
2791
2792         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2793             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2794                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2795                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2796                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2797                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2798         }
2799
2800         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2801 }
2802
2803 /* tp->lock is held. */
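/* Acquire the NVRAM software arbitration semaphore.  The request/grant
 * handshake only runs on the first acquisition; nested calls just bump
 * nvram_lock_cnt, and tg3_nvram_unlock() drops the hardware semaphore
 * when the count returns to zero.
 */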
2804 static int tg3_nvram_lock(struct tg3 *tp)
2805 {
2806         if (tg3_flag(tp, NVRAM)) {
2807                 int i;
2808
2809                 if (tp->nvram_lock_cnt == 0) {
2810                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2811                         for (i = 0; i < 8000; i++) {
2812                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2813                                         break;
2814                                 udelay(20);
2815                         }
2816                         if (i == 8000) {
2817                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2818                                 return -ENODEV;
2819                         }
2820                 }
2821                 tp->nvram_lock_cnt++;
2822         }
2823         return 0;
2824 }
2825
2826 /* tp->lock is held. */
2827 static void tg3_nvram_unlock(struct tg3 *tp)
2828 {
2829         if (tg3_flag(tp, NVRAM)) {
2830                 if (tp->nvram_lock_cnt > 0)
2831                         tp->nvram_lock_cnt--;
2832                 if (tp->nvram_lock_cnt == 0)
2833                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2834         }
2835 }
2836
2837 /* tp->lock is held. */
2838 static void tg3_enable_nvram_access(struct tg3 *tp)
2839 {
2840         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2841                 u32 nvaccess = tr32(NVRAM_ACCESS);
2842
2843                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2844         }
2845 }
2846
2847 /* tp->lock is held. */
2848 static void tg3_disable_nvram_access(struct tg3 *tp)
2849 {
2850         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2851                 u32 nvaccess = tr32(NVRAM_ACCESS);
2852
2853                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2854         }
2855 }
2856
2857 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2858                                         u32 offset, u32 *val)
2859 {
2860         u32 tmp;
2861         int i;
2862
2863         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2864                 return -EINVAL;
2865
2866         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2867                                         EEPROM_ADDR_DEVID_MASK |
2868                                         EEPROM_ADDR_READ);
2869         tw32(GRC_EEPROM_ADDR,
2870              tmp |
2871              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2872              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2873               EEPROM_ADDR_ADDR_MASK) |
2874              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2875
2876         for (i = 0; i < 1000; i++) {
2877                 tmp = tr32(GRC_EEPROM_ADDR);
2878
2879                 if (tmp & EEPROM_ADDR_COMPLETE)
2880                         break;
2881                 msleep(1);
2882         }
2883         if (!(tmp & EEPROM_ADDR_COMPLETE))
2884                 return -EBUSY;
2885
2886         tmp = tr32(GRC_EEPROM_DATA);
2887
2888         /*
2889          * The data will always be opposite the native endian
2890          * format.  Perform a blind byteswap to compensate.
2891          */
2892         *val = swab32(tmp);
2893
2894         return 0;
2895 }
2896
2897 #define NVRAM_CMD_TIMEOUT 10000 /* 10 usec per poll, ~100 ms max */
2898
2899 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2900 {
2901         int i;
2902
2903         tw32(NVRAM_CMD, nvram_cmd);
2904         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2905                 udelay(10);
2906                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2907                         udelay(10);
2908                         break;
2909                 }
2910         }
2911
2912         if (i == NVRAM_CMD_TIMEOUT)
2913                 return -EBUSY;
2914
2915         return 0;
2916 }
2917
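     /* tg3_nvram_exec_cmd() is the handshake underneath every flash
      * operation: write a command word to NVRAM_CMD, then busy-poll for
      * NVRAM_CMD_DONE, bounded at NVRAM_CMD_TIMEOUT * 10 us (~100 ms).
      * A minimal read sketch built on it (locking and access-enable
      * setup elided; this is exactly what tg3_nvram_read() does below):
      *
      *	tw32(NVRAM_ADDR, offset);
      *	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
      *			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
      *	if (!ret)
      *		val = tr32(NVRAM_RDDATA);
      */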
2918 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2919 {
2920         if (tg3_flag(tp, NVRAM) &&
2921             tg3_flag(tp, NVRAM_BUFFERED) &&
2922             tg3_flag(tp, FLASH) &&
2923             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2924             (tp->nvram_jedecnum == JEDEC_ATMEL))
2925
2926                 addr = ((addr / tp->nvram_pagesize) <<
2927                         ATMEL_AT45DB0X1B_PAGE_POS) +
2928                        (addr % tp->nvram_pagesize);
2929
2930         return addr;
2931 }
2932
2933 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2934 {
2935         if (tg3_flag(tp, NVRAM) &&
2936             tg3_flag(tp, NVRAM_BUFFERED) &&
2937             tg3_flag(tp, FLASH) &&
2938             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2939             (tp->nvram_jedecnum == JEDEC_ATMEL))
2940
2941                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2942                         tp->nvram_pagesize) +
2943                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2944
2945         return addr;
2946 }
2947
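     /* The two helpers above translate between linear offsets and the
      * page:byte addressing used by Atmel AT45DB0x1B flash, whose pages
      * are not a power of two (tp->nvram_pagesize is typically 264 bytes
      * for these parts).  The page index is shifted up to the device's
      * page field at ATMEL_AT45DB0X1B_PAGE_POS and the byte offset
      * within the page stays in the low bits; tg3_nvram_logical_addr()
      * is the exact inverse mapping.
      */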
2948 /* NOTE: Data read in from NVRAM is byteswapped according to
2949  * the byteswapping settings for all other register accesses.
2950  * tg3 devices are BE devices, so on a BE machine, the data
2951  * returned will be exactly as it is seen in NVRAM.  On a LE
2952  * machine, the 32-bit value will be byteswapped.
2953  */
2954 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2955 {
2956         int ret;
2957
2958         if (!tg3_flag(tp, NVRAM))
2959                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2960
2961         offset = tg3_nvram_phys_addr(tp, offset);
2962
2963         if (offset > NVRAM_ADDR_MSK)
2964                 return -EINVAL;
2965
2966         ret = tg3_nvram_lock(tp);
2967         if (ret)
2968                 return ret;
2969
2970         tg3_enable_nvram_access(tp);
2971
2972         tw32(NVRAM_ADDR, offset);
2973         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2974                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2975
2976         if (ret == 0)
2977                 *val = tr32(NVRAM_RDDATA);
2978
2979         tg3_disable_nvram_access(tp);
2980
2981         tg3_nvram_unlock(tp);
2982
2983         return ret;
2984 }
2985
2986 /* Ensures NVRAM data is in bytestream format. */
2987 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2988 {
2989         u32 v;
2990         int res = tg3_nvram_read(tp, offset, &v);
2991         if (!res)
2992                 *val = cpu_to_be32(v);
2993         return res;
2994 }
2995
2996 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
2997                                     u32 offset, u32 len, u8 *buf)
2998 {
2999         int i, j, rc = 0;
3000         u32 val;
3001
3002         for (i = 0; i < len; i += 4) {
3003                 u32 addr;
3004                 __be32 data;
3005
3006                 addr = offset + i;
3007
3008                 memcpy(&data, buf + i, 4);
3009
3010                 /*
3011                  * The SEEPROM interface expects the data to always be opposite
3012                  * the native endian format.  We accomplish this by reversing
3013                  * all the operations that would have been performed on the
3014                  * data from a call to tg3_nvram_read_be32().
3015                  */
3016                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3017
3018                 val = tr32(GRC_EEPROM_ADDR);
3019                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3020
3021                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3022                         EEPROM_ADDR_READ);
3023                 tw32(GRC_EEPROM_ADDR, val |
3024                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3025                         (addr & EEPROM_ADDR_ADDR_MASK) |
3026                         EEPROM_ADDR_START |
3027                         EEPROM_ADDR_WRITE);
3028
3029                 for (j = 0; j < 1000; j++) {
3030                         val = tr32(GRC_EEPROM_ADDR);
3031
3032                         if (val & EEPROM_ADDR_COMPLETE)
3033                                 break;
3034                         msleep(1);
3035                 }
3036                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3037                         rc = -EBUSY;
3038                         break;
3039                 }
3040         }
3041
3042         return rc;
3043 }
3044
3045 /* offset and length are dword aligned */
3046 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3047                 u8 *buf)
3048 {
3049         int ret = 0;
3050         u32 pagesize = tp->nvram_pagesize;
3051         u32 pagemask = pagesize - 1;
3052         u32 nvram_cmd;
3053         u8 *tmp;
3054
3055         tmp = kmalloc(pagesize, GFP_KERNEL);
3056         if (tmp == NULL)
3057                 return -ENOMEM;
3058
3059         while (len) {
3060                 int j;
3061                 u32 phy_addr, page_off, size;
3062
3063                 phy_addr = offset & ~pagemask;
3064
3065                 for (j = 0; j < pagesize; j += 4) {
3066                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3067                                                   (__be32 *) (tmp + j));
3068                         if (ret)
3069                                 break;
3070                 }
3071                 if (ret)
3072                         break;
3073
3074                 page_off = offset & pagemask;
3075                 size = pagesize - page_off;
3076                 if (len < size)
3077                         size = len;
3078
3079                 len -= size;
3080
3081                 memcpy(tmp + page_off, buf, size);
                     buf += size;
3082
3083                 offset = offset + (pagesize - page_off);
3084
3085                 tg3_enable_nvram_access(tp);
3086
3087                 /*
3088                  * Before we can erase the flash page, we need
3089                  * to issue a special "write enable" command.
3090                  */
3091                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3092
3093                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3094                         break;
3095
3096                 /* Erase the target page */
3097                 tw32(NVRAM_ADDR, phy_addr);
3098
3099                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3100                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3101
3102                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3103                         break;
3104
3105                 /* Issue another write enable to start the write. */
3106                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3107
3108                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3109                         break;
3110
3111                 for (j = 0; j < pagesize; j += 4) {
3112                         __be32 data;
3113
3114                         data = *((__be32 *) (tmp + j));
3115
3116                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3117
3118                         tw32(NVRAM_ADDR, phy_addr + j);
3119
3120                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3121                                 NVRAM_CMD_WR;
3122
3123                         if (j == 0)
3124                                 nvram_cmd |= NVRAM_CMD_FIRST;
3125                         else if (j == (pagesize - 4))
3126                                 nvram_cmd |= NVRAM_CMD_LAST;
3127
3128                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3129                         if (ret)
3130                                 break;
3131                 }
3132                 if (ret)
3133                         break;
3134         }
3135
3136         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3137         tg3_nvram_exec_cmd(tp, nvram_cmd);
3138
3139         kfree(tmp);
3140
3141         return ret;
3142 }
3143
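     /* Unbuffered flash cannot be modified in place, so the loop above
      * does a read-modify-write of every page it touches: read the whole
      * page into tmp, merge in the caller's bytes, issue WREN plus a
      * page ERASE, issue WREN again, then stream the page back one dword
      * at a time with NVRAM_CMD_FIRST on the first word and
      * NVRAM_CMD_LAST on the final one.  The trailing NVRAM_CMD_WRDI
      * drops the part's write-enable latch again.
      */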
3144 /* offset and length are dword aligned */
3145 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3146                 u8 *buf)
3147 {
3148         int i, ret = 0;
3149
3150         for (i = 0; i < len; i += 4, offset += 4) {
3151                 u32 page_off, phy_addr, nvram_cmd;
3152                 __be32 data;
3153
3154                 memcpy(&data, buf + i, 4);
3155                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3156
3157                 page_off = offset % tp->nvram_pagesize;
3158
3159                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3160
3161                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3162
3163                 if (page_off == 0 || i == 0)
3164                         nvram_cmd |= NVRAM_CMD_FIRST;
3165                 if (page_off == (tp->nvram_pagesize - 4))
3166                         nvram_cmd |= NVRAM_CMD_LAST;
3167
3168                 if (i == (len - 4))
3169                         nvram_cmd |= NVRAM_CMD_LAST;
3170
3171                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3172                     !tg3_flag(tp, FLASH) ||
3173                     !tg3_flag(tp, 57765_PLUS))
3174                         tw32(NVRAM_ADDR, phy_addr);
3175
3176                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3177                     !tg3_flag(tp, 5755_PLUS) &&
3178                     (tp->nvram_jedecnum == JEDEC_ST) &&
3179                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3180                         u32 cmd;
3181
3182                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3183                         ret = tg3_nvram_exec_cmd(tp, cmd);
3184                         if (ret)
3185                                 break;
3186                 }
3187                 if (!tg3_flag(tp, FLASH)) {
3188                         /* We always do complete word writes to eeprom. */
3189                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3190                 }
3191
3192                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3193                 if (ret)
3194                         break;
3195         }
3196         return ret;
3197 }
3198
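     /* The buffered path needs no erase cycle; it only frames the data
      * stream for the controller.  NVRAM_CMD_FIRST marks the first word
      * of a page (or of the whole transfer) and NVRAM_CMD_LAST the final
      * word of a page or of the transfer; an extra WREN is issued inline
      * for older ST parts that require it.
      */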
3199 /* offset and length are dword aligned */
3200 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3201 {
3202         int ret;
3203
3204         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3205                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3206                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3207                 udelay(40);
3208         }
3209
3210         if (!tg3_flag(tp, NVRAM)) {
3211                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3212         } else {
3213                 u32 grc_mode;
3214
3215                 ret = tg3_nvram_lock(tp);
3216                 if (ret)
3217                         return ret;
3218
3219                 tg3_enable_nvram_access(tp);
3220                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3221                         tw32(NVRAM_WRITE1, 0x406);
3222
3223                 grc_mode = tr32(GRC_MODE);
3224                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3225
3226                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3227                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3228                                 buf);
3229                 } else {
3230                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3231                                 buf);
3232                 }
3233
3234                 grc_mode = tr32(GRC_MODE);
3235                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3236
3237                 tg3_disable_nvram_access(tp);
3238                 tg3_nvram_unlock(tp);
3239         }
3240
3241         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3242                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3243                 udelay(40);
3244         }
3245
3246         return ret;
3247 }
3248
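     /* tg3_nvram_write_block() brackets both write paths with the
      * board-level protections: GRC_LCLCTRL_GPIO_OUTPUT1 is dropped to
      * release the external write-protect pin and restored afterwards,
      * and GRC_MODE_NVRAM_WR_ENABLE gates writes inside the controller.
      * The buffered path is used whenever the part is buffered or is a
      * plain EEPROM rather than raw flash.
      */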
3249 #define RX_CPU_SCRATCH_BASE     0x30000
3250 #define RX_CPU_SCRATCH_SIZE     0x04000
3251 #define TX_CPU_SCRATCH_BASE     0x34000
3252 #define TX_CPU_SCRATCH_SIZE     0x04000
3253
3254 /* tp->lock is held. */
3255 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3256 {
3257         int i;
3258
3259         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3260
3261         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3262                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3263
3264                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3265                 return 0;
3266         }
3267         if (offset == RX_CPU_BASE) {
3268                 for (i = 0; i < 10000; i++) {
3269                         tw32(offset + CPU_STATE, 0xffffffff);
3270                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3271                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3272                                 break;
3273                 }
3274
3275                 tw32(offset + CPU_STATE, 0xffffffff);
3276                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3277                 udelay(10);
3278         } else {
3279                 for (i = 0; i < 10000; i++) {
3280                         tw32(offset + CPU_STATE, 0xffffffff);
3281                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3282                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3283                                 break;
3284                 }
3285         }
3286
3287         if (i >= 10000) {
3288                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3289                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3290                 return -ENODEV;
3291         }
3292
3293         /* Clear firmware's nvram arbitration. */
3294         if (tg3_flag(tp, NVRAM))
3295                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3296         return 0;
3297 }
3298
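     /* Halting a CPU is itself a polled handshake: CPU_MODE_HALT is
      * written repeatedly until it reads back set, since the embedded
      * processor may not stop instantly.  The 5906 has no conventional
      * RX/TX CPU and is halted through GRC_VCPU_EXT_CTRL instead, and
      * 5705-plus parts have no TX CPU at all, hence the BUG_ON() above.
      */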
3299 struct fw_info {
3300         unsigned int fw_base;
3301         unsigned int fw_len;
3302         const __be32 *fw_data;
3303 };
3304
3305 /* tp->lock is held. */
3306 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3307                                  u32 cpu_scratch_base, int cpu_scratch_size,
3308                                  struct fw_info *info)
3309 {
3310         int err, lock_err, i;
3311         void (*write_op)(struct tg3 *, u32, u32);
3312
3313         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3314                 netdev_err(tp->dev,
3315                            "%s: trying to load TX cpu firmware on a 5705-class device\n",
3316                            __func__);
3317                 return -EINVAL;
3318         }
3319
3320         if (tg3_flag(tp, 5705_PLUS))
3321                 write_op = tg3_write_mem;
3322         else
3323                 write_op = tg3_write_indirect_reg32;
3324
3325         /* It is possible that bootcode is still loading at this point.
3326          * Take the nvram lock before halting the cpu.
3327          */
3328         lock_err = tg3_nvram_lock(tp);
3329         err = tg3_halt_cpu(tp, cpu_base);
3330         if (!lock_err)
3331                 tg3_nvram_unlock(tp);
3332         if (err)
3333                 goto out;
3334
3335         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3336                 write_op(tp, cpu_scratch_base + i, 0);
3337         tw32(cpu_base + CPU_STATE, 0xffffffff);
3338         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3339         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3340                 write_op(tp, (cpu_scratch_base +
3341                               (info->fw_base & 0xffff) +
3342                               (i * sizeof(u32))),
3343                               be32_to_cpu(info->fw_data[i]));
3344
3345         err = 0;
3346
3347 out:
3348         return err;
3349 }
3350
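     /* Firmware is loaded by halting the target CPU, zeroing its scratch
      * RAM, and then writing the big-endian image words through either
      * direct memory writes (5705-plus) or the indirect register window.
      * Note that only the low 16 bits of fw_base pick the scratch
      * offset; the full value is written to CPU_PC later to start
      * execution.
      */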
3351 /* tp->lock is held. */
3352 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3353 {
3354         struct fw_info info;
3355         const __be32 *fw_data;
3356         int err, i;
3357
3358         fw_data = (void *)tp->fw->data;
3359
3360         /* The firmware blob starts with the version numbers, followed
3361            by the start address and the length.  We use the complete
3362            length: length = end_address_of_bss - start_address_of_text.
3363            The remainder is the blob, to be loaded contiguously
3364            from the start address. */
3365
3366         info.fw_base = be32_to_cpu(fw_data[1]);
3367         info.fw_len = tp->fw->size - 12;
3368         info.fw_data = &fw_data[3];
3369
3370         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3371                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3372                                     &info);
3373         if (err)
3374                 return err;
3375
3376         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3377                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3378                                     &info);
3379         if (err)
3380                 return err;
3381
3382         /* Now start up only the RX CPU. */
3383         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3384         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3385
3386         for (i = 0; i < 5; i++) {
3387                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3388                         break;
3389                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3390                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3391                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3392                 udelay(1000);
3393         }
3394         if (i >= 5) {
3395                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3396                            "should be %08x\n", __func__,
3397                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3398                 return -ENODEV;
3399         }
3400         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3401         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3402
3403         return 0;
3404 }
3405
3406 /* tp->lock is held. */
3407 static int tg3_load_tso_firmware(struct tg3 *tp)
3408 {
3409         struct fw_info info;
3410         const __be32 *fw_data;
3411         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3412         int err, i;
3413
3414         if (tg3_flag(tp, HW_TSO_1) ||
3415             tg3_flag(tp, HW_TSO_2) ||
3416             tg3_flag(tp, HW_TSO_3))
3417                 return 0;
3418
3419         fw_data = (void *)tp->fw->data;
3420
3421         /* The firmware blob starts with the version numbers, followed
3422            by the start address and the length.  We use the complete
3423            length: length = end_address_of_bss - start_address_of_text.
3424            The remainder is the blob, to be loaded contiguously
3425            from the start address. */
3426
3427         info.fw_base = be32_to_cpu(fw_data[1]);
3428         cpu_scratch_size = tp->fw_len;
3429         info.fw_len = tp->fw->size - 12;
3430         info.fw_data = &fw_data[3];
3431
3432         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3433                 cpu_base = RX_CPU_BASE;
3434                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3435         } else {
3436                 cpu_base = TX_CPU_BASE;
3437                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3438                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3439         }
3440
3441         err = tg3_load_firmware_cpu(tp, cpu_base,
3442                                     cpu_scratch_base, cpu_scratch_size,
3443                                     &info);
3444         if (err)
3445                 return err;
3446
3447         /* Now start up the CPU. */
3448         tw32(cpu_base + CPU_STATE, 0xffffffff);
3449         tw32_f(cpu_base + CPU_PC, info.fw_base);
3450
3451         for (i = 0; i < 5; i++) {
3452                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3453                         break;
3454                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3455                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3456                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3457                 udelay(1000);
3458         }
3459         if (i >= 5) {
3460                 netdev_err(tp->dev,
3461                            "%s failed to set CPU PC: is %08x, should be %08x\n",
3462                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3463                 return -ENODEV;
3464         }
3465         tw32(cpu_base + CPU_STATE, 0xffffffff);
3466         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3467         return 0;
3468 }
3469
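     /* For the TSO firmware the target CPU depends on the chip: the
      * 5705 has no TX CPU, so the image is staged on the RX CPU using
      * the 5705 mbuf pool SRAM as scratch, while other chips load it
      * into the TX CPU scratch area.  Chips with any HW_TSO_* flag
      * segment in hardware and skip the download entirely.
      */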
3470
3471 /* tp->lock is held. */
3472 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3473 {
3474         u32 addr_high, addr_low;
3475         int i;
3476
3477         addr_high = ((tp->dev->dev_addr[0] << 8) |
3478                      tp->dev->dev_addr[1]);
3479         addr_low = ((tp->dev->dev_addr[2] << 24) |
3480                     (tp->dev->dev_addr[3] << 16) |
3481                     (tp->dev->dev_addr[4] <<  8) |
3482                     (tp->dev->dev_addr[5] <<  0));
3483         for (i = 0; i < 4; i++) {
3484                 if (i == 1 && skip_mac_1)
3485                         continue;
3486                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3487                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3488         }
3489
3490         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3491             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3492                 for (i = 0; i < 12; i++) {
3493                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3494                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3495                 }
3496         }
3497
3498         addr_high = (tp->dev->dev_addr[0] +
3499                      tp->dev->dev_addr[1] +
3500                      tp->dev->dev_addr[2] +
3501                      tp->dev->dev_addr[3] +
3502                      tp->dev->dev_addr[4] +
3503                      tp->dev->dev_addr[5]) &
3504                 TX_BACKOFF_SEED_MASK;
3505         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3506 }
3507
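     /* The station address is mirrored into four MAC_ADDR_n register
      * pairs (plus twelve MAC_EXTADDR_n slots on the 5703/5704), split
      * as a 16-bit high half and a 32-bit low half.  The byte sum of the
      * address also seeds the transmit backoff generator, presumably so
      * that stations with different addresses tend to pick different
      * retransmit slots on half-duplex links.
      */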
3508 static void tg3_enable_register_access(struct tg3 *tp)
3509 {
3510         /*
3511          * Make sure register accesses (indirect or otherwise) will function
3512          * correctly.
3513          */
3514         pci_write_config_dword(tp->pdev,
3515                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3516 }
3517
3518 static int tg3_power_up(struct tg3 *tp)
3519 {
3520         int err;
3521
3522         tg3_enable_register_access(tp);
3523
3524         err = pci_set_power_state(tp->pdev, PCI_D0);
3525         if (!err) {
3526                 /* Switch out of Vaux if it is a NIC */
3527                 tg3_pwrsrc_switch_to_vmain(tp);
3528         } else {
3529                 netdev_err(tp->dev, "Transition to D0 failed\n");
3530         }
3531
3532         return err;
3533 }
3534
3535 static int tg3_setup_phy(struct tg3 *, int);
3536
3537 static int tg3_power_down_prepare(struct tg3 *tp)
3538 {
3539         u32 misc_host_ctrl;
3540         bool device_should_wake, do_low_power;
3541
3542         tg3_enable_register_access(tp);
3543
3544         /* Restore the CLKREQ setting. */
3545         if (tg3_flag(tp, CLKREQ_BUG)) {
3546                 u16 lnkctl;
3547
3548                 pci_read_config_word(tp->pdev,
3549                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3550                                      &lnkctl);
3551                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3552                 pci_write_config_word(tp->pdev,
3553                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3554                                       lnkctl);
3555         }
3556
3557         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3558         tw32(TG3PCI_MISC_HOST_CTRL,
3559              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3560
3561         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3562                              tg3_flag(tp, WOL_ENABLE);
3563
3564         if (tg3_flag(tp, USE_PHYLIB)) {
3565                 do_low_power = false;
3566                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3567                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3568                         struct phy_device *phydev;
3569                         u32 phyid, advertising;
3570
3571                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3572
3573                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3574
3575                         tp->link_config.speed = phydev->speed;
3576                         tp->link_config.duplex = phydev->duplex;
3577                         tp->link_config.autoneg = phydev->autoneg;
3578                         tp->link_config.advertising = phydev->advertising;
3579
3580                         advertising = ADVERTISED_TP |
3581                                       ADVERTISED_Pause |
3582                                       ADVERTISED_Autoneg |
3583                                       ADVERTISED_10baseT_Half;
3584
3585                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3586                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3587                                         advertising |=
3588                                                 ADVERTISED_100baseT_Half |
3589                                                 ADVERTISED_100baseT_Full |
3590                                                 ADVERTISED_10baseT_Full;
3591                                 else
3592                                         advertising |= ADVERTISED_10baseT_Full;
3593                         }
3594
3595                         phydev->advertising = advertising;
3596
3597                         phy_start_aneg(phydev);
3598
3599                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3600                         if (phyid != PHY_ID_BCMAC131) {
3601                                 phyid &= PHY_BCM_OUI_MASK;
3602                                 if (phyid == PHY_BCM_OUI_1 ||
3603                                     phyid == PHY_BCM_OUI_2 ||
3604                                     phyid == PHY_BCM_OUI_3)
3605                                         do_low_power = true;
3606                         }
3607                 }
3608         } else {
3609                 do_low_power = true;
3610
3611                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3612                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3613
3614                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3615                         tg3_setup_phy(tp, 0);
3616         }
3617
3618         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3619                 u32 val;
3620
3621                 val = tr32(GRC_VCPU_EXT_CTRL);
3622                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3623         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3624                 int i;
3625                 u32 val;
3626
3627                 for (i = 0; i < 200; i++) {
3628                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3629                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3630                                 break;
3631                         msleep(1);
3632                 }
3633         }
3634         if (tg3_flag(tp, WOL_CAP))
3635                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3636                                                      WOL_DRV_STATE_SHUTDOWN |
3637                                                      WOL_DRV_WOL |
3638                                                      WOL_SET_MAGIC_PKT);
3639
3640         if (device_should_wake) {
3641                 u32 mac_mode;
3642
3643                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3644                         if (do_low_power &&
3645                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3646                                 tg3_phy_auxctl_write(tp,
3647                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3648                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3649                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3650                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3651                                 udelay(40);
3652                         }
3653
3654                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3655                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3656                         else
3657                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3658
3659                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3660                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3661                             ASIC_REV_5700) {
3662                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3663                                              SPEED_100 : SPEED_10;
3664                                 if (tg3_5700_link_polarity(tp, speed))
3665                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3666                                 else
3667                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3668                         }
3669                 } else {
3670                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3671                 }
3672
3673                 if (!tg3_flag(tp, 5750_PLUS))
3674                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3675
3676                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3677                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3678                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3679                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3680
3681                 if (tg3_flag(tp, ENABLE_APE))
3682                         mac_mode |= MAC_MODE_APE_TX_EN |
3683                                     MAC_MODE_APE_RX_EN |
3684                                     MAC_MODE_TDE_ENABLE;
3685
3686                 tw32_f(MAC_MODE, mac_mode);
3687                 udelay(100);
3688
3689                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3690                 udelay(10);
3691         }
3692
3693         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3694             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3695              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3696                 u32 base_val;
3697
3698                 base_val = tp->pci_clock_ctrl;
3699                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3700                              CLOCK_CTRL_TXCLK_DISABLE);
3701
3702                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3703                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3704         } else if (tg3_flag(tp, 5780_CLASS) ||
3705                    tg3_flag(tp, CPMU_PRESENT) ||
3706                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3707                 /* do nothing */
3708         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3709                 u32 newbits1, newbits2;
3710
3711                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3712                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3713                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3714                                     CLOCK_CTRL_TXCLK_DISABLE |
3715                                     CLOCK_CTRL_ALTCLK);
3716                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3717                 } else if (tg3_flag(tp, 5705_PLUS)) {
3718                         newbits1 = CLOCK_CTRL_625_CORE;
3719                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3720                 } else {
3721                         newbits1 = CLOCK_CTRL_ALTCLK;
3722                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3723                 }
3724
3725                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3726                             40);
3727
3728                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3729                             40);
3730
3731                 if (!tg3_flag(tp, 5705_PLUS)) {
3732                         u32 newbits3;
3733
3734                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3735                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3736                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3737                                             CLOCK_CTRL_TXCLK_DISABLE |
3738                                             CLOCK_CTRL_44MHZ_CORE);
3739                         } else {
3740                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3741                         }
3742
3743                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3744                                     tp->pci_clock_ctrl | newbits3, 40);
3745                 }
3746         }
3747
3748         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3749                 tg3_power_down_phy(tp, do_low_power);
3750
3751         tg3_frob_aux_power(tp, true);
3752
3753         /* Workaround for unstable PLL clock */
3754         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3755             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3756                 u32 val = tr32(0x7d00);
3757
3758                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3759                 tw32(0x7d00, val);
3760                 if (!tg3_flag(tp, ENABLE_ASF)) {
3761                         int err;
3762
3763                         err = tg3_nvram_lock(tp);
3764                         tg3_halt_cpu(tp, RX_CPU_BASE);
3765                         if (!err)
3766                                 tg3_nvram_unlock(tp);
3767                 }
3768         }
3769
3770         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3771
3772         return 0;
3773 }
3774
3775 static void tg3_power_down(struct tg3 *tp)
3776 {
3777         tg3_power_down_prepare(tp);
3778
3779         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3780         pci_set_power_state(tp->pdev, PCI_D3hot);
3781 }
3782
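     /* Power-down sequencing in the two functions above: mask PCI
      * interrupts, snapshot the PHY's link parameters, trim the
      * advertisement down to what Wake-on-LAN needs (10 Mb, plus 100 Mb
      * when WOL_SPEED_100MB is set), arm the WoL mailbox and
      * magic-packet mode, gate the unused clocks, and only then let the
      * PCI core move the device to D3hot with wake enabled.
      */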
3783 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val,
                                              u16 *speed, u8 *duplex)
3784 {
3785         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3786         case MII_TG3_AUX_STAT_10HALF:
3787                 *speed = SPEED_10;
3788                 *duplex = DUPLEX_HALF;
3789                 break;
3790
3791         case MII_TG3_AUX_STAT_10FULL:
3792                 *speed = SPEED_10;
3793                 *duplex = DUPLEX_FULL;
3794                 break;
3795
3796         case MII_TG3_AUX_STAT_100HALF:
3797                 *speed = SPEED_100;
3798                 *duplex = DUPLEX_HALF;
3799                 break;
3800
3801         case MII_TG3_AUX_STAT_100FULL:
3802                 *speed = SPEED_100;
3803                 *duplex = DUPLEX_FULL;
3804                 break;
3805
3806         case MII_TG3_AUX_STAT_1000HALF:
3807                 *speed = SPEED_1000;
3808                 *duplex = DUPLEX_HALF;
3809                 break;
3810
3811         case MII_TG3_AUX_STAT_1000FULL:
3812                 *speed = SPEED_1000;
3813                 *duplex = DUPLEX_FULL;
3814                 break;
3815
3816         default:
3817                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3818                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3819                                  SPEED_10;
3820                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3821                                   DUPLEX_HALF;
3822                         break;
3823                 }
3824                 *speed = SPEED_UNKNOWN;
3825                 *duplex = DUPLEX_UNKNOWN;
3826                 break;
3827         }
3828 }
3829
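     /* MII_TG3_AUX_STAT packs the PHY's resolved speed/duplex into one
      * field, which the switch above decodes.  FET-style PHYs (such as
      * the internal 10/100 PHY on the 5906) report through separate
      * MII_TG3_AUX_STAT_100 and _FULL bits instead, which the default
      * case handles.
      */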
3830 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3831 {
3832         int err = 0;
3833         u32 val, new_adv;
3834
3835         new_adv = ADVERTISE_CSMA;
3836         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3837         new_adv |= mii_advertise_flowctrl(flowctrl);
3838
3839         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3840         if (err)
3841                 goto done;
3842
3843         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3844                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3845
3846                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3847                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3848                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3849
3850                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3851                 if (err)
3852                         goto done;
3853         }
3854
3855         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3856                 goto done;
3857
3858         tw32(TG3_CPMU_EEE_MODE,
3859              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3860
3861         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3862         if (!err) {
3863                 u32 err2;
3864
3865                 val = 0;
3866                 /* Advertise 100-BaseTX EEE ability */
3867                 if (advertise & ADVERTISED_100baseT_Full)
3868                         val |= MDIO_AN_EEE_ADV_100TX;
3869                 /* Advertise 1000-BaseT EEE ability */
3870                 if (advertise & ADVERTISED_1000baseT_Full)
3871                         val |= MDIO_AN_EEE_ADV_1000T;
3872                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3873                 if (err)
3874                         val = 0;
3875
3876                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3877                 case ASIC_REV_5717:
3878                 case ASIC_REV_57765:
3879                 case ASIC_REV_57766:
3880                 case ASIC_REV_5719:
3881                         /* If we advertised any EEE abilities above... */
3882                         if (val)
3883                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3884                                       MII_TG3_DSP_TAP26_RMRXSTO |
3885                                       MII_TG3_DSP_TAP26_OPCSINPT;
3886                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3887                         /* Fall through */
3888                 case ASIC_REV_5720:
3889                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3890                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3891                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3892                 }
3893
3894                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3895                 if (!err)
3896                         err = err2;
3897         }
3898
3899 done:
3900         return err;
3901 }
3902
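     /* Autoneg configuration above touches three register groups:
      * MII_ADVERTISE (10/100 abilities and flow control, composed with
      * the mii helpers), MII_CTRL1000 (gigabit abilities, forced to
      * master on 5701 A0/B0 as a workaround), and, on EEE-capable PHYs,
      * the clause-45 MDIO_AN_EEE_ADV register reached through the MMD
      * access registers, bracketed by SMDSP enable/disable.
      */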
3903 static void tg3_phy_copper_begin(struct tg3 *tp)
3904 {
3905         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
3906             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3907                 u32 adv, fc;
3908
3909                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3910                         adv = ADVERTISED_10baseT_Half |
3911                               ADVERTISED_10baseT_Full;
3912                         if (tg3_flag(tp, WOL_SPEED_100MB))
3913                                 adv |= ADVERTISED_100baseT_Half |
3914                                        ADVERTISED_100baseT_Full;
3915
3916                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
3917                 } else {
3918                         adv = tp->link_config.advertising;
3919                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3920                                 adv &= ~(ADVERTISED_1000baseT_Half |
3921                                          ADVERTISED_1000baseT_Full);
3922
3923                         fc = tp->link_config.flowctrl;
3924                 }
3925
3926                 tg3_phy_autoneg_cfg(tp, adv, fc);
3927
3928                 tg3_writephy(tp, MII_BMCR,
3929                              BMCR_ANENABLE | BMCR_ANRESTART);
3930         } else {
3931                 int i;
3932                 u32 bmcr, orig_bmcr;
3933
3934                 tp->link_config.active_speed = tp->link_config.speed;
3935                 tp->link_config.active_duplex = tp->link_config.duplex;
3936
3937                 bmcr = 0;
3938                 switch (tp->link_config.speed) {
3939                 default:
3940                 case SPEED_10:
3941                         break;
3942
3943                 case SPEED_100:
3944                         bmcr |= BMCR_SPEED100;
3945                         break;
3946
3947                 case SPEED_1000:
3948                         bmcr |= BMCR_SPEED1000;
3949                         break;
3950                 }
3951
3952                 if (tp->link_config.duplex == DUPLEX_FULL)
3953                         bmcr |= BMCR_FULLDPLX;
3954
3955                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3956                     (bmcr != orig_bmcr)) {
3957                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3958                         for (i = 0; i < 1500; i++) {
3959                                 u32 tmp;
3960
3961                                 udelay(10);
3962                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3963                                     tg3_readphy(tp, MII_BMSR, &tmp))
3964                                         continue;
3965                                 if (!(tmp & BMSR_LSTATUS)) {
3966                                         udelay(40);
3967                                         break;
3968                                 }
3969                         }
3970                         tg3_writephy(tp, MII_BMCR, bmcr);
3971                         udelay(40);
3972                 }
3973         }
3974 }
3975
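     /* In the forced-speed branch above, the PHY is first parked in
      * loopback and the loop waits for BMSR_LSTATUS to clear, so that
      * the old link drops cleanly before the new BMCR value is written;
      * presumably this avoids glitching the link partner with an
      * intermediate configuration.
      */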
3976 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3977 {
3978         int err;
3979
3980         /* Turn off tap power management. */
3981         /* Set Extended packet length bit */
3982         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3983
3984         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3985         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3986         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3987         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3988         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3989
3990         udelay(40);
3991
3992         return err;
3993 }
3994
3995 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
3996 {
3997         u32 advmsk, tgtadv, advertising;
3998
3999         advertising = tp->link_config.advertising;
4000         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4001
4002         advmsk = ADVERTISE_ALL;
4003         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4004                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4005                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4006         }
4007
4008         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4009                 return false;
4010
4011         if ((*lcladv & advmsk) != tgtadv)
4012                 return false;
4013
4014         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4015                 u32 tg3_ctrl;
4016
4017                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4018
4019                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4020                         return false;
4021
4022                 if (tgtadv &&
4023                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4024                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4025                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4026                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4027                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4028                 } else {
4029                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4030                 }
4031
4032                 if (tg3_ctrl != tgtadv)
4033                         return false;
4034         }
4035
4036         return true;
4037 }
4038
4039 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4040 {
4041         u32 lpeth = 0;
4042
4043         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4044                 u32 val;
4045
4046                 if (tg3_readphy(tp, MII_STAT1000, &val))
4047                         return false;
4048
4049                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4050         }
4051
4052         if (tg3_readphy(tp, MII_LPA, rmtadv))
4053                 return false;
4054
4055         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4056         tp->link_config.rmt_adv = lpeth;
4057
4058         return true;
4059 }
4060
4061 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4062 {
4063         int current_link_up;
4064         u32 bmsr, val;
4065         u32 lcl_adv, rmt_adv;
4066         u16 current_speed;
4067         u8 current_duplex;
4068         int i, err;
4069
4070         tw32(MAC_EVENT, 0);
4071
4072         tw32_f(MAC_STATUS,
4073              (MAC_STATUS_SYNC_CHANGED |
4074               MAC_STATUS_CFG_CHANGED |
4075               MAC_STATUS_MI_COMPLETION |
4076               MAC_STATUS_LNKSTATE_CHANGED));
4077         udelay(40);
4078
4079         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4080                 tw32_f(MAC_MI_MODE,
4081                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4082                 udelay(80);
4083         }
4084
4085         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4086
4087         /* Some third-party PHYs need to be reset on link going
4088          * down.
4089          */
4090         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4091              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4092              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4093             netif_carrier_ok(tp->dev)) {
4094                 tg3_readphy(tp, MII_BMSR, &bmsr);
4095                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4096                     !(bmsr & BMSR_LSTATUS))
4097                         force_reset = 1;
4098         }
4099         if (force_reset)
4100                 tg3_phy_reset(tp);
4101
4102         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4103                 tg3_readphy(tp, MII_BMSR, &bmsr);
4104                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4105                     !tg3_flag(tp, INIT_COMPLETE))
4106                         bmsr = 0;
4107
4108                 if (!(bmsr & BMSR_LSTATUS)) {
4109                         err = tg3_init_5401phy_dsp(tp);
4110                         if (err)
4111                                 return err;
4112
4113                         tg3_readphy(tp, MII_BMSR, &bmsr);
4114                         for (i = 0; i < 1000; i++) {
4115                                 udelay(10);
4116                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4117                                     (bmsr & BMSR_LSTATUS)) {
4118                                         udelay(40);
4119                                         break;
4120                                 }
4121                         }
4122
4123                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4124                             TG3_PHY_REV_BCM5401_B0 &&
4125                             !(bmsr & BMSR_LSTATUS) &&
4126                             tp->link_config.active_speed == SPEED_1000) {
4127                                 err = tg3_phy_reset(tp);
4128                                 if (!err)
4129                                         err = tg3_init_5401phy_dsp(tp);
4130                                 if (err)
4131                                         return err;
4132                         }
4133                 }
4134         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4135                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4136                 /* 5701 {A0,B0} CRC bug workaround */
4137                 tg3_writephy(tp, 0x15, 0x0a75);
4138                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4139                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4140                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4141         }
4142
4143         /* Clear pending interrupts... */
4144         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4145         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4146
4147         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4148                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4149         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4150                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4151
4152         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4153             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4154                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4155                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4156                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4157                 else
4158                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4159         }
4160
4161         current_link_up = 0;
4162         current_speed = SPEED_UNKNOWN;
4163         current_duplex = DUPLEX_UNKNOWN;
4164         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4165         tp->link_config.rmt_adv = 0;
4166
4167         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4168                 err = tg3_phy_auxctl_read(tp,
4169                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4170                                           &val);
4171                 if (!err && !(val & (1 << 10))) {
4172                         tg3_phy_auxctl_write(tp,
4173                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4174                                              val | (1 << 10));
4175                         goto relink;
4176                 }
4177         }
4178
4179         bmsr = 0;
4180         for (i = 0; i < 100; i++) {
4181                 tg3_readphy(tp, MII_BMSR, &bmsr);
4182                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4183                     (bmsr & BMSR_LSTATUS))
4184                         break;
4185                 udelay(40);
4186         }
4187
4188         if (bmsr & BMSR_LSTATUS) {
4189                 u32 aux_stat, bmcr;
4190
4191                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4192                 for (i = 0; i < 2000; i++) {
4193                         udelay(10);
4194                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4195                             aux_stat)
4196                                 break;
4197                 }
4198
4199                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4200                                              &current_speed,
4201                                              &current_duplex);
4202
4203                 bmcr = 0;
4204                 for (i = 0; i < 200; i++) {
4205                         tg3_readphy(tp, MII_BMCR, &bmcr);
4206                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4207                                 continue;
4208                         if (bmcr && bmcr != 0x7fff)
4209                                 break;
4210                         udelay(10);
4211                 }
4212
4213                 lcl_adv = 0;
4214                 rmt_adv = 0;
4215
4216                 tp->link_config.active_speed = current_speed;
4217                 tp->link_config.active_duplex = current_duplex;
4218
4219                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4220                         if ((bmcr & BMCR_ANENABLE) &&
4221                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4222                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4223                                 current_link_up = 1;
4224                 } else {
4225                         if (!(bmcr & BMCR_ANENABLE) &&
4226                             tp->link_config.speed == current_speed &&
4227                             tp->link_config.duplex == current_duplex &&
4228                             tp->link_config.flowctrl ==
4229                             tp->link_config.active_flowctrl) {
4230                                 current_link_up = 1;
4231                         }
4232                 }
4233
4234                 if (current_link_up == 1 &&
4235                     tp->link_config.active_duplex == DUPLEX_FULL) {
4236                         u32 reg, bit;
4237
4238                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4239                                 reg = MII_TG3_FET_GEN_STAT;
4240                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4241                         } else {
4242                                 reg = MII_TG3_EXT_STAT;
4243                                 bit = MII_TG3_EXT_STAT_MDIX;
4244                         }
4245
4246                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4247                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4248
4249                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4250                 }
4251         }
4252
4253 relink:
4254         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4255                 tg3_phy_copper_begin(tp);
4256
4257                 tg3_readphy(tp, MII_BMSR, &bmsr);
4258                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4259                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4260                         current_link_up = 1;
4261         }
4262
4263         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4264         if (current_link_up == 1) {
4265                 if (tp->link_config.active_speed == SPEED_100 ||
4266                     tp->link_config.active_speed == SPEED_10)
4267                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4268                 else
4269                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4270         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4271                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4272         else
4273                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4274
4275         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4276         if (tp->link_config.active_duplex == DUPLEX_HALF)
4277                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4278
4279         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4280                 if (current_link_up == 1 &&
4281                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4282                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4283                 else
4284                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4285         }
4286
4287         /* For reasons not fully understood, the Netgear GA302T PHY
4288          * does not send/receive packets without this setting.
4289          */
4290         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4291             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4292                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4293                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4294                 udelay(80);
4295         }
4296
4297         tw32_f(MAC_MODE, tp->mac_mode);
4298         udelay(40);
4299
4300         tg3_phy_eee_adjust(tp, current_link_up);
4301
4302         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4303                 /* Polled via timer. */
4304                 tw32_f(MAC_EVENT, 0);
4305         } else {
4306                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4307         }
4308         udelay(40);
4309
4310         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4311             current_link_up == 1 &&
4312             tp->link_config.active_speed == SPEED_1000 &&
4313             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4314                 udelay(120);
4315                 tw32_f(MAC_STATUS,
4316                      (MAC_STATUS_SYNC_CHANGED |
4317                       MAC_STATUS_CFG_CHANGED));
4318                 udelay(40);
4319                 tg3_write_mem(tp,
4320                               NIC_SRAM_FIRMWARE_MBOX,
4321                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4322         }
4323
4324         /* Prevent send BD corruption. */
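             /* On affected chips, send BDs can be corrupted at 10/100 speeds
              * with CLKREQ enabled, presumably because the PCIe reference
              * clock gets gated; disable CLKREQ there and re-enable it at
              * 1000 Mbps.
              */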
4325         if (tg3_flag(tp, CLKREQ_BUG)) {
4326                 u16 oldlnkctl, newlnkctl;
4327
4328                 pci_read_config_word(tp->pdev,
4329                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4330                                      &oldlnkctl);
4331                 if (tp->link_config.active_speed == SPEED_100 ||
4332                     tp->link_config.active_speed == SPEED_10)
4333                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4334                 else
4335                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4336                 if (newlnkctl != oldlnkctl)
4337                         pci_write_config_word(tp->pdev,
4338                                               pci_pcie_cap(tp->pdev) +
4339                                               PCI_EXP_LNKCTL, newlnkctl);
4340         }
4341
4342         if (current_link_up != netif_carrier_ok(tp->dev)) {
4343                 if (current_link_up)
4344                         netif_carrier_on(tp->dev);
4345                 else
4346                         netif_carrier_off(tp->dev);
4347                 tg3_link_report(tp);
4348         }
4349
4350         return 0;
4351 }
4352
4353 struct tg3_fiber_aneginfo {
4354         int state;
4355 #define ANEG_STATE_UNKNOWN              0
4356 #define ANEG_STATE_AN_ENABLE            1
4357 #define ANEG_STATE_RESTART_INIT         2
4358 #define ANEG_STATE_RESTART              3
4359 #define ANEG_STATE_DISABLE_LINK_OK      4
4360 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4361 #define ANEG_STATE_ABILITY_DETECT       6
4362 #define ANEG_STATE_ACK_DETECT_INIT      7
4363 #define ANEG_STATE_ACK_DETECT           8
4364 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4365 #define ANEG_STATE_COMPLETE_ACK         10
4366 #define ANEG_STATE_IDLE_DETECT_INIT     11
4367 #define ANEG_STATE_IDLE_DETECT          12
4368 #define ANEG_STATE_LINK_OK              13
4369 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4370 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4371
4372         u32 flags;
4373 #define MR_AN_ENABLE            0x00000001
4374 #define MR_RESTART_AN           0x00000002
4375 #define MR_AN_COMPLETE          0x00000004
4376 #define MR_PAGE_RX              0x00000008
4377 #define MR_NP_LOADED            0x00000010
4378 #define MR_TOGGLE_TX            0x00000020
4379 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4380 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4381 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4382 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4383 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4384 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4385 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4386 #define MR_TOGGLE_RX            0x00002000
4387 #define MR_NP_RX                0x00004000
4388
4389 #define MR_LINK_OK              0x80000000
4390
4391         unsigned long link_time, cur_time;
4392
4393         u32 ability_match_cfg;
4394         int ability_match_count;
4395
4396         char ability_match, idle_match, ack_match;
4397
4398         u32 txconfig, rxconfig;
4399 #define ANEG_CFG_NP             0x00000080
4400 #define ANEG_CFG_ACK            0x00000040
4401 #define ANEG_CFG_RF2            0x00000020
4402 #define ANEG_CFG_RF1            0x00000010
4403 #define ANEG_CFG_PS2            0x00000001
4404 #define ANEG_CFG_PS1            0x00008000
4405 #define ANEG_CFG_HD             0x00004000
4406 #define ANEG_CFG_FD             0x00002000
4407 #define ANEG_CFG_INVAL          0x00001f06
4408
4409 };
4410 #define ANEG_OK         0
4411 #define ANEG_DONE       1
4412 #define ANEG_TIMER_ENAB 2
4413 #define ANEG_FAILED     -1
4414
4415 #define ANEG_STATE_SETTLE_TIME  10000
4416
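     /* Software arbitration state machine for 1000BASE-X autonegotiation,
      * in the style of IEEE 802.3 clause 37.  Called once per 1 usec polling
      * tick from fiber_autoneg(); returns ANEG_OK or ANEG_TIMER_ENAB while
      * negotiation is in progress, ANEG_DONE on completion and ANEG_FAILED
      * on error.
      */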
4417 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4418                                    struct tg3_fiber_aneginfo *ap)
4419 {
4420         u16 flowctrl;
4421         unsigned long delta;
4422         u32 rx_cfg_reg;
4423         int ret;
4424
4425         if (ap->state == ANEG_STATE_UNKNOWN) {
4426                 ap->rxconfig = 0;
4427                 ap->link_time = 0;
4428                 ap->cur_time = 0;
4429                 ap->ability_match_cfg = 0;
4430                 ap->ability_match_count = 0;
4431                 ap->ability_match = 0;
4432                 ap->idle_match = 0;
4433                 ap->ack_match = 0;
4434         }
4435         ap->cur_time++;
4436
4437         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4438                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4439
4440                 if (rx_cfg_reg != ap->ability_match_cfg) {
4441                         ap->ability_match_cfg = rx_cfg_reg;
4442                         ap->ability_match = 0;
4443                         ap->ability_match_count = 0;
4444                 } else {
4445                         if (++ap->ability_match_count > 1) {
4446                                 ap->ability_match = 1;
4447                                 ap->ability_match_cfg = rx_cfg_reg;
4448                         }
4449                 }
4450                 if (rx_cfg_reg & ANEG_CFG_ACK)
4451                         ap->ack_match = 1;
4452                 else
4453                         ap->ack_match = 0;
4454
4455                 ap->idle_match = 0;
4456         } else {
4457                 ap->idle_match = 1;
4458                 ap->ability_match_cfg = 0;
4459                 ap->ability_match_count = 0;
4460                 ap->ability_match = 0;
4461                 ap->ack_match = 0;
4462
4463                 rx_cfg_reg = 0;
4464         }
4465
4466         ap->rxconfig = rx_cfg_reg;
4467         ret = ANEG_OK;
4468
4469         switch (ap->state) {
4470         case ANEG_STATE_UNKNOWN:
4471                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4472                         ap->state = ANEG_STATE_AN_ENABLE;
4473
4474                 /* fallthru */
4475         case ANEG_STATE_AN_ENABLE:
4476                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4477                 if (ap->flags & MR_AN_ENABLE) {
4478                         ap->link_time = 0;
4479                         ap->cur_time = 0;
4480                         ap->ability_match_cfg = 0;
4481                         ap->ability_match_count = 0;
4482                         ap->ability_match = 0;
4483                         ap->idle_match = 0;
4484                         ap->ack_match = 0;
4485
4486                         ap->state = ANEG_STATE_RESTART_INIT;
4487                 } else {
4488                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4489                 }
4490                 break;
4491
4492         case ANEG_STATE_RESTART_INIT:
4493                 ap->link_time = ap->cur_time;
4494                 ap->flags &= ~(MR_NP_LOADED);
4495                 ap->txconfig = 0;
4496                 tw32(MAC_TX_AUTO_NEG, 0);
4497                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4498                 tw32_f(MAC_MODE, tp->mac_mode);
4499                 udelay(40);
4500
4501                 ret = ANEG_TIMER_ENAB;
4502                 ap->state = ANEG_STATE_RESTART;
4503
4504                 /* fallthru */
4505         case ANEG_STATE_RESTART:
4506                 delta = ap->cur_time - ap->link_time;
4507                 if (delta > ANEG_STATE_SETTLE_TIME)
4508                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4509                 else
4510                         ret = ANEG_TIMER_ENAB;
4511                 break;
4512
4513         case ANEG_STATE_DISABLE_LINK_OK:
4514                 ret = ANEG_DONE;
4515                 break;
4516
4517         case ANEG_STATE_ABILITY_DETECT_INIT:
4518                 ap->flags &= ~(MR_TOGGLE_TX);
4519                 ap->txconfig = ANEG_CFG_FD;
4520                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4521                 if (flowctrl & ADVERTISE_1000XPAUSE)
4522                         ap->txconfig |= ANEG_CFG_PS1;
4523                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4524                         ap->txconfig |= ANEG_CFG_PS2;
4525                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4526                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4527                 tw32_f(MAC_MODE, tp->mac_mode);
4528                 udelay(40);
4529
4530                 ap->state = ANEG_STATE_ABILITY_DETECT;
4531                 break;
4532
4533         case ANEG_STATE_ABILITY_DETECT:
4534                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4535                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4536                 break;
4537
4538         case ANEG_STATE_ACK_DETECT_INIT:
4539                 ap->txconfig |= ANEG_CFG_ACK;
4540                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4541                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4542                 tw32_f(MAC_MODE, tp->mac_mode);
4543                 udelay(40);
4544
4545                 ap->state = ANEG_STATE_ACK_DETECT;
4546
4547                 /* fallthru */
4548         case ANEG_STATE_ACK_DETECT:
4549                 if (ap->ack_match != 0) {
4550                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4551                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4552                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4553                         } else {
4554                                 ap->state = ANEG_STATE_AN_ENABLE;
4555                         }
4556                 } else if (ap->ability_match != 0 &&
4557                            ap->rxconfig == 0) {
4558                         ap->state = ANEG_STATE_AN_ENABLE;
4559                 }
4560                 break;
4561
4562         case ANEG_STATE_COMPLETE_ACK_INIT:
4563                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4564                         ret = ANEG_FAILED;
4565                         break;
4566                 }
4567                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4568                                MR_LP_ADV_HALF_DUPLEX |
4569                                MR_LP_ADV_SYM_PAUSE |
4570                                MR_LP_ADV_ASYM_PAUSE |
4571                                MR_LP_ADV_REMOTE_FAULT1 |
4572                                MR_LP_ADV_REMOTE_FAULT2 |
4573                                MR_LP_ADV_NEXT_PAGE |
4574                                MR_TOGGLE_RX |
4575                                MR_NP_RX);
4576                 if (ap->rxconfig & ANEG_CFG_FD)
4577                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4578                 if (ap->rxconfig & ANEG_CFG_HD)
4579                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4580                 if (ap->rxconfig & ANEG_CFG_PS1)
4581                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4582                 if (ap->rxconfig & ANEG_CFG_PS2)
4583                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4584                 if (ap->rxconfig & ANEG_CFG_RF1)
4585                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4586                 if (ap->rxconfig & ANEG_CFG_RF2)
4587                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4588                 if (ap->rxconfig & ANEG_CFG_NP)
4589                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4590
4591                 ap->link_time = ap->cur_time;
4592
4593                 ap->flags ^= (MR_TOGGLE_TX);
4594                 if (ap->rxconfig & 0x0008)
4595                         ap->flags |= MR_TOGGLE_RX;
4596                 if (ap->rxconfig & ANEG_CFG_NP)
4597                         ap->flags |= MR_NP_RX;
4598                 ap->flags |= MR_PAGE_RX;
4599
4600                 ap->state = ANEG_STATE_COMPLETE_ACK;
4601                 ret = ANEG_TIMER_ENAB;
4602                 break;
4603
4604         case ANEG_STATE_COMPLETE_ACK:
4605                 if (ap->ability_match != 0 &&
4606                     ap->rxconfig == 0) {
4607                         ap->state = ANEG_STATE_AN_ENABLE;
4608                         break;
4609                 }
4610                 delta = ap->cur_time - ap->link_time;
4611                 if (delta > ANEG_STATE_SETTLE_TIME) {
4612                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4613                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4614                         } else {
4615                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4616                                     !(ap->flags & MR_NP_RX)) {
4617                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4618                                 } else {
4619                                         ret = ANEG_FAILED;
4620                                 }
4621                         }
4622                 }
4623                 break;
4624
4625         case ANEG_STATE_IDLE_DETECT_INIT:
4626                 ap->link_time = ap->cur_time;
4627                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4628                 tw32_f(MAC_MODE, tp->mac_mode);
4629                 udelay(40);
4630
4631                 ap->state = ANEG_STATE_IDLE_DETECT;
4632                 ret = ANEG_TIMER_ENAB;
4633                 break;
4634
4635         case ANEG_STATE_IDLE_DETECT:
4636                 if (ap->ability_match != 0 &&
4637                     ap->rxconfig == 0) {
4638                         ap->state = ANEG_STATE_AN_ENABLE;
4639                         break;
4640                 }
4641                 delta = ap->cur_time - ap->link_time;
4642                 if (delta > ANEG_STATE_SETTLE_TIME) {
4643                         /* XXX another gem from the Broadcom driver :( */
4644                         ap->state = ANEG_STATE_LINK_OK;
4645                 }
4646                 break;
4647
4648         case ANEG_STATE_LINK_OK:
4649                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4650                 ret = ANEG_DONE;
4651                 break;
4652
4653         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4654                 /* ??? unimplemented */
4655                 break;
4656
4657         case ANEG_STATE_NEXT_PAGE_WAIT:
4658                 /* ??? unimplemented */
4659                 break;
4660
4661         default:
4662                 ret = ANEG_FAILED;
4663                 break;
4664         }
4665
4666         return ret;
4667 }
4668
4669 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4670 {
4671         int res = 0;
4672         struct tg3_fiber_aneginfo aninfo;
4673         int status = ANEG_FAILED;
4674         unsigned int tick;
4675         u32 tmp;
4676
4677         tw32_f(MAC_TX_AUTO_NEG, 0);
4678
4679         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4680         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4681         udelay(40);
4682
4683         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4684         udelay(40);
4685
4686         memset(&aninfo, 0, sizeof(aninfo));
4687         aninfo.flags |= MR_AN_ENABLE;
4688         aninfo.state = ANEG_STATE_UNKNOWN;
4689         aninfo.cur_time = 0;
4690         tick = 0;
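             /* Each iteration burns 1 usec, so negotiation gets roughly
              * 195 ms to finish before we give up.
              */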
4691         while (++tick < 195000) {
4692                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4693                 if (status == ANEG_DONE || status == ANEG_FAILED)
4694                         break;
4695
4696                 udelay(1);
4697         }
4698
4699         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4700         tw32_f(MAC_MODE, tp->mac_mode);
4701         udelay(40);
4702
4703         *txflags = aninfo.txconfig;
4704         *rxflags = aninfo.flags;
4705
4706         if (status == ANEG_DONE &&
4707             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4708                              MR_LP_ADV_FULL_DUPLEX)))
4709                 res = 1;
4710
4711         return res;
4712 }
4713
4714 static void tg3_init_bcm8002(struct tg3 *tp)
4715 {
4716         u32 mac_status = tr32(MAC_STATUS);
4717         int i;
4718
4719         /* Reset when initializing for the first time or when we have a link. */
4720         if (tg3_flag(tp, INIT_COMPLETE) &&
4721             !(mac_status & MAC_STATUS_PCS_SYNCED))
4722                 return;
4723
4724         /* Set PLL lock range. */
4725         tg3_writephy(tp, 0x16, 0x8007);
4726
4727         /* SW reset */
4728         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4729
4730         /* Wait for reset to complete. */
4731         /* XXX schedule_timeout() ... */
4732         for (i = 0; i < 500; i++)
4733                 udelay(10);
4734
4735         /* Config mode; select PMA/Ch 1 regs. */
4736         tg3_writephy(tp, 0x10, 0x8411);
4737
4738         /* Enable auto-lock and comdet, select txclk for tx. */
4739         tg3_writephy(tp, 0x11, 0x0a10);
4740
4741         tg3_writephy(tp, 0x18, 0x00a0);
4742         tg3_writephy(tp, 0x16, 0x41ff);
4743
4744         /* Assert and deassert POR. */
4745         tg3_writephy(tp, 0x13, 0x0400);
4746         udelay(40);
4747         tg3_writephy(tp, 0x13, 0x0000);
4748
4749         tg3_writephy(tp, 0x11, 0x0a50);
4750         udelay(40);
4751         tg3_writephy(tp, 0x11, 0x0a10);
4752
4753         /* Wait for signal to stabilize */
4754         /* XXX schedule_timeout() ... */
4755         for (i = 0; i < 15000; i++)
4756                 udelay(10);
4757
4758         /* Deselect the channel register so we can read the PHYID
4759          * later.
4760          */
4761         tg3_writephy(tp, 0x10, 0x8011);
4762 }
4763
4764 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4765 {
4766         u16 flowctrl;
4767         u32 sg_dig_ctrl, sg_dig_status;
4768         u32 serdes_cfg, expected_sg_dig_ctrl;
4769         int workaround, port_a;
4770         int current_link_up;
4771
4772         serdes_cfg = 0;
4773         expected_sg_dig_ctrl = 0;
4774         workaround = 0;
4775         port_a = 1;
4776         current_link_up = 0;
4777
4778         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4779             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4780                 workaround = 1;
4781                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4782                         port_a = 0;
4783
4784                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4785                 /* preserve bits 20-23 for voltage regulator */
4786                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4787         }
4788
4789         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4790
4791         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4792                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4793                         if (workaround) {
4794                                 u32 val = serdes_cfg;
4795
4796                                 if (port_a)
4797                                         val |= 0xc010000;
4798                                 else
4799                                         val |= 0x4010000;
4800                                 tw32_f(MAC_SERDES_CFG, val);
4801                         }
4802
4803                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4804                 }
4805                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4806                         tg3_setup_flow_control(tp, 0, 0);
4807                         current_link_up = 1;
4808                 }
4809                 goto out;
4810         }
4811
4812         /* Want auto-negotiation.  */
4813         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4814
4815         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4816         if (flowctrl & ADVERTISE_1000XPAUSE)
4817                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4818         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4819                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4820
4821         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4822                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4823                     tp->serdes_counter &&
4824                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4825                                     MAC_STATUS_RCVD_CFG)) ==
4826                      MAC_STATUS_PCS_SYNCED)) {
4827                         tp->serdes_counter--;
4828                         current_link_up = 1;
4829                         goto out;
4830                 }
4831 restart_autoneg:
4832                 if (workaround)
4833                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4834                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4835                 udelay(5);
4836                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4837
4838                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4839                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4840         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4841                                  MAC_STATUS_SIGNAL_DET)) {
4842                 sg_dig_status = tr32(SG_DIG_STATUS);
4843                 mac_status = tr32(MAC_STATUS);
4844
4845                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4846                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4847                         u32 local_adv = 0, remote_adv = 0;
4848
4849                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4850                                 local_adv |= ADVERTISE_1000XPAUSE;
4851                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4852                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4853
4854                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4855                                 remote_adv |= LPA_1000XPAUSE;
4856                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4857                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4858
4859                         tp->link_config.rmt_adv =
4860                                            mii_adv_to_ethtool_adv_x(remote_adv);
4861
4862                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4863                         current_link_up = 1;
4864                         tp->serdes_counter = 0;
4865                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4866                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4867                         if (tp->serdes_counter)
4868                                 tp->serdes_counter--;
4869                         else {
4870                                 if (workaround) {
4871                                         u32 val = serdes_cfg;
4872
4873                                         if (port_a)
4874                                                 val |= 0xc010000;
4875                                         else
4876                                                 val |= 0x4010000;
4877
4878                                         tw32_f(MAC_SERDES_CFG, val);
4879                                 }
4880
4881                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4882                                 udelay(40);
4883
4884                                 /* Parallel detection: the link is up only
4885                                  * if we have PCS_SYNC and are not
4886                                  * receiving config code words. */
4887                                 mac_status = tr32(MAC_STATUS);
4888                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4889                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4890                                         tg3_setup_flow_control(tp, 0, 0);
4891                                         current_link_up = 1;
4892                                         tp->phy_flags |=
4893                                                 TG3_PHYFLG_PARALLEL_DETECT;
4894                                         tp->serdes_counter =
4895                                                 SERDES_PARALLEL_DET_TIMEOUT;
4896                                 } else
4897                                         goto restart_autoneg;
4898                         }
4899                 }
4900         } else {
4901                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4902                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4903         }
4904
4905 out:
4906         return current_link_up;
4907 }
4908
4909 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4910 {
4911         int current_link_up = 0;
4912
4913         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4914                 goto out;
4915
4916         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4917                 u32 txflags, rxflags;
4918                 int i;
4919
4920                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4921                         u32 local_adv = 0, remote_adv = 0;
4922
4923                         if (txflags & ANEG_CFG_PS1)
4924                                 local_adv |= ADVERTISE_1000XPAUSE;
4925                         if (txflags & ANEG_CFG_PS2)
4926                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4927
4928                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4929                                 remote_adv |= LPA_1000XPAUSE;
4930                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4931                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4932
4933                         tp->link_config.rmt_adv =
4934                                            mii_adv_to_ethtool_adv_x(remote_adv);
4935
4936                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4937
4938                         current_link_up = 1;
4939                 }
4940                 for (i = 0; i < 30; i++) {
4941                         udelay(20);
4942                         tw32_f(MAC_STATUS,
4943                                (MAC_STATUS_SYNC_CHANGED |
4944                                 MAC_STATUS_CFG_CHANGED));
4945                         udelay(40);
4946                         if ((tr32(MAC_STATUS) &
4947                              (MAC_STATUS_SYNC_CHANGED |
4948                               MAC_STATUS_CFG_CHANGED)) == 0)
4949                                 break;
4950                 }
4951
4952                 mac_status = tr32(MAC_STATUS);
4953                 if (current_link_up == 0 &&
4954                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4955                     !(mac_status & MAC_STATUS_RCVD_CFG))
4956                         current_link_up = 1;
4957         } else {
4958                 tg3_setup_flow_control(tp, 0, 0);
4959
4960                 /* Forcing 1000FD link up. */
4961                 current_link_up = 1;
4962
4963                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4964                 udelay(40);
4965
4966                 tw32_f(MAC_MODE, tp->mac_mode);
4967                 udelay(40);
4968         }
4969
4970 out:
4971         return current_link_up;
4972 }
4973
4974 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4975 {
4976         u32 orig_pause_cfg;
4977         u16 orig_active_speed;
4978         u8 orig_active_duplex;
4979         u32 mac_status;
4980         int current_link_up;
4981         int i;
4982
4983         orig_pause_cfg = tp->link_config.active_flowctrl;
4984         orig_active_speed = tp->link_config.active_speed;
4985         orig_active_duplex = tp->link_config.active_duplex;
4986
4987         if (!tg3_flag(tp, HW_AUTONEG) &&
4988             netif_carrier_ok(tp->dev) &&
4989             tg3_flag(tp, INIT_COMPLETE)) {
4990                 mac_status = tr32(MAC_STATUS);
4991                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4992                                MAC_STATUS_SIGNAL_DET |
4993                                MAC_STATUS_CFG_CHANGED |
4994                                MAC_STATUS_RCVD_CFG);
4995                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4996                                    MAC_STATUS_SIGNAL_DET)) {
4997                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4998                                             MAC_STATUS_CFG_CHANGED));
4999                         return 0;
5000                 }
5001         }
5002
5003         tw32_f(MAC_TX_AUTO_NEG, 0);
5004
5005         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5006         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5007         tw32_f(MAC_MODE, tp->mac_mode);
5008         udelay(40);
5009
5010         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5011                 tg3_init_bcm8002(tp);
5012
5013         /* Enable link change events even while serdes polling is in use. */
5014         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5015         udelay(40);
5016
5017         current_link_up = 0;
5018         tp->link_config.rmt_adv = 0;
5019         mac_status = tr32(MAC_STATUS);
5020
5021         if (tg3_flag(tp, HW_AUTONEG))
5022                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5023         else
5024                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5025
5026         tp->napi[0].hw_status->status =
5027                 (SD_STATUS_UPDATED |
5028                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5029
5030         for (i = 0; i < 100; i++) {
5031                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5032                                     MAC_STATUS_CFG_CHANGED));
5033                 udelay(5);
5034                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5035                                          MAC_STATUS_CFG_CHANGED |
5036                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5037                         break;
5038         }
5039
5040         mac_status = tr32(MAC_STATUS);
5041         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5042                 current_link_up = 0;
5043                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5044                     tp->serdes_counter == 0) {
5045                         tw32_f(MAC_MODE, (tp->mac_mode |
5046                                           MAC_MODE_SEND_CONFIGS));
5047                         udelay(1);
5048                         tw32_f(MAC_MODE, tp->mac_mode);
5049                 }
5050         }
5051
5052         if (current_link_up == 1) {
5053                 tp->link_config.active_speed = SPEED_1000;
5054                 tp->link_config.active_duplex = DUPLEX_FULL;
5055                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5056                                     LED_CTRL_LNKLED_OVERRIDE |
5057                                     LED_CTRL_1000MBPS_ON));
5058         } else {
5059                 tp->link_config.active_speed = SPEED_UNKNOWN;
5060                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5061                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5062                                     LED_CTRL_LNKLED_OVERRIDE |
5063                                     LED_CTRL_TRAFFIC_OVERRIDE));
5064         }
5065
5066         if (current_link_up != netif_carrier_ok(tp->dev)) {
5067                 if (current_link_up)
5068                         netif_carrier_on(tp->dev);
5069                 else
5070                         netif_carrier_off(tp->dev);
5071                 tg3_link_report(tp);
5072         } else {
5073                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5074                 if (orig_pause_cfg != now_pause_cfg ||
5075                     orig_active_speed != tp->link_config.active_speed ||
5076                     orig_active_duplex != tp->link_config.active_duplex)
5077                         tg3_link_report(tp);
5078         }
5079
5080         return 0;
5081 }
5082
5083 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5084 {
5085         int current_link_up, err = 0;
5086         u32 bmsr, bmcr;
5087         u16 current_speed;
5088         u8 current_duplex;
5089         u32 local_adv, remote_adv;
5090
5091         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5092         tw32_f(MAC_MODE, tp->mac_mode);
5093         udelay(40);
5094
5095         tw32(MAC_EVENT, 0);
5096
5097         tw32_f(MAC_STATUS,
5098              (MAC_STATUS_SYNC_CHANGED |
5099               MAC_STATUS_CFG_CHANGED |
5100               MAC_STATUS_MI_COMPLETION |
5101               MAC_STATUS_LNKSTATE_CHANGED));
5102         udelay(40);
5103
5104         if (force_reset)
5105                 tg3_phy_reset(tp);
5106
5107         current_link_up = 0;
5108         current_speed = SPEED_UNKNOWN;
5109         current_duplex = DUPLEX_UNKNOWN;
5110         tp->link_config.rmt_adv = 0;
5111
5112         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5113         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5114         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5115                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5116                         bmsr |= BMSR_LSTATUS;
5117                 else
5118                         bmsr &= ~BMSR_LSTATUS;
5119         }
5120
5121         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5122
5123         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5124             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5125                 /* do nothing, just check for link up at the end */
5126         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5127                 u32 adv, newadv;
5128
5129                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5130                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5131                                  ADVERTISE_1000XPAUSE |
5132                                  ADVERTISE_1000XPSE_ASYM |
5133                                  ADVERTISE_SLCT);
5134
5135                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5136                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5137
5138                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5139                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5140                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5141                         tg3_writephy(tp, MII_BMCR, bmcr);
5142
5143                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5144                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5145                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5146
5147                         return err;
5148                 }
5149         } else {
5150                 u32 new_bmcr;
5151
5152                 bmcr &= ~BMCR_SPEED1000;
5153                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5154
5155                 if (tp->link_config.duplex == DUPLEX_FULL)
5156                         new_bmcr |= BMCR_FULLDPLX;
5157
5158                 if (new_bmcr != bmcr) {
5159                         /* BMCR_SPEED1000 is a reserved bit that needs
5160                          * to be set on write.
5161                          */
5162                         new_bmcr |= BMCR_SPEED1000;
5163
5164                         /* Force a linkdown */
5165                         if (netif_carrier_ok(tp->dev)) {
5166                                 u32 adv;
5167
5168                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5169                                 adv &= ~(ADVERTISE_1000XFULL |
5170                                          ADVERTISE_1000XHALF |
5171                                          ADVERTISE_SLCT);
5172                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5173                                 tg3_writephy(tp, MII_BMCR, bmcr |
5174                                                            BMCR_ANRESTART |
5175                                                            BMCR_ANENABLE);
5176                                 udelay(10);
5177                                 netif_carrier_off(tp->dev);
5178                         }
5179                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5180                         bmcr = new_bmcr;
5181                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5182                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5183                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5184                             ASIC_REV_5714) {
5185                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5186                                         bmsr |= BMSR_LSTATUS;
5187                                 else
5188                                         bmsr &= ~BMSR_LSTATUS;
5189                         }
5190                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5191                 }
5192         }
5193
5194         if (bmsr & BMSR_LSTATUS) {
5195                 current_speed = SPEED_1000;
5196                 current_link_up = 1;
5197                 if (bmcr & BMCR_FULLDPLX)
5198                         current_duplex = DUPLEX_FULL;
5199                 else
5200                         current_duplex = DUPLEX_HALF;
5201
5202                 local_adv = 0;
5203                 remote_adv = 0;
5204
5205                 if (bmcr & BMCR_ANENABLE) {
5206                         u32 common;
5207
5208                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5209                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5210                         common = local_adv & remote_adv;
5211                         if (common & (ADVERTISE_1000XHALF |
5212                                       ADVERTISE_1000XFULL)) {
5213                                 if (common & ADVERTISE_1000XFULL)
5214                                         current_duplex = DUPLEX_FULL;
5215                                 else
5216                                         current_duplex = DUPLEX_HALF;
5217
5218                                 tp->link_config.rmt_adv =
5219                                            mii_adv_to_ethtool_adv_x(remote_adv);
5220                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5221                                 /* Link is up via parallel detect */
5222                         } else {
5223                                 current_link_up = 0;
5224                         }
5225                 }
5226         }
5227
5228         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5229                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5230
5231         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5232         if (tp->link_config.active_duplex == DUPLEX_HALF)
5233                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5234
5235         tw32_f(MAC_MODE, tp->mac_mode);
5236         udelay(40);
5237
5238         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5239
5240         tp->link_config.active_speed = current_speed;
5241         tp->link_config.active_duplex = current_duplex;
5242
5243         if (current_link_up != netif_carrier_ok(tp->dev)) {
5244                 if (current_link_up)
5245                         netif_carrier_on(tp->dev);
5246                 else {
5247                         netif_carrier_off(tp->dev);
5248                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5249                 }
5250                 tg3_link_report(tp);
5251         }
5252         return err;
5253 }
5254
5255 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5256 {
5257         if (tp->serdes_counter) {
5258                 /* Give autoneg time to complete. */
5259                 tp->serdes_counter--;
5260                 return;
5261         }
5262
5263         if (!netif_carrier_ok(tp->dev) &&
5264             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5265                 u32 bmcr;
5266
5267                 tg3_readphy(tp, MII_BMCR, &bmcr);
5268                 if (bmcr & BMCR_ANENABLE) {
5269                         u32 phy1, phy2;
5270
5271                         /* Select shadow register 0x1f */
5272                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5273                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5274
5275                         /* Select expansion interrupt status register */
5276                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5277                                          MII_TG3_DSP_EXP1_INT_STAT);
5278                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5279                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5280
5281                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5282                                 /* We have signal detect and are not
5283                                  * receiving config code words; the link
5284                                  * is up by parallel detection.
5285                                  */
5286
5287                                 bmcr &= ~BMCR_ANENABLE;
5288                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5289                                 tg3_writephy(tp, MII_BMCR, bmcr);
5290                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5291                         }
5292                 }
5293         } else if (netif_carrier_ok(tp->dev) &&
5294                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5295                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5296                 u32 phy2;
5297
5298                 /* Select expansion interrupt status register */
5299                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5300                                  MII_TG3_DSP_EXP1_INT_STAT);
5301                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5302                 if (phy2 & 0x20) {
5303                         u32 bmcr;
5304
5305                         /* Config code words received, turn on autoneg. */
5306                         tg3_readphy(tp, MII_BMCR, &bmcr);
5307                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5308
5309                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5310
5311                 }
5312         }
5313 }
5314
5315 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5316 {
5317         u32 val;
5318         int err;
5319
5320         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5321                 err = tg3_setup_fiber_phy(tp, force_reset);
5322         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5323                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5324         else
5325                 err = tg3_setup_copper_phy(tp, force_reset);
5326
5327         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5328                 u32 scale;
5329
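                     /* Rescale the GRC timer prescaler to match the reported
                      * MAC clock, apparently so the timer tick stays near
                      * 1 MHz (e.g. 62.5 MHz / 65).
                      */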
5330                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5331                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5332                         scale = 65;
5333                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5334                         scale = 6;
5335                 else
5336                         scale = 12;
5337
5338                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5339                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5340                 tw32(GRC_MISC_CFG, val);
5341         }
5342
5343         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5344               (6 << TX_LENGTHS_IPG_SHIFT);
5345         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5346                 val |= tr32(MAC_TX_LENGTHS) &
5347                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5348                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5349
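             /* Half-duplex gigabit uses an extended slot time, presumably
              * to account for carrier extension; everything else gets the
              * standard value.
              */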
5350         if (tp->link_config.active_speed == SPEED_1000 &&
5351             tp->link_config.active_duplex == DUPLEX_HALF)
5352                 tw32(MAC_TX_LENGTHS, val |
5353                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5354         else
5355                 tw32(MAC_TX_LENGTHS, val |
5356                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5357
5358         if (!tg3_flag(tp, 5705_PLUS)) {
5359                 if (netif_carrier_ok(tp->dev)) {
5360                         tw32(HOSTCC_STAT_COAL_TICKS,
5361                              tp->coal.stats_block_coalesce_usecs);
5362                 } else {
5363                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5364                 }
5365         }
5366
5367         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5368                 val = tr32(PCIE_PWR_MGMT_THRESH);
5369                 if (!netif_carrier_ok(tp->dev))
5370                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5371                               tp->pwrmgmt_thresh;
5372                 else
5373                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5374                 tw32(PCIE_PWR_MGMT_THRESH, val);
5375         }
5376
5377         return err;
5378 }
5379
5380 static inline int tg3_irq_sync(struct tg3 *tp)
5381 {
5382         return tp->irq_sync;
5383 }
5384
5385 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5386 {
5387         int i;
5388
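             /* Offset dst by the register base so each register value lands
              * at its own offset within the caller's dump buffer.
              */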
5389         dst = (u32 *)((u8 *)dst + off);
5390         for (i = 0; i < len; i += sizeof(u32))
5391                 *dst++ = tr32(off + i);
5392 }
5393
5394 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5395 {
5396         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5397         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5398         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5399         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5400         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5401         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5402         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5403         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5404         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5405         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5406         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5407         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5408         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5409         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5410         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5411         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5412         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5413         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5414         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5415
5416         if (tg3_flag(tp, SUPPORT_MSIX))
5417                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5418
5419         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5420         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5421         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5422         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5423         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5424         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5425         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5426         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5427
5428         if (!tg3_flag(tp, 5705_PLUS)) {
5429                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5430                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5431                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5432         }
5433
5434         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5435         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5436         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5437         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5438         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5439
5440         if (tg3_flag(tp, NVRAM))
5441                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5442 }
5443
5444 static void tg3_dump_state(struct tg3 *tp)
5445 {
5446         int i;
5447         u32 *regs;
5448
5449         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5450         if (!regs) {
5451                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5452                 return;
5453         }
5454
5455         if (tg3_flag(tp, PCI_EXPRESS)) {
5456                 /* Read up to but not including private PCI registers */
5457                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5458                         regs[i / sizeof(u32)] = tr32(i);
5459         } else
5460                 tg3_dump_legacy_regs(tp, regs);
5461
5462         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5463                 if (!regs[i + 0] && !regs[i + 1] &&
5464                     !regs[i + 2] && !regs[i + 3])
5465                         continue;
5466
5467                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5468                            i * 4,
5469                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5470         }
5471
5472         kfree(regs);
5473
5474         for (i = 0; i < tp->irq_cnt; i++) {
5475                 struct tg3_napi *tnapi = &tp->napi[i];
5476
5477                 /* SW status block */
5478                 netdev_err(tp->dev,
5479                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5480                            i,
5481                            tnapi->hw_status->status,
5482                            tnapi->hw_status->status_tag,
5483                            tnapi->hw_status->rx_jumbo_consumer,
5484                            tnapi->hw_status->rx_consumer,
5485                            tnapi->hw_status->rx_mini_consumer,
5486                            tnapi->hw_status->idx[0].rx_producer,
5487                            tnapi->hw_status->idx[0].tx_consumer);
5488
5489                 netdev_err(tp->dev,
5490                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5491                            i,
5492                            tnapi->last_tag, tnapi->last_irq_tag,
5493                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5494                            tnapi->rx_rcb_ptr,
5495                            tnapi->prodring.rx_std_prod_idx,
5496                            tnapi->prodring.rx_std_cons_idx,
5497                            tnapi->prodring.rx_jmb_prod_idx,
5498                            tnapi->prodring.rx_jmb_cons_idx);
5499         }
5500 }
5501
5502 /* This is called whenever we suspect that the system chipset is re-
5503  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5504  * is bogus tx completions. We try to recover by setting the
5505  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5506  * in the workqueue.
5507  */
5508 static void tg3_tx_recover(struct tg3 *tp)
5509 {
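             /* Recovery only makes sense when the reorder workaround is not
              * already active and mailbox writes go directly to the chip.
              */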
5510         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5511                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5512
5513         netdev_warn(tp->dev,
5514                     "The system may be re-ordering memory-mapped I/O "
5515                     "cycles to the network device, attempting to recover. "
5516                     "Please report the problem to the driver maintainer "
5517                     "and include system chipset information.\n");
5518
5519         spin_lock(&tp->lock);
5520         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5521         spin_unlock(&tp->lock);
5522 }
5523
5524 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5525 {
5526         /* Tell compiler to fetch tx indices from memory. */
5527         barrier();
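             /* Unsigned wraparound makes (prod - cons) & (size - 1) the ring
              * occupancy even after the indices wrap, e.g. with a 512-entry
              * ring, prod = 5 and cons = 510: (5 - 510) & 511 = 7 in flight.
              */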
5528         return tnapi->tx_pending -
5529                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5530 }
5531
5532 /* Tigon3 never reports partial packet sends.  So we do not
5533  * need special logic to handle SKBs that have not had all
5534  * of their frags sent yet, like SunGEM does.
5535  */
5536 static void tg3_tx(struct tg3_napi *tnapi)
5537 {
5538         struct tg3 *tp = tnapi->tp;
5539         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5540         u32 sw_idx = tnapi->tx_cons;
5541         struct netdev_queue *txq;
5542         int index = tnapi - tp->napi;
5543         unsigned int pkts_compl = 0, bytes_compl = 0;
5544
5545         if (tg3_flag(tp, ENABLE_TSS))
5546                 index--;
5547
5548         txq = netdev_get_tx_queue(tp->dev, index);
5549
5550         while (sw_idx != hw_idx) {
5551                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5552                 struct sk_buff *skb = ri->skb;
5553                 int i, tx_bug = 0;
5554
5555                 if (unlikely(skb == NULL)) {
5556                         tg3_tx_recover(tp);
5557                         return;
5558                 }
5559
5560                 pci_unmap_single(tp->pdev,
5561                                  dma_unmap_addr(ri, mapping),
5562                                  skb_headlen(skb),
5563                                  PCI_DMA_TODEVICE);
5564
5565                 ri->skb = NULL;
5566
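                     /* Skip the extra descriptors consumed when this buffer
                      * was split up, presumably by the transmit-side DMA
                      * workarounds.
                      */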
5567                 while (ri->fragmented) {
5568                         ri->fragmented = false;
5569                         sw_idx = NEXT_TX(sw_idx);
5570                         ri = &tnapi->tx_buffers[sw_idx];
5571                 }
5572
5573                 sw_idx = NEXT_TX(sw_idx);
5574
5575                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5576                         ri = &tnapi->tx_buffers[sw_idx];
5577                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5578                                 tx_bug = 1;
5579
5580                         pci_unmap_page(tp->pdev,
5581                                        dma_unmap_addr(ri, mapping),
5582                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5583                                        PCI_DMA_TODEVICE);
5584
5585                         while (ri->fragmented) {
5586                                 ri->fragmented = false;
5587                                 sw_idx = NEXT_TX(sw_idx);
5588                                 ri = &tnapi->tx_buffers[sw_idx];
5589                         }
5590
5591                         sw_idx = NEXT_TX(sw_idx);
5592                 }
5593
5594                 pkts_compl++;
5595                 bytes_compl += skb->len;
5596
5597                 dev_kfree_skb(skb);
5598
5599                 if (unlikely(tx_bug)) {
5600                         tg3_tx_recover(tp);
5601                         return;
5602                 }
5603         }
5604
5605         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5606
5607         tnapi->tx_cons = sw_idx;
5608
5609         /* Need to make the tx_cons update visible to tg3_start_xmit()
5610          * before checking for netif_queue_stopped().  Without the
5611          * memory barrier, there is a small possibility that tg3_start_xmit()
5612          * will miss it and cause the queue to be stopped forever.
5613          */
5614         smp_mb();
5615
5616         if (unlikely(netif_tx_queue_stopped(txq) &&
5617                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5618                 __netif_tx_lock(txq, smp_processor_id());
5619                 if (netif_tx_queue_stopped(txq) &&
5620                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5621                         netif_tx_wake_queue(txq);
5622                 __netif_tx_unlock(txq);
5623         }
5624 }
5625
5626 static void tg3_frag_free(bool is_frag, void *data)
5627 {
5628         if (is_frag)
5629                 put_page(virt_to_head_page(data));
5630         else
5631                 kfree(data);
5632 }
5633
5634 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5635 {
5636         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5637                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5638
5639         if (!ri->data)
5640                 return;
5641
5642         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5643                          map_sz, PCI_DMA_FROMDEVICE);
5644         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5645         ri->data = NULL;
5646 }
5647
5648
5649 /* Returns size of skb allocated or < 0 on error.
5650  *
5651  * We only need to fill in the address because the other members
5652  * of the RX descriptor are invariant, see tg3_init_rings.
5653  *
5654  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5655  * posting buffers we only dirty the first cache line of the RX
5656  * descriptor (containing the address).  Whereas for the RX status
5657  * buffers the cpu only reads the last cacheline of the RX descriptor
5658  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5659  */
5660 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5661                              u32 opaque_key, u32 dest_idx_unmasked,
5662                              unsigned int *frag_size)
5663 {
5664         struct tg3_rx_buffer_desc *desc;
5665         struct ring_info *map;
5666         u8 *data;
5667         dma_addr_t mapping;
5668         int skb_size, data_size, dest_idx;
5669
5670         switch (opaque_key) {
5671         case RXD_OPAQUE_RING_STD:
5672                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5673                 desc = &tpr->rx_std[dest_idx];
5674                 map = &tpr->rx_std_buffers[dest_idx];
5675                 data_size = tp->rx_pkt_map_sz;
5676                 break;
5677
5678         case RXD_OPAQUE_RING_JUMBO:
5679                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5680                 desc = &tpr->rx_jmb[dest_idx].std;
5681                 map = &tpr->rx_jmb_buffers[dest_idx];
5682                 data_size = TG3_RX_JMB_MAP_SZ;
5683                 break;
5684
5685         default:
5686                 return -EINVAL;
5687         }
5688
5689         /* Do not overwrite any of the map or rp information
5690          * until we are sure we can commit to a new buffer.
5691          *
5692          * Callers depend upon this behavior and assume that
5693          * we leave everything unchanged if we fail.
5694          */
5695         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5696                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5697         if (skb_size <= PAGE_SIZE) {
5698                 data = netdev_alloc_frag(skb_size);
5699                 *frag_size = skb_size;
5700         } else {
5701                 data = kmalloc(skb_size, GFP_ATOMIC);
5702                 *frag_size = 0;
5703         }
5704         if (!data)
5705                 return -ENOMEM;
5706
5707         mapping = pci_map_single(tp->pdev,
5708                                  data + TG3_RX_OFFSET(tp),
5709                                  data_size,
5710                                  PCI_DMA_FROMDEVICE);
5711         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
5712                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
5713                 return -EIO;
5714         }
5715
5716         map->data = data;
5717         dma_unmap_addr_set(map, mapping, mapping);
5718
5719         desc->addr_hi = ((u64)mapping >> 32);
5720         desc->addr_lo = ((u64)mapping & 0xffffffff);
5721
5722         return data_size;
5723 }
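
/* [Editorial illustration; not part of tg3.c]  A standalone sketch of the
 * size computation that picks netdev_alloc_frag() vs. kmalloc() above.
 * The cache-line size, page size, and skb_shared_info size are assumed
 * values, chosen only to make the arithmetic concrete.
 */
#include <stdio.h>

#define CACHE_BYTES   64                        /* assumed SMP_CACHE_BYTES  */
#define DATA_ALIGN(x) (((x) + CACHE_BYTES - 1) & ~(CACHE_BYTES - 1))
#define PAGE_SZ       4096                      /* assumed PAGE_SIZE        */
#define SHINFO_SZ     320                       /* assumed shared-info size */

int main(void)
{
        unsigned int data_size = 1536;          /* e.g. tp->rx_pkt_map_sz   */
        unsigned int rx_offset = 2;             /* e.g. TG3_RX_OFFSET(tp)   */
        unsigned int skb_size  = DATA_ALIGN(data_size + rx_offset) +
                                 DATA_ALIGN(SHINFO_SZ);

        /* Buffers that fit in a page come from the page-frag allocator;
         * anything larger falls back to kmalloc(), mirroring the branch
         * in tg3_alloc_rx_data() above. */
        printf("skb_size=%u -> %s\n", skb_size,
               skb_size <= PAGE_SZ ? "netdev_alloc_frag()" : "kmalloc()");
        return 0;
}
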
5724
5725 /* We only need to move over in the address because the other
5726  * members of the RX descriptor are invariant.  See notes above
5727  * tg3_alloc_rx_data for full details.
5728  */
5729 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5730                            struct tg3_rx_prodring_set *dpr,
5731                            u32 opaque_key, int src_idx,
5732                            u32 dest_idx_unmasked)
5733 {
5734         struct tg3 *tp = tnapi->tp;
5735         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5736         struct ring_info *src_map, *dest_map;
5737         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5738         int dest_idx;
5739
5740         switch (opaque_key) {
5741         case RXD_OPAQUE_RING_STD:
5742                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5743                 dest_desc = &dpr->rx_std[dest_idx];
5744                 dest_map = &dpr->rx_std_buffers[dest_idx];
5745                 src_desc = &spr->rx_std[src_idx];
5746                 src_map = &spr->rx_std_buffers[src_idx];
5747                 break;
5748
5749         case RXD_OPAQUE_RING_JUMBO:
5750                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5751                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5752                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5753                 src_desc = &spr->rx_jmb[src_idx].std;
5754                 src_map = &spr->rx_jmb_buffers[src_idx];
5755                 break;
5756
5757         default:
5758                 return;
5759         }
5760
5761         dest_map->data = src_map->data;
5762         dma_unmap_addr_set(dest_map, mapping,
5763                            dma_unmap_addr(src_map, mapping));
5764         dest_desc->addr_hi = src_desc->addr_hi;
5765         dest_desc->addr_lo = src_desc->addr_lo;
5766
5767         /* Ensure that the update to the skb happens after the physical
5768          * addresses have been transferred to the new BD location.
5769          */
5770         smp_wmb();
5771
5772         src_map->data = NULL;
5773 }
5774
5775 /* The RX ring scheme is composed of multiple rings which post fresh
5776  * buffers to the chip, and one special ring the chip uses to report
5777  * status back to the host.
5778  *
5779  * The special ring reports the status of received packets to the
5780  * host.  The chip does not write into the original descriptor the
5781  * RX buffer was obtained from.  The chip simply takes the original
5782  * descriptor as provided by the host, updates the status and length
5783  * field, then writes this into the next status ring entry.
5784  *
5785  * Each ring the host uses to post buffers to the chip is described
5786  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5787  * it is first placed into the on-chip RAM.  When the packet's length is
5788  * known, the chip walks down the TG3_BDINFO entries to select the ring.
5789  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
5790  * whose MAXLEN covers the new packet's length is chosen.
5791  *
5792  * The "separate ring for rx status" scheme may sound odd, but it makes
5793  * sense from a cache coherency perspective.  If only the host writes
5794  * to the buffer post rings, and only the chip writes to the rx status
5795  * rings, then cache lines never move beyond shared-modified state.
5796  * If both the host and chip were to write into the same ring, cache line
5797  * eviction could occur since both entities want it in an exclusive state.
5798  */
5799 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5800 {
5801         struct tg3 *tp = tnapi->tp;
5802         u32 work_mask, rx_std_posted = 0;
5803         u32 std_prod_idx, jmb_prod_idx;
5804         u32 sw_idx = tnapi->rx_rcb_ptr;
5805         u16 hw_idx;
5806         int received;
5807         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5808
5809         hw_idx = *(tnapi->rx_rcb_prod_idx);
5810         /*
5811          * We need to order the read of hw_idx and the read of
5812          * the opaque cookie.
5813          */
5814         rmb();
5815         work_mask = 0;
5816         received = 0;
5817         std_prod_idx = tpr->rx_std_prod_idx;
5818         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5819         while (sw_idx != hw_idx && budget > 0) {
5820                 struct ring_info *ri;
5821                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5822                 unsigned int len;
5823                 struct sk_buff *skb;
5824                 dma_addr_t dma_addr;
5825                 u32 opaque_key, desc_idx, *post_ptr;
5826                 u8 *data;
5827
5828                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5829                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5830                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5831                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5832                         dma_addr = dma_unmap_addr(ri, mapping);
5833                         data = ri->data;
5834                         post_ptr = &std_prod_idx;
5835                         rx_std_posted++;
5836                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5837                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5838                         dma_addr = dma_unmap_addr(ri, mapping);
5839                         data = ri->data;
5840                         post_ptr = &jmb_prod_idx;
5841                 } else
5842                         goto next_pkt_nopost;
5843
5844                 work_mask |= opaque_key;
5845
5846                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5847                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5848                 drop_it:
5849                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5850                                        desc_idx, *post_ptr);
5851                 drop_it_no_recycle:
5852                         /* Other statistics are tracked by the card. */
5853                         tp->rx_dropped++;
5854                         goto next_pkt;
5855                 }
5856
5857                 prefetch(data + TG3_RX_OFFSET(tp));
5858                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5859                       ETH_FCS_LEN;
5860
5861                 if (len > TG3_RX_COPY_THRESH(tp)) {
5862                         int skb_size;
5863                         unsigned int frag_size;
5864
5865                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5866                                                     *post_ptr, &frag_size);
5867                         if (skb_size < 0)
5868                                 goto drop_it;
5869
5870                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5871                                          PCI_DMA_FROMDEVICE);
5872
5873                         skb = build_skb(data, frag_size);
5874                         if (!skb) {
5875                                 tg3_frag_free(frag_size != 0, data);
5876                                 goto drop_it_no_recycle;
5877                         }
5878                         skb_reserve(skb, TG3_RX_OFFSET(tp));
5879                         /* Ensure that the update to the data happens
5880                          * after the usage of the old DMA mapping.
5881                          */
5882                         smp_wmb();
5883
5884                         ri->data = NULL;
5885
5886                 } else {
5887                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5888                                        desc_idx, *post_ptr);
5889
5890                         skb = netdev_alloc_skb(tp->dev,
5891                                                len + TG3_RAW_IP_ALIGN);
5892                         if (skb == NULL)
5893                                 goto drop_it_no_recycle;
5894
5895                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
5896                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5897                         memcpy(skb->data,
5898                                data + TG3_RX_OFFSET(tp),
5899                                len);
5900                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5901                 }
5902
5903                 skb_put(skb, len);
5904                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5905                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5906                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5907                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5908                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5909                 else
5910                         skb_checksum_none_assert(skb);
5911
5912                 skb->protocol = eth_type_trans(skb, tp->dev);
5913
5914                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5915                     skb->protocol != htons(ETH_P_8021Q)) {
5916                         dev_kfree_skb(skb);
5917                         goto drop_it_no_recycle;
5918                 }
5919
5920                 if (desc->type_flags & RXD_FLAG_VLAN &&
5921                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5922                         __vlan_hwaccel_put_tag(skb,
5923                                                desc->err_vlan & RXD_VLAN_MASK);
5924
5925                 napi_gro_receive(&tnapi->napi, skb);
5926
5927                 received++;
5928                 budget--;
5929
5930 next_pkt:
5931                 (*post_ptr)++;
5932
5933                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5934                         tpr->rx_std_prod_idx = std_prod_idx &
5935                                                tp->rx_std_ring_mask;
5936                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5937                                      tpr->rx_std_prod_idx);
5938                         work_mask &= ~RXD_OPAQUE_RING_STD;
5939                         rx_std_posted = 0;
5940                 }
5941 next_pkt_nopost:
5942                 sw_idx++;
5943                 sw_idx &= tp->rx_ret_ring_mask;
5944
5945                 /* Refresh hw_idx to see if there is new work */
5946                 if (sw_idx == hw_idx) {
5947                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5948                         rmb();
5949                 }
5950         }
5951
5952         /* ACK the status ring. */
5953         tnapi->rx_rcb_ptr = sw_idx;
5954         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5955
5956         /* Refill RX ring(s). */
5957         if (!tg3_flag(tp, ENABLE_RSS)) {
5958                 /* Sync BD data before updating mailbox */
5959                 wmb();
5960
5961                 if (work_mask & RXD_OPAQUE_RING_STD) {
5962                         tpr->rx_std_prod_idx = std_prod_idx &
5963                                                tp->rx_std_ring_mask;
5964                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5965                                      tpr->rx_std_prod_idx);
5966                 }
5967                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5968                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5969                                                tp->rx_jmb_ring_mask;
5970                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5971                                      tpr->rx_jmb_prod_idx);
5972                 }
5973                 mmiowb();
5974         } else if (work_mask) {
5975                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5976                  * updated before the producer indices can be updated.
5977                  */
5978                 smp_wmb();
5979
5980                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5981                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5982
5983                 if (tnapi != &tp->napi[1]) {
5984                         tp->rx_refill = true;
5985                         napi_schedule(&tp->napi[1].napi);
5986                 }
5987         }
5988
5989         return received;
5990 }
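
/* [Editorial illustration; not part of tg3.c]  Decoding the opaque cookie
 * that the chip echoes back in each status-ring entry, as done at the top
 * of the loop in tg3_rx() above.  The mask values here are hypothetical,
 * not the real RXD_OPAQUE_* constants from tg3.h.
 */
#include <stdint.h>
#include <stdio.h>

#define IDX_MASK    0x0000ffffu  /* assumed: low bits carry the ring index */
#define RING_MASK   0x00030000u  /* assumed: next bits identify the ring   */
#define RING_STD    0x00010000u
#define RING_JUMBO  0x00020000u

int main(void)
{
        /* The host posted buffer 42 on the jumbo ring; the chip hands the
         * same cookie back in the status entry for the completed packet. */
        uint32_t opaque = RING_JUMBO | 42;
        uint32_t idx = opaque & IDX_MASK;

        switch (opaque & RING_MASK) {
        case RING_STD:
                printf("standard ring, buffer %u\n", idx);
                break;
        case RING_JUMBO:
                printf("jumbo ring, buffer %u\n", idx);
                break;
        default:
                printf("unknown ring, nothing reposted\n");
        }
        return 0;
}
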
5991
5992 static void tg3_poll_link(struct tg3 *tp)
5993 {
5994         /* handle link change and other phy events */
5995         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5996                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5997
5998                 if (sblk->status & SD_STATUS_LINK_CHG) {
5999                         sblk->status = SD_STATUS_UPDATED |
6000                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6001                         spin_lock(&tp->lock);
6002                         if (tg3_flag(tp, USE_PHYLIB)) {
6003                                 tw32_f(MAC_STATUS,
6004                                      (MAC_STATUS_SYNC_CHANGED |
6005                                       MAC_STATUS_CFG_CHANGED |
6006                                       MAC_STATUS_MI_COMPLETION |
6007                                       MAC_STATUS_LNKSTATE_CHANGED));
6008                                 udelay(40);
6009                         } else
6010                                 tg3_setup_phy(tp, 0);
6011                         spin_unlock(&tp->lock);
6012                 }
6013         }
6014 }
6015
6016 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6017                                 struct tg3_rx_prodring_set *dpr,
6018                                 struct tg3_rx_prodring_set *spr)
6019 {
6020         u32 si, di, cpycnt, src_prod_idx;
6021         int i, err = 0;
6022
6023         while (1) {
6024                 src_prod_idx = spr->rx_std_prod_idx;
6025
6026                 /* Make sure updates to the rx_std_buffers[] entries and the
6027                  * standard producer index are seen in the correct order.
6028                  */
6029                 smp_rmb();
6030
6031                 if (spr->rx_std_cons_idx == src_prod_idx)
6032                         break;
6033
6034                 if (spr->rx_std_cons_idx < src_prod_idx)
6035                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6036                 else
6037                         cpycnt = tp->rx_std_ring_mask + 1 -
6038                                  spr->rx_std_cons_idx;
6039
6040                 cpycnt = min(cpycnt,
6041                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6042
6043                 si = spr->rx_std_cons_idx;
6044                 di = dpr->rx_std_prod_idx;
6045
6046                 for (i = di; i < di + cpycnt; i++) {
6047                         if (dpr->rx_std_buffers[i].data) {
6048                                 cpycnt = i - di;
6049                                 err = -ENOSPC;
6050                                 break;
6051                         }
6052                 }
6053
6054                 if (!cpycnt)
6055                         break;
6056
6057                 /* Ensure that updates to the rx_std_buffers ring and the
6058                  * shadowed hardware producer ring from tg3_recycle_skb() are
6059                  * ordered correctly WRT the skb check above.
6060                  */
6061                 smp_rmb();
6062
6063                 memcpy(&dpr->rx_std_buffers[di],
6064                        &spr->rx_std_buffers[si],
6065                        cpycnt * sizeof(struct ring_info));
6066
6067                 for (i = 0; i < cpycnt; i++, di++, si++) {
6068                         struct tg3_rx_buffer_desc *sbd, *dbd;
6069                         sbd = &spr->rx_std[si];
6070                         dbd = &dpr->rx_std[di];
6071                         dbd->addr_hi = sbd->addr_hi;
6072                         dbd->addr_lo = sbd->addr_lo;
6073                 }
6074
6075                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6076                                        tp->rx_std_ring_mask;
6077                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6078                                        tp->rx_std_ring_mask;
6079         }
6080
6081         while (1) {
6082                 src_prod_idx = spr->rx_jmb_prod_idx;
6083
6084                 /* Make sure updates to the rx_jmb_buffers[] entries and
6085                  * the jumbo producer index are seen in the correct order.
6086                  */
6087                 smp_rmb();
6088
6089                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6090                         break;
6091
6092                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6093                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6094                 else
6095                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6096                                  spr->rx_jmb_cons_idx;
6097
6098                 cpycnt = min(cpycnt,
6099                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6100
6101                 si = spr->rx_jmb_cons_idx;
6102                 di = dpr->rx_jmb_prod_idx;
6103
6104                 for (i = di; i < di + cpycnt; i++) {
6105                         if (dpr->rx_jmb_buffers[i].data) {
6106                                 cpycnt = i - di;
6107                                 err = -ENOSPC;
6108                                 break;
6109                         }
6110                 }
6111
6112                 if (!cpycnt)
6113                         break;
6114
6115                 /* Ensure that updates to the rx_jmb_buffers ring and the
6116                  * shadowed hardware producer ring from tg3_recycle_skb() are
6117                  * ordered correctly WRT the skb check above.
6118                  */
6119                 smp_rmb();
6120
6121                 memcpy(&dpr->rx_jmb_buffers[di],
6122                        &spr->rx_jmb_buffers[si],
6123                        cpycnt * sizeof(struct ring_info));
6124
6125                 for (i = 0; i < cpycnt; i++, di++, si++) {
6126                         struct tg3_rx_buffer_desc *sbd, *dbd;
6127                         sbd = &spr->rx_jmb[si].std;
6128                         dbd = &dpr->rx_jmb[di].std;
6129                         dbd->addr_hi = sbd->addr_hi;
6130                         dbd->addr_lo = sbd->addr_lo;
6131                 }
6132
6133                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6134                                        tp->rx_jmb_ring_mask;
6135                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6136                                        tp->rx_jmb_ring_mask;
6137         }
6138
6139         return err;
6140 }
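
/* [Editorial illustration; not part of tg3.c]  The contiguous-chunk
 * arithmetic used twice in tg3_rx_prodring_xfer() above, isolated: copy
 * only up to the end of the power-of-two-sized ring, and only as far as
 * the destination producer can advance without wrapping.  The real code
 * additionally stops early at occupied destination slots.
 */
#include <stdio.h>

static unsigned int chunk(unsigned int cons, unsigned int prod,
                          unsigned int dst_prod, unsigned int mask)
{
        unsigned int cpycnt;

        if (cons == prod)
                return 0;                       /* nothing to transfer      */
        if (cons < prod)
                cpycnt = prod - cons;           /* no wrap on source side   */
        else
                cpycnt = mask + 1 - cons;       /* stop at end of the ring  */

        /* also stop where the destination ring would wrap */
        if (cpycnt > mask + 1 - dst_prod)
                cpycnt = mask + 1 - dst_prod;
        return cpycnt;
}

int main(void)
{
        /* 512-entry ring: consumer at 500, producer wrapped around to 10;
         * only the 12 entries up to index 511 are copied this pass. */
        printf("%u\n", chunk(500, 10, 0, 511));
        return 0;
}
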
6141
6142 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6143 {
6144         struct tg3 *tp = tnapi->tp;
6145
6146         /* run TX completion thread */
6147         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6148                 tg3_tx(tnapi);
6149                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6150                         return work_done;
6151         }
6152
6153         if (!tnapi->rx_rcb_prod_idx)
6154                 return work_done;
6155
6156         /* run RX thread, within the bounds set by NAPI.
6157          * All RX "locking" is done by ensuring outside
6158          * code synchronizes with tg3->napi.poll()
6159          */
6160         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6161                 work_done += tg3_rx(tnapi, budget - work_done);
6162
6163         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6164                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6165                 int i, err = 0;
6166                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6167                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6168
6169                 tp->rx_refill = false;
6170                 for (i = 1; i < tp->irq_cnt; i++)
6171                         err |= tg3_rx_prodring_xfer(tp, dpr,
6172                                                     &tp->napi[i].prodring);
6173
6174                 wmb();
6175
6176                 if (std_prod_idx != dpr->rx_std_prod_idx)
6177                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6178                                      dpr->rx_std_prod_idx);
6179
6180                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6181                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6182                                      dpr->rx_jmb_prod_idx);
6183
6184                 mmiowb();
6185
6186                 if (err)
6187                         tw32_f(HOSTCC_MODE, tp->coal_now);
6188         }
6189
6190         return work_done;
6191 }
6192
6193 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6194 {
6195         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6196                 schedule_work(&tp->reset_task);
6197 }
6198
6199 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6200 {
6201         cancel_work_sync(&tp->reset_task);
6202         tg3_flag_clear(tp, RESET_TASK_PENDING);
6203         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6204 }
6205
6206 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6207 {
6208         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6209         struct tg3 *tp = tnapi->tp;
6210         int work_done = 0;
6211         struct tg3_hw_status *sblk = tnapi->hw_status;
6212
6213         while (1) {
6214                 work_done = tg3_poll_work(tnapi, work_done, budget);
6215
6216                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6217                         goto tx_recovery;
6218
6219                 if (unlikely(work_done >= budget))
6220                         break;
6221
6222                 /* tp->last_tag is used in tg3_int_reenable() below
6223                  * to tell the hw how much work has been processed,
6224                  * so we must read it before checking for more work.
6225                  */
6226                 tnapi->last_tag = sblk->status_tag;
6227                 tnapi->last_irq_tag = tnapi->last_tag;
6228                 rmb();
6229
6230                 /* check for RX/TX work to do */
6231                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6232                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6233
6234                         /* This test is not race-free, but it reduces
6235                          * the number of interrupts by looping again.
6236                          */
6237                         if (tnapi == &tp->napi[1] && tp->rx_refill)
6238                                 continue;
6239
6240                         napi_complete(napi);
6241                         /* Reenable interrupts. */
6242                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6243
6244                         /* This test is synchronized by napi_schedule()
6245                          * and napi_complete() to close the race condition.
6246                          */
6247                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6248                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
6249                                                   HOSTCC_MODE_ENABLE |
6250                                                   tnapi->coal_now);
6251                         }
6252                         mmiowb();
6253                         break;
6254                 }
6255         }
6256
6257         return work_done;
6258
6259 tx_recovery:
6260         /* work_done is guaranteed to be less than budget. */
6261         napi_complete(napi);
6262         tg3_reset_task_schedule(tp);
6263         return work_done;
6264 }
6265
6266 static void tg3_process_error(struct tg3 *tp)
6267 {
6268         u32 val;
6269         bool real_error = false;
6270
6271         if (tg3_flag(tp, ERROR_PROCESSED))
6272                 return;
6273
6274         /* Check Flow Attention register */
6275         val = tr32(HOSTCC_FLOW_ATTN);
6276         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6277                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6278                 real_error = true;
6279         }
6280
6281         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6282                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6283                 real_error = true;
6284         }
6285
6286         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6287                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6288                 real_error = true;
6289         }
6290
6291         if (!real_error)
6292                 return;
6293
6294         tg3_dump_state(tp);
6295
6296         tg3_flag_set(tp, ERROR_PROCESSED);
6297         tg3_reset_task_schedule(tp);
6298 }
6299
6300 static int tg3_poll(struct napi_struct *napi, int budget)
6301 {
6302         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6303         struct tg3 *tp = tnapi->tp;
6304         int work_done = 0;
6305         struct tg3_hw_status *sblk = tnapi->hw_status;
6306
6307         while (1) {
6308                 if (sblk->status & SD_STATUS_ERROR)
6309                         tg3_process_error(tp);
6310
6311                 tg3_poll_link(tp);
6312
6313                 work_done = tg3_poll_work(tnapi, work_done, budget);
6314
6315                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6316                         goto tx_recovery;
6317
6318                 if (unlikely(work_done >= budget))
6319                         break;
6320
6321                 if (tg3_flag(tp, TAGGED_STATUS)) {
6322                         /* tp->last_tag is used in tg3_int_reenable() below
6323                          * to tell the hw how much work has been processed,
6324                          * so we must read it before checking for more work.
6325                          */
6326                         tnapi->last_tag = sblk->status_tag;
6327                         tnapi->last_irq_tag = tnapi->last_tag;
6328                         rmb();
6329                 } else
6330                         sblk->status &= ~SD_STATUS_UPDATED;
6331
6332                 if (likely(!tg3_has_work(tnapi))) {
6333                         napi_complete(napi);
6334                         tg3_int_reenable(tnapi);
6335                         break;
6336                 }
6337         }
6338
6339         return work_done;
6340
6341 tx_recovery:
6342         /* work_done is guaranteed to be less than budget. */
6343         napi_complete(napi);
6344         tg3_reset_task_schedule(tp);
6345         return work_done;
6346 }
6347
6348 static void tg3_napi_disable(struct tg3 *tp)
6349 {
6350         int i;
6351
6352         for (i = tp->irq_cnt - 1; i >= 0; i--)
6353                 napi_disable(&tp->napi[i].napi);
6354 }
6355
6356 static void tg3_napi_enable(struct tg3 *tp)
6357 {
6358         int i;
6359
6360         for (i = 0; i < tp->irq_cnt; i++)
6361                 napi_enable(&tp->napi[i].napi);
6362 }
6363
6364 static void tg3_napi_init(struct tg3 *tp)
6365 {
6366         int i;
6367
6368         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6369         for (i = 1; i < tp->irq_cnt; i++)
6370                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6371 }
6372
6373 static void tg3_napi_fini(struct tg3 *tp)
6374 {
6375         int i;
6376
6377         for (i = 0; i < tp->irq_cnt; i++)
6378                 netif_napi_del(&tp->napi[i].napi);
6379 }
6380
6381 static inline void tg3_netif_stop(struct tg3 *tp)
6382 {
6383         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6384         tg3_napi_disable(tp);
6385         netif_tx_disable(tp->dev);
6386 }
6387
6388 static inline void tg3_netif_start(struct tg3 *tp)
6389 {
6390         /* NOTE: unconditional netif_tx_wake_all_queues is only
6391          * appropriate so long as all callers are assured to
6392          * have free tx slots (such as after tg3_init_hw)
6393          */
6394         netif_tx_wake_all_queues(tp->dev);
6395
6396         tg3_napi_enable(tp);
6397         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6398         tg3_enable_ints(tp);
6399 }
6400
6401 static void tg3_irq_quiesce(struct tg3 *tp)
6402 {
6403         int i;
6404
6405         BUG_ON(tp->irq_sync);
6406
6407         tp->irq_sync = 1;
6408         smp_mb();
6409
6410         for (i = 0; i < tp->irq_cnt; i++)
6411                 synchronize_irq(tp->napi[i].irq_vec);
6412 }
6413
6414 /* Fully shut down all tg3 driver activity elsewhere in the system.
6415  * If irq_sync is non-zero, the IRQ handler must be synchronized with
6416  * as well.  Most of the time this is only necessary when shutting
6417  * down the device.
6418  */
6419 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6420 {
6421         spin_lock_bh(&tp->lock);
6422         if (irq_sync)
6423                 tg3_irq_quiesce(tp);
6424 }
6425
6426 static inline void tg3_full_unlock(struct tg3 *tp)
6427 {
6428         spin_unlock_bh(&tp->lock);
6429 }
6430
6431 /* One-shot MSI handler - the chip automatically disables the interrupt
6432  * after sending the MSI, so the driver doesn't have to.
6433  */
6434 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6435 {
6436         struct tg3_napi *tnapi = dev_id;
6437         struct tg3 *tp = tnapi->tp;
6438
6439         prefetch(tnapi->hw_status);
6440         if (tnapi->rx_rcb)
6441                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6442
6443         if (likely(!tg3_irq_sync(tp)))
6444                 napi_schedule(&tnapi->napi);
6445
6446         return IRQ_HANDLED;
6447 }
6448
6449 /* MSI ISR - No need to check for interrupt sharing and no need to
6450  * flush status block and interrupt mailbox. PCI ordering rules
6451  * guarantee that MSI will arrive after the status block.
6452  */
6453 static irqreturn_t tg3_msi(int irq, void *dev_id)
6454 {
6455         struct tg3_napi *tnapi = dev_id;
6456         struct tg3 *tp = tnapi->tp;
6457
6458         prefetch(tnapi->hw_status);
6459         if (tnapi->rx_rcb)
6460                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6461         /*
6462          * Writing any value to intr-mbox-0 clears PCI INTA# and
6463          * chip-internal interrupt pending events.
6464          * Writing non-zero to intr-mbox-0 additionally tells the
6465          * NIC to stop sending us irqs, engaging "in-intr-handler"
6466          * event coalescing.
6467          */
6468         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6469         if (likely(!tg3_irq_sync(tp)))
6470                 napi_schedule(&tnapi->napi);
6471
6472         return IRQ_RETVAL(1);
6473 }
6474
6475 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6476 {
6477         struct tg3_napi *tnapi = dev_id;
6478         struct tg3 *tp = tnapi->tp;
6479         struct tg3_hw_status *sblk = tnapi->hw_status;
6480         unsigned int handled = 1;
6481
6482         /* In INTx mode, it is possible for the interrupt to arrive at
6483          * the CPU before the status block that was posted prior to it.
6484          * Reading the PCI State register will confirm whether the
6485          * interrupt is ours and will flush the status block.
6486          */
6487         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6488                 if (tg3_flag(tp, CHIP_RESETTING) ||
6489                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6490                         handled = 0;
6491                         goto out;
6492                 }
6493         }
6494
6495         /*
6496          * Writing any value to intr-mbox-0 clears PCI INTA# and
6497          * chip-internal interrupt pending events.
6498          * Writing non-zero to intr-mbox-0 additionally tells the
6499          * NIC to stop sending us irqs, engaging "in-intr-handler"
6500          * event coalescing.
6501          *
6502          * Flush the mailbox to de-assert the IRQ immediately to prevent
6503          * spurious interrupts.  The flush impacts performance but
6504          * excessive spurious interrupts can be worse in some cases.
6505          */
6506         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6507         if (tg3_irq_sync(tp))
6508                 goto out;
6509         sblk->status &= ~SD_STATUS_UPDATED;
6510         if (likely(tg3_has_work(tnapi))) {
6511                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6512                 napi_schedule(&tnapi->napi);
6513         } else {
6514                 /* No work, shared interrupt perhaps?  re-enable
6515                  * interrupts, and flush that PCI write
6516                  */
6517                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6518                                0x00000000);
6519         }
6520 out:
6521         return IRQ_RETVAL(handled);
6522 }
6523
6524 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6525 {
6526         struct tg3_napi *tnapi = dev_id;
6527         struct tg3 *tp = tnapi->tp;
6528         struct tg3_hw_status *sblk = tnapi->hw_status;
6529         unsigned int handled = 1;
6530
6531         /* In INTx mode, it is possible for the interrupt to arrive at
6532          * the CPU before the status block that was posted prior to it.
6533          * Reading the PCI State register will confirm whether the
6534          * interrupt is ours and will flush the status block.
6535          */
6536         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6537                 if (tg3_flag(tp, CHIP_RESETTING) ||
6538                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6539                         handled = 0;
6540                         goto out;
6541                 }
6542         }
6543
6544         /*
6545          * writing any value to intr-mbox-0 clears PCI INTA# and
6546          * chip-internal interrupt pending events.
6547          * writing non-zero to intr-mbox-0 additionally tells the
6548          * NIC to stop sending us irqs, engaging "in-intr-handler"
6549          * event coalescing.
6550          *
6551          * Flush the mailbox to de-assert the IRQ immediately to prevent
6552          * spurious interrupts.  The flush impacts performance but
6553          * excessive spurious interrupts can be worse in some cases.
6554          */
6555         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6556
6557         /*
6558          * In a shared interrupt configuration, sometimes other devices'
6559          * interrupts will scream.  We record the current status tag here
6560          * so that the above check can report that the screaming interrupts
6561          * are unhandled.  Eventually they will be silenced.
6562          */
6563         tnapi->last_irq_tag = sblk->status_tag;
6564
6565         if (tg3_irq_sync(tp))
6566                 goto out;
6567
6568         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6569
6570         napi_schedule(&tnapi->napi);
6571
6572 out:
6573         return IRQ_RETVAL(handled);
6574 }
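
/* [Editorial illustration; not part of tg3.c]  The tag comparison that
 * tames "screaming" shared interrupts, isolated from the handler above:
 * if the status tag has not advanced since the last IRQ we claimed, the
 * interrupt was not ours.  (The real handler also consults the PCI State
 * register.)  Tag values below are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

struct tag_state { uint32_t last_irq_tag; };

static int irq_is_ours(struct tag_state *s, uint32_t status_tag)
{
        if (status_tag == s->last_irq_tag)
                return 0;               /* no new status block: not ours   */
        s->last_irq_tag = status_tag;   /* remember what we have consumed  */
        return 1;                       /* new work: claim the interrupt   */
}

int main(void)
{
        struct tag_state s = { .last_irq_tag = 7 };

        printf("%d\n", irq_is_ours(&s, 7));   /* 0: another device screamed */
        printf("%d\n", irq_is_ours(&s, 8));   /* 1: chip posted new status  */
        return 0;
}
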
6575
6576 /* ISR for interrupt test */
6577 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6578 {
6579         struct tg3_napi *tnapi = dev_id;
6580         struct tg3 *tp = tnapi->tp;
6581         struct tg3_hw_status *sblk = tnapi->hw_status;
6582
6583         if ((sblk->status & SD_STATUS_UPDATED) ||
6584             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6585                 tg3_disable_ints(tp);
6586                 return IRQ_RETVAL(1);
6587         }
6588         return IRQ_RETVAL(0);
6589 }
6590
6591 #ifdef CONFIG_NET_POLL_CONTROLLER
6592 static void tg3_poll_controller(struct net_device *dev)
6593 {
6594         int i;
6595         struct tg3 *tp = netdev_priv(dev);
6596
6597         for (i = 0; i < tp->irq_cnt; i++)
6598                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6599 }
6600 #endif
6601
6602 static void tg3_tx_timeout(struct net_device *dev)
6603 {
6604         struct tg3 *tp = netdev_priv(dev);
6605
6606         if (netif_msg_tx_err(tp)) {
6607                 netdev_err(dev, "transmit timed out, resetting\n");
6608                 tg3_dump_state(tp);
6609         }
6610
6611         tg3_reset_task_schedule(tp);
6612 }
6613
6614 /* Test for DMA buffers crossing a 4GB boundary: 4G, 8G, etc. */
6615 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6616 {
6617         u32 base = (u32) mapping & 0xffffffff;
6618
6619         return (base > 0xffffdcc0) && (base + len + 8 < base);
6620 }
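
/* [Editorial illustration; not part of tg3.c]  Why the test above catches
 * a 4GB-boundary crossing: if the low 32 bits of the mapping plus the
 * length (plus 8 bytes of slop) wrap past 2^32, then base + len + 8
 * computed in 32-bit arithmetic ends up smaller than base.  The
 * 0xffffdcc0 guard cheaply skips mappings that sit more than about 9 KB
 * below a boundary and so cannot wrap.  Standalone sketch.
 */
#include <stdint.h>
#include <stdio.h>

static int crosses_4g(uint64_t mapping, uint32_t len)
{
        uint32_t base = (uint32_t)mapping;

        return base > 0xffffdcc0u && (uint32_t)(base + len + 8) < base;
}

int main(void)
{
        printf("%d\n", crosses_4g(0xfffffff0ull, 64)); /* 1: wraps past 4GB */
        printf("%d\n", crosses_4g(0x00001000ull, 64)); /* 0: safely inside  */
        return 0;
}
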
6621
6622 /* Test for DMA addresses > 40-bit */
6623 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6624                                           int len)
6625 {
6626 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6627         if (tg3_flag(tp, 40BIT_DMA_BUG))
6628                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6629         return 0;
6630 #else
6631         return 0;
6632 #endif
6633 }
6634
6635 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6636                                  dma_addr_t mapping, u32 len, u32 flags,
6637                                  u32 mss, u32 vlan)
6638 {
6639         txbd->addr_hi = ((u64) mapping >> 32);
6640         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6641         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6642         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6643 }
6644
6645 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6646                             dma_addr_t map, u32 len, u32 flags,
6647                             u32 mss, u32 vlan)
6648 {
6649         struct tg3 *tp = tnapi->tp;
6650         bool hwbug = false;
6651
6652         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6653                 hwbug = true;
6654
6655         if (tg3_4g_overflow_test(map, len))
6656                 hwbug = true;
6657
6658         if (tg3_40bit_overflow_test(tp, map, len))
6659                 hwbug = true;
6660
6661         if (tp->dma_limit) {
6662                 u32 prvidx = *entry;
6663                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6664                 while (len > tp->dma_limit && *budget) {
6665                         u32 frag_len = tp->dma_limit;
6666                         len -= tp->dma_limit;
6667
6668                         /* Avoid the 8-byte DMA problem */
6669                         if (len <= 8) {
6670                                 len += tp->dma_limit / 2;
6671                                 frag_len = tp->dma_limit / 2;
6672                         }
6673
6674                         tnapi->tx_buffers[*entry].fragmented = true;
6675
6676                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6677                                       frag_len, tmp_flag, mss, vlan);
6678                         *budget -= 1;
6679                         prvidx = *entry;
6680                         *entry = NEXT_TX(*entry);
6681
6682                         map += frag_len;
6683                 }
6684
6685                 if (len) {
6686                         if (*budget) {
6687                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6688                                               len, flags, mss, vlan);
6689                                 *budget -= 1;
6690                                 *entry = NEXT_TX(*entry);
6691                         } else {
6692                                 hwbug = true;
6693                                 tnapi->tx_buffers[prvidx].fragmented = false;
6694                         }
6695                 }
6696         } else {
6697                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6698                               len, flags, mss, vlan);
6699                 *entry = NEXT_TX(*entry);
6700         }
6701
6702         return hwbug;
6703 }
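
/* [Editorial illustration; not part of tg3.c]  The splitting rule from
 * tg3_tx_frag_set() above, isolated: chop a buffer into dma_limit-sized
 * descriptors, but never leave a tail of 8 bytes or less (the "8-byte DMA
 * problem"); instead, split the last full chunk in half so the remainder
 * grows past 8 bytes.  Standalone sketch; budget accounting is omitted.
 */
#include <stdio.h>

static void split(unsigned int len, unsigned int dma_limit)
{
        while (len > dma_limit) {
                unsigned int frag_len = dma_limit;

                len -= dma_limit;
                if (len <= 8) {                 /* would leave a tiny tail */
                        len += dma_limit / 2;   /* give half a chunk back  */
                        frag_len = dma_limit / 2;
                }
                printf("BD of %u bytes\n", frag_len);
        }
        if (len)
                printf("final BD of %u bytes\n", len);
}

int main(void)
{
        /* A naive split of 4100 bytes at a 4096-byte limit would leave a
         * 4-byte tail; the rule above emits 2048 + 2052 instead. */
        split(4100, 4096);
        return 0;
}
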
6704
6705 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6706 {
6707         int i;
6708         struct sk_buff *skb;
6709         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6710
6711         skb = txb->skb;
6712         txb->skb = NULL;
6713
6714         pci_unmap_single(tnapi->tp->pdev,
6715                          dma_unmap_addr(txb, mapping),
6716                          skb_headlen(skb),
6717                          PCI_DMA_TODEVICE);
6718
6719         while (txb->fragmented) {
6720                 txb->fragmented = false;
6721                 entry = NEXT_TX(entry);
6722                 txb = &tnapi->tx_buffers[entry];
6723         }
6724
6725         for (i = 0; i <= last; i++) {
6726                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6727
6728                 entry = NEXT_TX(entry);
6729                 txb = &tnapi->tx_buffers[entry];
6730
6731                 pci_unmap_page(tnapi->tp->pdev,
6732                                dma_unmap_addr(txb, mapping),
6733                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6734
6735                 while (txb->fragmented) {
6736                         txb->fragmented = false;
6737                         entry = NEXT_TX(entry);
6738                         txb = &tnapi->tx_buffers[entry];
6739                 }
6740         }
6741 }
6742
6743 /* Work around the 4GB and 40-bit hardware DMA bugs. */
6744 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6745                                        struct sk_buff **pskb,
6746                                        u32 *entry, u32 *budget,
6747                                        u32 base_flags, u32 mss, u32 vlan)
6748 {
6749         struct tg3 *tp = tnapi->tp;
6750         struct sk_buff *new_skb, *skb = *pskb;
6751         dma_addr_t new_addr = 0;
6752         int ret = 0;
6753
6754         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6755                 new_skb = skb_copy(skb, GFP_ATOMIC);
6756         else {
6757                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6758
6759                 new_skb = skb_copy_expand(skb,
6760                                           skb_headroom(skb) + more_headroom,
6761                                           skb_tailroom(skb), GFP_ATOMIC);
6762         }
6763
6764         if (!new_skb) {
6765                 ret = -1;
6766         } else {
6767                 /* New SKB is guaranteed to be linear. */
6768                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6769                                           PCI_DMA_TODEVICE);
6770                 /* Make sure the mapping succeeded */
6771                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6772                         dev_kfree_skb(new_skb);
6773                         ret = -1;
6774                 } else {
6775                         u32 save_entry = *entry;
6776
6777                         base_flags |= TXD_FLAG_END;
6778
6779                         tnapi->tx_buffers[*entry].skb = new_skb;
6780                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6781                                            mapping, new_addr);
6782
6783                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6784                                             new_skb->len, base_flags,
6785                                             mss, vlan)) {
6786                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6787                                 dev_kfree_skb(new_skb);
6788                                 ret = -1;
6789                         }
6790                 }
6791         }
6792
6793         dev_kfree_skb(skb);
6794         *pskb = new_skb;
6795         return ret;
6796 }
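
/* [Editorial illustration; not part of tg3.c]  The shape of the bounce
 * fallback above, in miniature: when a mapping would trip a DMA erratum,
 * copy the payload into a freshly allocated linear buffer and map that
 * instead.  All names here are hypothetical; the driver uses skb_copy()
 * and pci_map_single() for the same effect.
 */
#include <stdlib.h>
#include <string.h>

/* Returns a new linear copy of buf, or NULL; the caller maps and later
 * frees it, as the workaround above maps new_skb->data. */
static void *bounce_copy(const void *buf, size_t len)
{
        void *lin = malloc(len);

        if (!lin)
                return NULL;    /* like skb_copy() failing: drop the packet */
        memcpy(lin, buf, len);
        return lin;
}

int main(void)
{
        char pkt[64] = "payload";
        void *lin = bounce_copy(pkt, sizeof(pkt));

        free(lin);
        return 0;
}
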
6797
6798 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6799
6800 /* Use GSO to work around a rare TSO bug that may be triggered when the
6801  * TSO header is greater than 80 bytes.
6802  */
6803 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6804 {
6805         struct sk_buff *segs, *nskb;
6806         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6807
6808         /* Estimate the number of fragments in the worst case */
6809         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6810                 netif_stop_queue(tp->dev);
6811
6812                 /* netif_tx_stop_queue() must be done before checking
6813                  * tx index in tg3_tx_avail() below, because in
6814                  * tg3_tx(), we update tx index before checking for
6815                  * netif_tx_queue_stopped().
6816                  */
6817                 smp_mb();
6818                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6819                         return NETDEV_TX_BUSY;
6820
6821                 netif_wake_queue(tp->dev);
6822         }
6823
6824         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6825         if (IS_ERR(segs))
6826                 goto tg3_tso_bug_end;
6827
6828         do {
6829                 nskb = segs;
6830                 segs = segs->next;
6831                 nskb->next = NULL;
6832                 tg3_start_xmit(nskb, tp->dev);
6833         } while (segs);
6834
6835 tg3_tso_bug_end:
6836         dev_kfree_skb(skb);
6837
6838         return NETDEV_TX_OK;
6839 }
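
/* [Editorial illustration; not part of tg3.c]  The list walk used by
 * tg3_tso_bug() above, in miniature: detach each software-segmented
 * packet from the chain before handing it on, so the transmit path never
 * sees a linked tail.  Types and names here are hypothetical.
 */
#include <stdio.h>

struct seg { int id; struct seg *next; };

static void xmit_one(struct seg *s) { printf("xmit seg %d\n", s->id); }

static void xmit_chain(struct seg *segs)
{
        while (segs) {
                struct seg *nskb = segs;   /* take the head...            */
                segs = segs->next;         /* ...advance the cursor...    */
                nskb->next = NULL;         /* ...and detach before xmit   */
                xmit_one(nskb);
        }
}

int main(void)
{
        struct seg c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

        xmit_chain(&a);
        return 0;
}
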
6840
6841 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6842  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6843  */
6844 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6845 {
6846         struct tg3 *tp = netdev_priv(dev);
6847         u32 len, entry, base_flags, mss, vlan = 0;
6848         u32 budget;
6849         int i = -1, would_hit_hwbug;
6850         dma_addr_t mapping;
6851         struct tg3_napi *tnapi;
6852         struct netdev_queue *txq;
6853         unsigned int last;
6854
6855         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6856         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6857         if (tg3_flag(tp, ENABLE_TSS))
6858                 tnapi++;
6859
6860         budget = tg3_tx_avail(tnapi);
6861
6862         /* We are running in BH disabled context with netif_tx_lock
6863          * and TX reclaim runs via tp->napi.poll inside of a software
6864          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6865          * no IRQ context deadlocks to worry about either.  Rejoice!
6866          */
6867         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6868                 if (!netif_tx_queue_stopped(txq)) {
6869                         netif_tx_stop_queue(txq);
6870
6871                         /* This is a hard error, log it. */
6872                         netdev_err(dev,
6873                                    "BUG! Tx Ring full when queue awake!\n");
6874                 }
6875                 return NETDEV_TX_BUSY;
6876         }
6877
6878         entry = tnapi->tx_prod;
6879         base_flags = 0;
6880         if (skb->ip_summed == CHECKSUM_PARTIAL)
6881                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6882
6883         mss = skb_shinfo(skb)->gso_size;
6884         if (mss) {
6885                 struct iphdr *iph;
6886                 u32 tcp_opt_len, hdr_len;
6887
6888                 if (skb_header_cloned(skb) &&
6889                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6890                         goto drop;
6891
6892                 iph = ip_hdr(skb);
6893                 tcp_opt_len = tcp_optlen(skb);
6894
6895                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6896
6897                 if (!skb_is_gso_v6(skb)) {
6898                         iph->check = 0;
6899                         iph->tot_len = htons(mss + hdr_len);
6900                 }
6901
6902                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6903                     tg3_flag(tp, TSO_BUG))
6904                         return tg3_tso_bug(tp, skb);
6905
6906                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6907                                TXD_FLAG_CPU_POST_DMA);
6908
6909                 if (tg3_flag(tp, HW_TSO_1) ||
6910                     tg3_flag(tp, HW_TSO_2) ||
6911                     tg3_flag(tp, HW_TSO_3)) {
6912                         tcp_hdr(skb)->check = 0;
6913                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6914                 } else
6915                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6916                                                                  iph->daddr, 0,
6917                                                                  IPPROTO_TCP,
6918                                                                  0);
6919
6920                 if (tg3_flag(tp, HW_TSO_3)) {
6921                         mss |= (hdr_len & 0xc) << 12;
6922                         if (hdr_len & 0x10)
6923                                 base_flags |= 0x00000010;
6924                         base_flags |= (hdr_len & 0x3e0) << 5;
6925                 } else if (tg3_flag(tp, HW_TSO_2))
6926                         mss |= hdr_len << 9;
6927                 else if (tg3_flag(tp, HW_TSO_1) ||
6928                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6929                         if (tcp_opt_len || iph->ihl > 5) {
6930                                 int tsflags;
6931
6932                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6933                                 mss |= (tsflags << 11);
6934                         }
6935                 } else {
6936                         if (tcp_opt_len || iph->ihl > 5) {
6937                                 int tsflags;
6938
6939                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6940                                 base_flags |= tsflags << 12;
6941                         }
6942                 }
6943         }
6944
6945         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6946             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6947                 base_flags |= TXD_FLAG_JMB_PKT;
6948
6949         if (vlan_tx_tag_present(skb)) {
6950                 base_flags |= TXD_FLAG_VLAN;
6951                 vlan = vlan_tx_tag_get(skb);
6952         }
6953
6954         len = skb_headlen(skb);
6955
6956         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6957         if (pci_dma_mapping_error(tp->pdev, mapping))
6958                 goto drop;
6959
6960
6961         tnapi->tx_buffers[entry].skb = skb;
6962         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6963
6964         would_hit_hwbug = 0;
6965
6966         if (tg3_flag(tp, 5701_DMA_BUG))
6967                 would_hit_hwbug = 1;
6968
6969         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6970                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6971                             mss, vlan)) {
6972                 would_hit_hwbug = 1;
6973         } else if (skb_shinfo(skb)->nr_frags > 0) {
6974                 u32 tmp_mss = mss;
6975
6976                 if (!tg3_flag(tp, HW_TSO_1) &&
6977                     !tg3_flag(tp, HW_TSO_2) &&
6978                     !tg3_flag(tp, HW_TSO_3))
6979                         tmp_mss = 0;
6980
6981                 /* Now loop through additional data
6982                  * fragments, and queue them.
6983                  */
6984                 last = skb_shinfo(skb)->nr_frags - 1;
6985                 for (i = 0; i <= last; i++) {
6986                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6987
6988                         len = skb_frag_size(frag);
6989                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6990                                                    len, DMA_TO_DEVICE);
6991
6992                         tnapi->tx_buffers[entry].skb = NULL;
6993                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6994                                            mapping);
6995                         if (dma_mapping_error(&tp->pdev->dev, mapping))
6996                                 goto dma_error;
6997
6998                         if (!budget ||
6999                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7000                                             len, base_flags |
7001                                             ((i == last) ? TXD_FLAG_END : 0),
7002                                             tmp_mss, vlan)) {
7003                                 would_hit_hwbug = 1;
7004                                 break;
7005                         }
7006                 }
7007         }
7008
7009         if (would_hit_hwbug) {
7010                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7011
7012                 /* If the workaround fails due to memory/mapping
7013                  * failure, silently drop this packet.
7014                  */
7015                 entry = tnapi->tx_prod;
7016                 budget = tg3_tx_avail(tnapi);
7017                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7018                                                 base_flags, mss, vlan))
7019                         goto drop_nofree;
7020         }
7021
7022         skb_tx_timestamp(skb);
7023         netdev_tx_sent_queue(txq, skb->len);
7024
7025         /* Sync BD data before updating mailbox */
7026         wmb();
7027
7028         /* Packets are ready, update Tx producer idx local and on card. */
7029         tw32_tx_mbox(tnapi->prodmbox, entry);
7030
7031         tnapi->tx_prod = entry;
7032         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7033                 netif_tx_stop_queue(txq);
7034
7035                 /* netif_tx_stop_queue() must be done before checking
7036                  * tx index in tg3_tx_avail() below, because in
7037                  * tg3_tx(), we update tx index before checking for
7038                  * netif_tx_queue_stopped().
7039                  */
7040                 smp_mb();
7041                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7042                         netif_tx_wake_queue(txq);
7043         }
7044
7045         mmiowb();
7046         return NETDEV_TX_OK;
7047
7048 dma_error:
7049         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7050         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7051 drop:
7052         dev_kfree_skb(skb);
7053 drop_nofree:
7054         tp->tx_dropped++;
7055         return NETDEV_TX_OK;
7056 }
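
/* The three exit labels above implement the transmit error policy:
 * dma_error unwinds the descriptors already mapped for this skb and
 * falls through to drop, which frees the skb; drop_nofree only bumps
 * tx_dropped, for the case where tigon3_dma_hwbug_workaround() has
 * already consumed the skb.  All paths return NETDEV_TX_OK so the
 * stack does not retry the packet.
 */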
7057
7058 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7059 {
7060         if (enable) {
7061                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7062                                   MAC_MODE_PORT_MODE_MASK);
7063
7064                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7065
7066                 if (!tg3_flag(tp, 5705_PLUS))
7067                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7068
7069                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7070                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7071                 else
7072                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7073         } else {
7074                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7075
7076                 if (tg3_flag(tp, 5705_PLUS) ||
7077                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7078                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7079                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7080         }
7081
7082         tw32(MAC_MODE, tp->mac_mode);
7083         udelay(40);
7084 }
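
/* tg3_mac_loopback() only flips the internal MAC loopback bit and
 * patches up the port mode (MII vs. GMII) and link polarity to match
 * the attached PHY; the udelay(40) after the MAC_MODE write is the
 * driver's usual settling delay for MAC mode changes.
 */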
7085
7086 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7087 {
7088         u32 val, bmcr, mac_mode, ptest = 0;
7089
7090         tg3_phy_toggle_apd(tp, false);
7091         tg3_phy_toggle_automdix(tp, 0);
7092
7093         if (extlpbk && tg3_phy_set_extloopbk(tp))
7094                 return -EIO;
7095
7096         bmcr = BMCR_FULLDPLX;
7097         switch (speed) {
7098         case SPEED_10:
7099                 break;
7100         case SPEED_100:
7101                 bmcr |= BMCR_SPEED100;
7102                 break;
7103         case SPEED_1000:
7104         default:
7105                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7106                         speed = SPEED_100;
7107                         bmcr |= BMCR_SPEED100;
7108                 } else {
7109                         speed = SPEED_1000;
7110                         bmcr |= BMCR_SPEED1000;
7111                 }
7112         }
7113
7114         if (extlpbk) {
7115                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7116                         tg3_readphy(tp, MII_CTRL1000, &val);
7117                         val |= CTL1000_AS_MASTER |
7118                                CTL1000_ENABLE_MASTER;
7119                         tg3_writephy(tp, MII_CTRL1000, val);
7120                 } else {
7121                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7122                                 MII_TG3_FET_PTEST_TRIM_2;
7123                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7124                 }
7125         } else
7126                 bmcr |= BMCR_LOOPBACK;
7127
7128         tg3_writephy(tp, MII_BMCR, bmcr);
7129
7130         /* The write needs to be flushed for the FETs */
7131         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7132                 tg3_readphy(tp, MII_BMCR, &bmcr);
7133
7134         udelay(40);
7135
7136         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7137             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7138                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7139                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7140                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7141
7142                 /* The write needs to be flushed for the AC131 */
7143                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7144         }
7145
7146         /* Reset to prevent losing 1st rx packet intermittently */
7147         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7148             tg3_flag(tp, 5780_CLASS)) {
7149                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7150                 udelay(10);
7151                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7152         }
7153
7154         mac_mode = tp->mac_mode &
7155                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7156         if (speed == SPEED_1000)
7157                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7158         else
7159                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7160
7161         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7162                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7163
7164                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7165                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7166                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7167                         mac_mode |= MAC_MODE_LINK_POLARITY;
7168
7169                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7170                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7171         }
7172
7173         tw32(MAC_MODE, mac_mode);
7174         udelay(40);
7175
7176         return 0;
7177 }
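
/* PHY loopback setup: BMCR is written to force speed and full
 * duplex, with BMCR_LOOPBACK set only for internal loopback.  For
 * external loopback the PHY is instead forced into 1000BASE-T master
 * mode (or a trim-select test mode on FET-style PHYs) so the link
 * can come up against the external loopback fixture.  FET PHYs need
 * a dummy read back after each write to flush it, per the comments
 * above.
 */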
7178
7179 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7180 {
7181         struct tg3 *tp = netdev_priv(dev);
7182
7183         if (features & NETIF_F_LOOPBACK) {
7184                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7185                         return;
7186
7187                 spin_lock_bh(&tp->lock);
7188                 tg3_mac_loopback(tp, true);
7189                 netif_carrier_on(tp->dev);
7190                 spin_unlock_bh(&tp->lock);
7191                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7192         } else {
7193                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7194                         return;
7195
7196                 spin_lock_bh(&tp->lock);
7197                 tg3_mac_loopback(tp, false);
7198                 /* Force link status check */
7199                 tg3_setup_phy(tp, 1);
7200                 spin_unlock_bh(&tp->lock);
7201                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7202         }
7203 }
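
/* Userspace reaches tg3_set_loopback() through the NETIF_F_LOOPBACK
 * feature flag, e.g. "ethtool -K ethX loopback on" with an ethtool
 * build recent enough to expose the "loopback" feature keyword.
 */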
7204
7205 static netdev_features_t tg3_fix_features(struct net_device *dev,
7206         netdev_features_t features)
7207 {
7208         struct tg3 *tp = netdev_priv(dev);
7209
7210         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7211                 features &= ~NETIF_F_ALL_TSO;
7212
7213         return features;
7214 }
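
/* On 5780-class chips every TSO feature bit is masked off once the
 * MTU exceeds ETH_DATA_LEN: on that hardware TSO and jumbo frames
 * are effectively mutually exclusive.
 */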
7215
7216 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7217 {
7218         netdev_features_t changed = dev->features ^ features;
7219
7220         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7221                 tg3_set_loopback(dev, features);
7222
7223         return 0;
7224 }
7225
7226 static void tg3_rx_prodring_free(struct tg3 *tp,
7227                                  struct tg3_rx_prodring_set *tpr)
7228 {
7229         int i;
7230
7231         if (tpr != &tp->napi[0].prodring) {
7232                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7233                      i = (i + 1) & tp->rx_std_ring_mask)
7234                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7235                                         tp->rx_pkt_map_sz);
7236
7237                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7238                         for (i = tpr->rx_jmb_cons_idx;
7239                              i != tpr->rx_jmb_prod_idx;
7240                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7241                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7242                                                 TG3_RX_JMB_MAP_SZ);
7243                         }
7244                 }
7245
7246                 return;
7247         }
7248
7249         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7250                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7251                                 tp->rx_pkt_map_sz);
7252
7253         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7254                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7255                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7256                                         TG3_RX_JMB_MAP_SZ);
7257         }
7258 }
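
/* Note the asymmetry above: per-vector producer rings (used with
 * RSS) only own the buffers between their consumer and producer
 * indices, so only that window is freed; the default ring in napi[0]
 * owns its entire ring and is walked from 0 through the ring mask.
 */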
7259
7260 /* Initialize rx rings for packet processing.
7261  *
7262  * The chip has been shut down and the driver detached from
7263  * the networking stack, so no interrupts or new tx packets will
7264  * end up in the driver.  tp->{tx,}lock are held and thus
7265  * we may not sleep.
7266  */
7267 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7268                                  struct tg3_rx_prodring_set *tpr)
7269 {
7270         u32 i, rx_pkt_dma_sz;
7271
7272         tpr->rx_std_cons_idx = 0;
7273         tpr->rx_std_prod_idx = 0;
7274         tpr->rx_jmb_cons_idx = 0;
7275         tpr->rx_jmb_prod_idx = 0;
7276
7277         if (tpr != &tp->napi[0].prodring) {
7278                 memset(&tpr->rx_std_buffers[0], 0,
7279                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7280                 if (tpr->rx_jmb_buffers)
7281                         memset(&tpr->rx_jmb_buffers[0], 0,
7282                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7283                 goto done;
7284         }
7285
7286         /* Zero out all descriptors. */
7287         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7288
7289         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7290         if (tg3_flag(tp, 5780_CLASS) &&
7291             tp->dev->mtu > ETH_DATA_LEN)
7292                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7293         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7294
7295         /* Initialize invariants of the rings; we only set this
7296          * stuff once.  This works because the card does not
7297          * write into the rx buffer posting rings.
7298          */
7299         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7300                 struct tg3_rx_buffer_desc *rxd;
7301
7302                 rxd = &tpr->rx_std[i];
7303                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7304                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7305                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7306                                (i << RXD_OPAQUE_INDEX_SHIFT));
7307         }
7308
7309         /* Now allocate fresh SKBs for each rx ring. */
7310         for (i = 0; i < tp->rx_pending; i++) {
7311                 unsigned int frag_size;
7312
7313                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7314                                       &frag_size) < 0) {
7315                         netdev_warn(tp->dev,
7316                                     "Using a smaller RX standard ring. Only "
7317                                     "%d out of %d buffers were allocated "
7318                                     "successfully\n", i, tp->rx_pending);
7319                         if (i == 0)
7320                                 goto initfail;
7321                         tp->rx_pending = i;
7322                         break;
7323                 }
7324         }
7325
7326         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7327                 goto done;
7328
7329         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7330
7331         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7332                 goto done;
7333
7334         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7335                 struct tg3_rx_buffer_desc *rxd;
7336
7337                 rxd = &tpr->rx_jmb[i].std;
7338                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7339                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7340                                   RXD_FLAG_JUMBO;
7341                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7342                        (i << RXD_OPAQUE_INDEX_SHIFT));
7343         }
7344
7345         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7346                 unsigned int frag_size;
7347
7348                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7349                                       &frag_size) < 0) {
7350                         netdev_warn(tp->dev,
7351                                     "Using a smaller RX jumbo ring. Only %d "
7352                                     "out of %d buffers were allocated "
7353                                     "successfully\n", i, tp->rx_jumbo_pending);
7354                         if (i == 0)
7355                                 goto initfail;
7356                         tp->rx_jumbo_pending = i;
7357                         break;
7358                 }
7359         }
7360
7361 done:
7362         return 0;
7363
7364 initfail:
7365         tg3_rx_prodring_free(tp, tpr);
7366         return -ENOMEM;
7367 }
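
/* tg3_rx_prodring_alloc() degrades gracefully: if only some of the
 * requested buffers can be allocated, rx_pending (or
 * rx_jumbo_pending) is shrunk to the count that succeeded, and only
 * a ring with zero buffers (i == 0) is treated as fatal.
 */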
7368
7369 static void tg3_rx_prodring_fini(struct tg3 *tp,
7370                                  struct tg3_rx_prodring_set *tpr)
7371 {
7372         kfree(tpr->rx_std_buffers);
7373         tpr->rx_std_buffers = NULL;
7374         kfree(tpr->rx_jmb_buffers);
7375         tpr->rx_jmb_buffers = NULL;
7376         if (tpr->rx_std) {
7377                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7378                                   tpr->rx_std, tpr->rx_std_mapping);
7379                 tpr->rx_std = NULL;
7380         }
7381         if (tpr->rx_jmb) {
7382                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7383                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7384                 tpr->rx_jmb = NULL;
7385         }
7386 }
7387
7388 static int tg3_rx_prodring_init(struct tg3 *tp,
7389                                 struct tg3_rx_prodring_set *tpr)
7390 {
7391         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7392                                       GFP_KERNEL);
7393         if (!tpr->rx_std_buffers)
7394                 return -ENOMEM;
7395
7396         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7397                                          TG3_RX_STD_RING_BYTES(tp),
7398                                          &tpr->rx_std_mapping,
7399                                          GFP_KERNEL);
7400         if (!tpr->rx_std)
7401                 goto err_out;
7402
7403         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7404                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7405                                               GFP_KERNEL);
7406                 if (!tpr->rx_jmb_buffers)
7407                         goto err_out;
7408
7409                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7410                                                  TG3_RX_JMB_RING_BYTES(tp),
7411                                                  &tpr->rx_jmb_mapping,
7412                                                  GFP_KERNEL);
7413                 if (!tpr->rx_jmb)
7414                         goto err_out;
7415         }
7416
7417         return 0;
7418
7419 err_out:
7420         tg3_rx_prodring_fini(tp, tpr);
7421         return -ENOMEM;
7422 }
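
/* tg3_rx_prodring_init() and tg3_rx_prodring_fini() pair safely:
 * every error path funnels through err_out into _fini, which
 * NULL-checks the coherent rings and relies on kfree(NULL) being a
 * no-op, so a partially constructed prodring can always be torn
 * down.
 */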
7423
7424 /* Free up pending packets in all rx/tx rings.
7425  *
7426  * The chip has been shut down and the driver detached from
7427  * the networking stack, so no interrupts or new tx packets will
7428  * end up in the driver.  tp->{tx,}lock is not held and we are not
7429  * in an interrupt context and thus may sleep.
7430  */
7431 static void tg3_free_rings(struct tg3 *tp)
7432 {
7433         int i, j;
7434
7435         for (j = 0; j < tp->irq_cnt; j++) {
7436                 struct tg3_napi *tnapi = &tp->napi[j];
7437
7438                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7439
7440                 if (!tnapi->tx_buffers)
7441                         continue;
7442
7443                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7444                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7445
7446                         if (!skb)
7447                                 continue;
7448
7449                         tg3_tx_skb_unmap(tnapi, i,
7450                                          skb_shinfo(skb)->nr_frags - 1);
7451
7452                         dev_kfree_skb_any(skb);
7453                 }
7454                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7455         }
7456 }
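
/* netdev_tx_reset_queue() above also clears the byte queue limit
 * (BQL) accounting for each tx queue, balancing the
 * netdev_tx_sent_queue() calls made on the transmit path.
 */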
7457
7458 /* Initialize tx/rx rings for packet processing.
7459  *
7460  * The chip has been shut down and the driver detached from
7461  * the networking stack, so no interrupts or new tx packets will
7462  * end up in the driver.  tp->{tx,}lock are held and thus
7463  * we may not sleep.
7464  */
7465 static int tg3_init_rings(struct tg3 *tp)
7466 {
7467         int i;
7468
7469         /* Free up all the SKBs. */
7470         tg3_free_rings(tp);
7471
7472         for (i = 0; i < tp->irq_cnt; i++) {
7473                 struct tg3_napi *tnapi = &tp->napi[i];
7474
7475                 tnapi->last_tag = 0;
7476                 tnapi->last_irq_tag = 0;
7477                 tnapi->hw_status->status = 0;
7478                 tnapi->hw_status->status_tag = 0;
7479                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7480
7481                 tnapi->tx_prod = 0;
7482                 tnapi->tx_cons = 0;
7483                 if (tnapi->tx_ring)
7484                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7485
7486                 tnapi->rx_rcb_ptr = 0;
7487                 if (tnapi->rx_rcb)
7488                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7489
7490                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7491                         tg3_free_rings(tp);
7492                         return -ENOMEM;
7493                 }
7494         }
7495
7496         return 0;
7497 }
7498
7499 /*
7500  * Must not be invoked with interrupt sources disabled and
7501  * the hardware shut down.
7502  */
7503 static void tg3_free_consistent(struct tg3 *tp)
7504 {
7505         int i;
7506
7507         for (i = 0; i < tp->irq_cnt; i++) {
7508                 struct tg3_napi *tnapi = &tp->napi[i];
7509
7510                 if (tnapi->tx_ring) {
7511                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7512                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7513                         tnapi->tx_ring = NULL;
7514                 }
7515
7516                 kfree(tnapi->tx_buffers);
7517                 tnapi->tx_buffers = NULL;
7518
7519                 if (tnapi->rx_rcb) {
7520                         dma_free_coherent(&tp->pdev->dev,
7521                                           TG3_RX_RCB_RING_BYTES(tp),
7522                                           tnapi->rx_rcb,
7523                                           tnapi->rx_rcb_mapping);
7524                         tnapi->rx_rcb = NULL;
7525                 }
7526
7527                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7528
7529                 if (tnapi->hw_status) {
7530                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7531                                           tnapi->hw_status,
7532                                           tnapi->status_mapping);
7533                         tnapi->hw_status = NULL;
7534                 }
7535         }
7536
7537         if (tp->hw_stats) {
7538                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7539                                   tp->hw_stats, tp->stats_mapping);
7540                 tp->hw_stats = NULL;
7541         }
7542 }
7543
7544 /*
7545  * Must not be invoked with interrupt sources disabled and
7546  * the hardware shut down.  Can sleep.
7547  */
7548 static int tg3_alloc_consistent(struct tg3 *tp)
7549 {
7550         int i;
7551
7552         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7553                                           sizeof(struct tg3_hw_stats),
7554                                           &tp->stats_mapping,
7555                                           GFP_KERNEL);
7556         if (!tp->hw_stats)
7557                 goto err_out;
7558
7559         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7560
7561         for (i = 0; i < tp->irq_cnt; i++) {
7562                 struct tg3_napi *tnapi = &tp->napi[i];
7563                 struct tg3_hw_status *sblk;
7564
7565                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7566                                                       TG3_HW_STATUS_SIZE,
7567                                                       &tnapi->status_mapping,
7568                                                       GFP_KERNEL);
7569                 if (!tnapi->hw_status)
7570                         goto err_out;
7571
7572                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7573                 sblk = tnapi->hw_status;
7574
7575                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7576                         goto err_out;
7577
7578                 /* If multivector TSS is enabled, vector 0 does not handle
7579                  * tx interrupts.  Don't allocate any resources for it.
7580                  */
7581                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7582                     (i && tg3_flag(tp, ENABLE_TSS))) {
7583                         tnapi->tx_buffers = kzalloc(
7584                                                sizeof(struct tg3_tx_ring_info) *
7585                                                TG3_TX_RING_SIZE, GFP_KERNEL);
7586                         if (!tnapi->tx_buffers)
7587                                 goto err_out;
7588
7589                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7590                                                             TG3_TX_RING_BYTES,
7591                                                         &tnapi->tx_desc_mapping,
7592                                                             GFP_KERNEL);
7593                         if (!tnapi->tx_ring)
7594                                 goto err_out;
7595                 }
7596
7597                 /*
7598                  * When RSS is enabled, the status block format changes
7599                  * slightly.  The "rx_jumbo_consumer", "reserved",
7600                  * and "rx_mini_consumer" members get mapped to the
7601                  * other three rx return ring producer indexes.
7602                  */
7603                 switch (i) {
7604                 default:
7605                         if (tg3_flag(tp, ENABLE_RSS)) {
7606                                 tnapi->rx_rcb_prod_idx = NULL;
7607                                 break;
7608                         }
7609                         /* Fall through */
7610                 case 1:
7611                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7612                         break;
7613                 case 2:
7614                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7615                         break;
7616                 case 3:
7617                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
7618                         break;
7619                 case 4:
7620                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7621                         break;
7622                 }
7623
7624                 /*
7625                  * If multivector RSS is enabled, vector 0 does not handle
7626                  * rx or tx interrupts.  Don't allocate any resources for it.
7627                  */
7628                 if (!i && tg3_flag(tp, ENABLE_RSS))
7629                         continue;
7630
7631                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7632                                                    TG3_RX_RCB_RING_BYTES(tp),
7633                                                    &tnapi->rx_rcb_mapping,
7634                                                    GFP_KERNEL);
7635                 if (!tnapi->rx_rcb)
7636                         goto err_out;
7637
7638                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7639         }
7640
7641         return 0;
7642
7643 err_out:
7644         tg3_free_consistent(tp);
7645         return -ENOMEM;
7646 }
7647
7648 #define MAX_WAIT_CNT 1000
7649
7650 /* To stop a block, clear the enable bit and poll until it
7651  * clears.  tp->lock is held.
7652  */
7653 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7654 {
7655         unsigned int i;
7656         u32 val;
7657
7658         if (tg3_flag(tp, 5705_PLUS)) {
7659                 switch (ofs) {
7660                 case RCVLSC_MODE:
7661                 case DMAC_MODE:
7662                 case MBFREE_MODE:
7663                 case BUFMGR_MODE:
7664                 case MEMARB_MODE:
7665                         /* We can't enable/disable these bits of the
7666                          * 5705/5750, just say success.
7667                          */
7668                         return 0;
7669
7670                 default:
7671                         break;
7672                 }
7673         }
7674
7675         val = tr32(ofs);
7676         val &= ~enable_bit;
7677         tw32_f(ofs, val);
7678
7679         for (i = 0; i < MAX_WAIT_CNT; i++) {
7680                 udelay(100);
7681                 val = tr32(ofs);
7682                 if ((val & enable_bit) == 0)
7683                         break;
7684         }
7685
7686         if (i == MAX_WAIT_CNT && !silent) {
7687                 dev_err(&tp->pdev->dev,
7688                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7689                         ofs, enable_bit);
7690                 return -ENODEV;
7691         }
7692
7693         return 0;
7694 }
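
/* With MAX_WAIT_CNT polls of 100 usec each, tg3_stop_block() (and
 * the MAC_TX_MODE wait in tg3_abort_hw() below) busy-waits for at
 * most ~100 ms before declaring a block stuck.
 */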
7695
7696 /* tp->lock is held. */
7697 static int tg3_abort_hw(struct tg3 *tp, int silent)
7698 {
7699         int i, err;
7700
7701         tg3_disable_ints(tp);
7702
7703         tp->rx_mode &= ~RX_MODE_ENABLE;
7704         tw32_f(MAC_RX_MODE, tp->rx_mode);
7705         udelay(10);
7706
7707         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7708         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7709         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7710         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7711         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7712         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7713
7714         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7715         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7716         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7717         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7718         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7719         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7720         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7721
7722         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7723         tw32_f(MAC_MODE, tp->mac_mode);
7724         udelay(40);
7725
7726         tp->tx_mode &= ~TX_MODE_ENABLE;
7727         tw32_f(MAC_TX_MODE, tp->tx_mode);
7728
7729         for (i = 0; i < MAX_WAIT_CNT; i++) {
7730                 udelay(100);
7731                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7732                         break;
7733         }
7734         if (i >= MAX_WAIT_CNT) {
7735                 dev_err(&tp->pdev->dev,
7736                         "%s timed out, TX_MODE_ENABLE will not clear "
7737                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7738                 err |= -ENODEV;
7739         }
7740
7741         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7742         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7743         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7744
7745         tw32(FTQ_RESET, 0xffffffff);
7746         tw32(FTQ_RESET, 0x00000000);
7747
7748         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7749         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7750
7751         for (i = 0; i < tp->irq_cnt; i++) {
7752                 struct tg3_napi *tnapi = &tp->napi[i];
7753                 if (tnapi->hw_status)
7754                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7755         }
7756
7757         return err;
7758 }
7759
7760 /* Save PCI command register before chip reset */
7761 static void tg3_save_pci_state(struct tg3 *tp)
7762 {
7763         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7764 }
7765
7766 /* Restore PCI state after chip reset */
7767 static void tg3_restore_pci_state(struct tg3 *tp)
7768 {
7769         u32 val;
7770
7771         /* Re-enable indirect register accesses. */
7772         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7773                                tp->misc_host_ctrl);
7774
7775         /* Set MAX PCI retry to zero. */
7776         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7777         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7778             tg3_flag(tp, PCIX_MODE))
7779                 val |= PCISTATE_RETRY_SAME_DMA;
7780         /* Allow reads and writes to the APE register and memory space. */
7781         if (tg3_flag(tp, ENABLE_APE))
7782                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7783                        PCISTATE_ALLOW_APE_SHMEM_WR |
7784                        PCISTATE_ALLOW_APE_PSPACE_WR;
7785         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7786
7787         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7788
7789         if (!tg3_flag(tp, PCI_EXPRESS)) {
7790                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7791                                       tp->pci_cacheline_sz);
7792                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7793                                       tp->pci_lat_timer);
7794         }
7795
7796         /* Make sure PCI-X relaxed ordering bit is clear. */
7797         if (tg3_flag(tp, PCIX_MODE)) {
7798                 u16 pcix_cmd;
7799
7800                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7801                                      &pcix_cmd);
7802                 pcix_cmd &= ~PCI_X_CMD_ERO;
7803                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7804                                       pcix_cmd);
7805         }
7806
7807         if (tg3_flag(tp, 5780_CLASS)) {
7808
7809                 /* Chip reset on 5780 will reset MSI enable bit,
7810                  * so we need to restore it.
7811                  */
7812                 if (tg3_flag(tp, USING_MSI)) {
7813                         u16 ctrl;
7814
7815                         pci_read_config_word(tp->pdev,
7816                                              tp->msi_cap + PCI_MSI_FLAGS,
7817                                              &ctrl);
7818                         pci_write_config_word(tp->pdev,
7819                                               tp->msi_cap + PCI_MSI_FLAGS,
7820                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7821                         val = tr32(MSGINT_MODE);
7822                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7823                 }
7824         }
7825 }
7826
7827 /* tp->lock is held. */
7828 static int tg3_chip_reset(struct tg3 *tp)
7829 {
7830         u32 val;
7831         void (*write_op)(struct tg3 *, u32, u32);
7832         int i, err;
7833
7834         tg3_nvram_lock(tp);
7835
7836         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7837
7838         /* No matching tg3_nvram_unlock() after this because
7839          * chip reset below will undo the nvram lock.
7840          */
7841         tp->nvram_lock_cnt = 0;
7842
7843         /* GRC_MISC_CFG core clock reset will clear the memory
7844          * enable bit in PCI register 4 and the MSI enable bit
7845          * on some chips, so we save relevant registers here.
7846          */
7847         tg3_save_pci_state(tp);
7848
7849         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7850             tg3_flag(tp, 5755_PLUS))
7851                 tw32(GRC_FASTBOOT_PC, 0);
7852
7853         /*
7854          * We must avoid the readl() that normally takes place.
7855          * It locks up machines, causes machine checks, and other
7856          * fun things.  So, temporarily disable the 5701
7857          * hardware workaround, while we do the reset.
7858          */
7859         write_op = tp->write32;
7860         if (write_op == tg3_write_flush_reg32)
7861                 tp->write32 = tg3_write32;
7862
7863         /* Prevent the irq handler from reading or writing PCI registers
7864          * during chip reset when the memory enable bit in the PCI command
7865          * register may be cleared.  The chip does not generate interrupt
7866          * at this time, but the irq handler may still be called due to irq
7867          * sharing or irqpoll.
7868          */
7869         tg3_flag_set(tp, CHIP_RESETTING);
7870         for (i = 0; i < tp->irq_cnt; i++) {
7871                 struct tg3_napi *tnapi = &tp->napi[i];
7872                 if (tnapi->hw_status) {
7873                         tnapi->hw_status->status = 0;
7874                         tnapi->hw_status->status_tag = 0;
7875                 }
7876                 tnapi->last_tag = 0;
7877                 tnapi->last_irq_tag = 0;
7878         }
7879         smp_mb();
7880
7881         for (i = 0; i < tp->irq_cnt; i++)
7882                 synchronize_irq(tp->napi[i].irq_vec);
7883
7884         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7885                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7886                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7887         }
7888
7889         /* do the reset */
7890         val = GRC_MISC_CFG_CORECLK_RESET;
7891
7892         if (tg3_flag(tp, PCI_EXPRESS)) {
7893                 /* Force PCIe 1.0a mode */
7894                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7895                     !tg3_flag(tp, 57765_PLUS) &&
7896                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7897                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7898                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7899
7900                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7901                         tw32(GRC_MISC_CFG, (1 << 29));
7902                         val |= (1 << 29);
7903                 }
7904         }
7905
7906         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7907                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7908                 tw32(GRC_VCPU_EXT_CTRL,
7909                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7910         }
7911
7912         /* Manage gphy power for all CPMU absent PCIe devices. */
7913         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7914                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7915
7916         tw32(GRC_MISC_CFG, val);
7917
7918         /* restore 5701 hardware bug workaround write method */
7919         tp->write32 = write_op;
7920
7921         /* Unfortunately, we have to delay before the PCI read back.
7922          * Some 575X chips will not even respond to a PCI cfg access
7923          * when the reset command is given to the chip.
7924          *
7925          * How do these hardware designers expect things to work
7926          * properly if the PCI write is posted for a long period
7927          * of time?  It is always necessary to have some method by
7928          * which a register read back can occur to push the write
7929          * out which does the reset.
7930          *
7931          * For most tg3 variants the trick below was working.
7932          * Ho hum...
7933          */
7934         udelay(120);
7935
7936         /* Flush PCI posted writes.  The normal MMIO registers
7937          * are inaccessible at this time so this is the only
7938          * way to do this reliably (actually, this is no longer
7939          * the case, see above).  I tried to use indirect
7940          * register read/write but this upset some 5701 variants.
7941          */
7942         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7943
7944         udelay(120);
7945
7946         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7947                 u16 val16;
7948
7949                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7950                         int i;
7951                         u32 cfg_val;
7952
7953                         /* Wait for link training to complete.  */
7954                         for (i = 0; i < 5000; i++)
7955                                 udelay(100);
7956
7957                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7958                         pci_write_config_dword(tp->pdev, 0xc4,
7959                                                cfg_val | (1 << 15));
7960                 }
7961
7962                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7963                 pci_read_config_word(tp->pdev,
7964                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7965                                      &val16);
7966                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7967                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7968                 /*
7969                  * Older PCIe devices only support the 128 byte
7970                  * MPS setting.  Enforce the restriction.
7971                  */
7972                 if (!tg3_flag(tp, CPMU_PRESENT))
7973                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7974                 pci_write_config_word(tp->pdev,
7975                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7976                                       val16);
7977
7978                 /* Clear error status */
7979                 pci_write_config_word(tp->pdev,
7980                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7981                                       PCI_EXP_DEVSTA_CED |
7982                                       PCI_EXP_DEVSTA_NFED |
7983                                       PCI_EXP_DEVSTA_FED |
7984                                       PCI_EXP_DEVSTA_URD);
7985         }
7986
7987         tg3_restore_pci_state(tp);
7988
7989         tg3_flag_clear(tp, CHIP_RESETTING);
7990         tg3_flag_clear(tp, ERROR_PROCESSED);
7991
7992         val = 0;
7993         if (tg3_flag(tp, 5780_CLASS))
7994                 val = tr32(MEMARB_MODE);
7995         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7996
7997         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7998                 tg3_stop_fw(tp);
7999                 tw32(0x5000, 0x400);
8000         }
8001
8002         tw32(GRC_MODE, tp->grc_mode);
8003
8004         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8005                 val = tr32(0xc4);
8006
8007                 tw32(0xc4, val | (1 << 15));
8008         }
8009
8010         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8011             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8012                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8013                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8014                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8015                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8016         }
8017
8018         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8019                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8020                 val = tp->mac_mode;
8021         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8022                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8023                 val = tp->mac_mode;
8024         } else
8025                 val = 0;
8026
8027         tw32_f(MAC_MODE, val);
8028         udelay(40);
8029
8030         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8031
8032         err = tg3_poll_fw(tp);
8033         if (err)
8034                 return err;
8035
8036         tg3_mdio_start(tp);
8037
8038         if (tg3_flag(tp, PCI_EXPRESS) &&
8039             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8040             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8041             !tg3_flag(tp, 57765_PLUS)) {
8042                 val = tr32(0x7c00);
8043
8044                 tw32(0x7c00, val | (1 << 25));
8045         }
8046
8047         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8048                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8049                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8050         }
8051
8052         /* Reprobe ASF enable state.  */
8053         tg3_flag_clear(tp, ENABLE_ASF);
8054         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8055         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8056         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8057                 u32 nic_cfg;
8058
8059                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8060                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8061                         tg3_flag_set(tp, ENABLE_ASF);
8062                         tp->last_event_jiffies = jiffies;
8063                         if (tg3_flag(tp, 5750_PLUS))
8064                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8065                 }
8066         }
8067
8068         return 0;
8069 }
8070
8071 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8072 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8073
8074 /* tp->lock is held. */
8075 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8076 {
8077         int err;
8078
8079         tg3_stop_fw(tp);
8080
8081         tg3_write_sig_pre_reset(tp, kind);
8082
8083         tg3_abort_hw(tp, silent);
8084         err = tg3_chip_reset(tp);
8085
8086         __tg3_set_mac_addr(tp, 0);
8087
8088         tg3_write_sig_legacy(tp, kind);
8089         tg3_write_sig_post_reset(tp, kind);
8090
8091         if (tp->hw_stats) {
8092                 /* Save the stats across chip resets... */
8093                 tg3_get_nstats(tp, &tp->net_stats_prev);
8094                 tg3_get_estats(tp, &tp->estats_prev);
8095
8096                 /* And make sure the next sample is new data */
8097                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8098         }
8099
8100         if (err)
8101                 return err;
8102
8103         return 0;
8104 }
8105
8106 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8107 {
8108         struct tg3 *tp = netdev_priv(dev);
8109         struct sockaddr *addr = p;
8110         int err = 0, skip_mac_1 = 0;
8111
8112         if (!is_valid_ether_addr(addr->sa_data))
8113                 return -EADDRNOTAVAIL;
8114
8115         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8116
8117         if (!netif_running(dev))
8118                 return 0;
8119
8120         if (tg3_flag(tp, ENABLE_ASF)) {
8121                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8122
8123                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8124                 addr0_low = tr32(MAC_ADDR_0_LOW);
8125                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8126                 addr1_low = tr32(MAC_ADDR_1_LOW);
8127
8128                 /* Skip MAC addr 1 if ASF is using it. */
8129                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8130                     !(addr1_high == 0 && addr1_low == 0))
8131                         skip_mac_1 = 1;
8132         }
8133         spin_lock_bh(&tp->lock);
8134         __tg3_set_mac_addr(tp, skip_mac_1);
8135         spin_unlock_bh(&tp->lock);
8136
8137         return err;
8138 }
8139
8140 /* tp->lock is held. */
8141 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8142                            dma_addr_t mapping, u32 maxlen_flags,
8143                            u32 nic_addr)
8144 {
8145         tg3_write_mem(tp,
8146                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8147                       ((u64) mapping >> 32));
8148         tg3_write_mem(tp,
8149                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8150                       ((u64) mapping & 0xffffffff));
8151         tg3_write_mem(tp,
8152                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8153                        maxlen_flags);
8154
8155         if (!tg3_flag(tp, 5705_PLUS))
8156                 tg3_write_mem(tp,
8157                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8158                               nic_addr);
8159 }
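
/* A BDINFO block in NIC SRAM is four 32-bit words: the 64-bit host
 * ring address (high word first), a combined maxlen/flags word, and
 * a NIC-memory ring address that is only written on pre-5705 chips,
 * as the 5705_PLUS test above shows.
 */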
8160
8161 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8162 {
8163         int i;
8164
8165         if (!tg3_flag(tp, ENABLE_TSS)) {
8166                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8167                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8168                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8169         } else {
8170                 tw32(HOSTCC_TXCOL_TICKS, 0);
8171                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8172                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8173         }
8174
8175         if (!tg3_flag(tp, ENABLE_RSS)) {
8176                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8177                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8178                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8179         } else {
8180                 tw32(HOSTCC_RXCOL_TICKS, 0);
8181                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8182                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8183         }
8184
8185         if (!tg3_flag(tp, 5705_PLUS)) {
8186                 u32 val = ec->stats_block_coalesce_usecs;
8187
8188                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8189                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8190
8191                 if (!netif_carrier_ok(tp->dev))
8192                         val = 0;
8193
8194                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8195         }
8196
8197         for (i = 0; i < tp->irq_cnt - 1; i++) {
8198                 u32 reg;
8199
8200                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8201                 tw32(reg, ec->rx_coalesce_usecs);
8202                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8203                 tw32(reg, ec->rx_max_coalesced_frames);
8204                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8205                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8206
8207                 if (tg3_flag(tp, ENABLE_TSS)) {
8208                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8209                         tw32(reg, ec->tx_coalesce_usecs);
8210                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8211                         tw32(reg, ec->tx_max_coalesced_frames);
8212                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8213                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8214                 }
8215         }
8216
8217         for (; i < tp->irq_max - 1; i++) {
8218                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8219                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8220                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8221
8222                 if (tg3_flag(tp, ENABLE_TSS)) {
8223                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8224                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8225                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8226                 }
8227         }
8228 }
8229
8230 /* tp->lock is held. */
8231 static void tg3_rings_reset(struct tg3 *tp)
8232 {
8233         int i;
8234         u32 stblk, txrcb, rxrcb, limit;
8235         struct tg3_napi *tnapi = &tp->napi[0];
8236
8237         /* Disable all transmit rings but the first. */
8238         if (!tg3_flag(tp, 5705_PLUS))
8239                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8240         else if (tg3_flag(tp, 5717_PLUS))
8241                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8242         else if (tg3_flag(tp, 57765_CLASS))
8243                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8244         else
8245                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8246
8247         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8248              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8249                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8250                               BDINFO_FLAGS_DISABLED);
8251
8252
8253         /* Disable all receive return rings but the first. */
8254         if (tg3_flag(tp, 5717_PLUS))
8255                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8256         else if (!tg3_flag(tp, 5705_PLUS))
8257                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8258         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8259                  tg3_flag(tp, 57765_CLASS))
8260                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8261         else
8262                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8263
8264         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8265              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8266                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8267                               BDINFO_FLAGS_DISABLED);
8268
8269         /* Disable interrupts */
8270         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8271         tp->napi[0].chk_msi_cnt = 0;
8272         tp->napi[0].last_rx_cons = 0;
8273         tp->napi[0].last_tx_cons = 0;
8274
8275         /* Zero mailbox registers. */
8276         if (tg3_flag(tp, SUPPORT_MSIX)) {
8277                 for (i = 1; i < tp->irq_max; i++) {
8278                         tp->napi[i].tx_prod = 0;
8279                         tp->napi[i].tx_cons = 0;
8280                         if (tg3_flag(tp, ENABLE_TSS))
8281                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8282                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8283                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8284                         tp->napi[i].chk_msi_cnt = 0;
8285                         tp->napi[i].last_rx_cons = 0;
8286                         tp->napi[i].last_tx_cons = 0;
8287                 }
8288                 if (!tg3_flag(tp, ENABLE_TSS))
8289                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8290         } else {
8291                 tp->napi[0].tx_prod = 0;
8292                 tp->napi[0].tx_cons = 0;
8293                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8294                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8295         }
8296
8297         /* Make sure the NIC-based send BD rings are disabled. */
8298         if (!tg3_flag(tp, 5705_PLUS)) {
8299                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8300                 for (i = 0; i < 16; i++)
8301                         tw32_tx_mbox(mbox + i * 8, 0);
8302         }
8303
8304         txrcb = NIC_SRAM_SEND_RCB;
8305         rxrcb = NIC_SRAM_RCV_RET_RCB;
8306
8307         /* Clear status block in ram. */
8308         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8309
8310         /* Set status block DMA address */
8311         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8312              ((u64) tnapi->status_mapping >> 32));
8313         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8314              ((u64) tnapi->status_mapping & 0xffffffff));
8315
8316         if (tnapi->tx_ring) {
8317                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8318                                (TG3_TX_RING_SIZE <<
8319                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8320                                NIC_SRAM_TX_BUFFER_DESC);
8321                 txrcb += TG3_BDINFO_SIZE;
8322         }
8323
8324         if (tnapi->rx_rcb) {
8325                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8326                                (tp->rx_ret_ring_mask + 1) <<
8327                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8328                 rxrcb += TG3_BDINFO_SIZE;
8329         }
8330
8331         stblk = HOSTCC_STATBLCK_RING1;
8332
8333         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8334                 u64 mapping = (u64)tnapi->status_mapping;
8335                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8336                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8337
8338                 /* Clear status block in ram. */
8339                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8340
8341                 if (tnapi->tx_ring) {
8342                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8343                                        (TG3_TX_RING_SIZE <<
8344                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8345                                        NIC_SRAM_TX_BUFFER_DESC);
8346                         txrcb += TG3_BDINFO_SIZE;
8347                 }
8348
8349                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8350                                ((tp->rx_ret_ring_mask + 1) <<
8351                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8352
8353                 stblk += 8;
8354                 rxrcb += TG3_BDINFO_SIZE;
8355         }
8356 }
8357
8358 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8359 {
8360         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8361
8362         if (!tg3_flag(tp, 5750_PLUS) ||
8363             tg3_flag(tp, 5780_CLASS) ||
8364             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8365             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8366             tg3_flag(tp, 57765_PLUS))
8367                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8368         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8369                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8370                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8371         else
8372                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8373
8374         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8375         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8376
8377         val = min(nic_rep_thresh, host_rep_thresh);
8378         tw32(RCVBDI_STD_THRESH, val);
8379
8380         if (tg3_flag(tp, 57765_PLUS))
8381                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8382
8383         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8384                 return;
8385
8386         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8387
8388         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8389
8390         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8391         tw32(RCVBDI_JUMBO_THRESH, val);
8392
8393         if (tg3_flag(tp, 57765_PLUS))
8394                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8395 }
8396
8397 static inline u32 calc_crc(unsigned char *buf, int len)
8398 {
8399         u32 reg;
8400         u32 tmp;
8401         int j, k;
8402
8403         reg = 0xffffffff;
8404
8405         for (j = 0; j < len; j++) {
8406                 reg ^= buf[j];
8407
8408                 for (k = 0; k < 8; k++) {
8409                         tmp = reg & 0x01;
8410
8411                         reg >>= 1;
8412
8413                         if (tmp)
8414                                 reg ^= 0xedb88320;
8415                 }
8416         }
8417
8418         return ~reg;
8419 }
8420
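/* Editor's note: calc_crc() above is the standard bit-serial, reflected
 * CRC-32 (polynomial 0xedb88320, all-ones init, final complement) -- the
 * same CRC used by Ethernet and zlib.  A user-space sketch to cross-check
 * it against zlib's crc32() follows; illustrative only, hence #if 0.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <zlib.h>               /* link with -lz */

int main(void)
{
        const unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        uint32_t reg = 0xffffffff;
        int j, k;

        for (j = 0; j < 6; j++) {
                reg ^= mac[j];
                for (k = 0; k < 8; k++)
                        reg = (reg >> 1) ^ ((reg & 1) ? 0xedb88320 : 0);
        }
        /* Both lines should print the same value. */
        printf("driver: %08x\nzlib:   %08lx\n", ~reg, crc32(0, mac, 6));
        return 0;
}
#endif
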
8421 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8422 {
8423         /* accept or reject all multicast frames */
8424         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8425         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8426         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8427         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8428 }
8429
8430 static void __tg3_set_rx_mode(struct net_device *dev)
8431 {
8432         struct tg3 *tp = netdev_priv(dev);
8433         u32 rx_mode;
8434
8435         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8436                                   RX_MODE_KEEP_VLAN_TAG);
8437
8438 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8439         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8440          * flag clear.
8441          */
8442         if (!tg3_flag(tp, ENABLE_ASF))
8443                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8444 #endif
8445
8446         if (dev->flags & IFF_PROMISC) {
8447                 /* Promiscuous mode. */
8448                 rx_mode |= RX_MODE_PROMISC;
8449         } else if (dev->flags & IFF_ALLMULTI) {
8450                 /* Accept all multicast. */
8451                 tg3_set_multi(tp, 1);
8452         } else if (netdev_mc_empty(dev)) {
8453                 /* Reject all multicast. */
8454                 tg3_set_multi(tp, 0);
8455         } else {
8456                 /* Accept one or more multicast(s). */
8457                 struct netdev_hw_addr *ha;
8458                 u32 mc_filter[4] = { 0, };
8459                 u32 regidx;
8460                 u32 bit;
8461                 u32 crc;
8462
8463                 netdev_for_each_mc_addr(ha, dev) {
8464                         crc = calc_crc(ha->addr, ETH_ALEN);
8465                         bit = ~crc & 0x7f;
8466                         regidx = (bit & 0x60) >> 5;
8467                         bit &= 0x1f;
8468                         mc_filter[regidx] |= (1 << bit);
8469                 }
8470
8471                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8472                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8473                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8474                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8475         }
8476
8477         if (rx_mode != tp->rx_mode) {
8478                 tp->rx_mode = rx_mode;
8479                 tw32_f(MAC_RX_MODE, rx_mode);
8480                 udelay(10);
8481         }
8482 }
8483
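/* Editor's note: a sketch of the hash-position computation performed in
 * the netdev_for_each_mc_addr() loop above.  The low 7 bits of the
 * inverted CRC index a 128-bit hash table spread across the four
 * MAC_HASH_REG_n registers.  The function name is hypothetical and the
 * #if 0 keeps it out of the build.
 */
#if 0
static void tg3_mc_hash_pos(const unsigned char *addr,
                            u32 *regidx, u32 *bitpos)
{
        u32 crc = calc_crc((unsigned char *)addr, ETH_ALEN);
        u32 bit = ~crc & 0x7f;          /* 7-bit hash table index */

        *regidx = (bit & 0x60) >> 5;    /* bits 6:5 select the register */
        *bitpos = bit & 0x1f;           /* bits 4:0 select the bit in it */
}
#endif
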
8484 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8485 {
8486         int i;
8487
8488         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8489                 tp->rss_ind_tbl[i] =
8490                         ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8491 }
8492
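/* Editor's note: ethtool_rxfh_indir_default(index, n) in
 * <linux/ethtool.h> returns index % n, so the default table above simply
 * spreads RX flows round-robin over tp->irq_cnt - 1 rings (presumably
 * because one vector is not used for RSS RX).
 */
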
8493 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8494 {
8495         int i;
8496
8497         if (!tg3_flag(tp, SUPPORT_MSIX))
8498                 return;
8499
8500         if (tp->irq_cnt <= 2) {
8501                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8502                 return;
8503         }
8504
8505         /* Validate table against current IRQ count */
8506         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8507                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8508                         break;
8509         }
8510
8511         if (i != TG3_RSS_INDIR_TBL_SIZE)
8512                 tg3_rss_init_dflt_indir_tbl(tp);
8513 }
8514
8515 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8516 {
8517         int i = 0;
8518         u32 reg = MAC_RSS_INDIR_TBL_0;
8519
8520         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8521                 u32 val = tp->rss_ind_tbl[i];
8522                 i++;
8523                 for (; i % 8; i++) {
8524                         val <<= 4;
8525                         val |= tp->rss_ind_tbl[i];
8526                 }
8527                 tw32(reg, val);
8528                 reg += 4;
8529         }
8530 }
8531
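/* Editor's note: the loop above packs eight 4-bit table entries into each
 * 32-bit register, first entry in the most significant nibble.  For
 * example, entries { 0, 1, 2, 3, 0, 1, 2, 3 } are written as 0x01230123,
 * and TG3_RSS_INDIR_TBL_SIZE / 8 consecutive registers are written in
 * total, starting at MAC_RSS_INDIR_TBL_0.
 */
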
8532 /* tp->lock is held. */
8533 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8534 {
8535         u32 val, rdmac_mode;
8536         int i, err, limit;
8537         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8538
8539         tg3_disable_ints(tp);
8540
8541         tg3_stop_fw(tp);
8542
8543         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8544
8545         if (tg3_flag(tp, INIT_COMPLETE))
8546                 tg3_abort_hw(tp, 1);
8547
8548         /* Enable MAC control of LPI */
8549         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8550                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8551                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8552                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8553
8554                 tw32_f(TG3_CPMU_EEE_CTRL,
8555                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8556
8557                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8558                       TG3_CPMU_EEEMD_LPI_IN_TX |
8559                       TG3_CPMU_EEEMD_LPI_IN_RX |
8560                       TG3_CPMU_EEEMD_EEE_ENABLE;
8561
8562                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8563                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8564
8565                 if (tg3_flag(tp, ENABLE_APE))
8566                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8567
8568                 tw32_f(TG3_CPMU_EEE_MODE, val);
8569
8570                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8571                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8572                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8573
8574                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8575                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8576                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8577         }
8578
8579         if (reset_phy)
8580                 tg3_phy_reset(tp);
8581
8582         err = tg3_chip_reset(tp);
8583         if (err)
8584                 return err;
8585
8586         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8587
8588         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8589                 val = tr32(TG3_CPMU_CTRL);
8590                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8591                 tw32(TG3_CPMU_CTRL, val);
8592
8593                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8594                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8595                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8596                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8597
8598                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8599                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8600                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8601                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8602
8603                 val = tr32(TG3_CPMU_HST_ACC);
8604                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8605                 val |= CPMU_HST_ACC_MACCLK_6_25;
8606                 tw32(TG3_CPMU_HST_ACC, val);
8607         }
8608
8609         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8610                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8611                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8612                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8613                 tw32(PCIE_PWR_MGMT_THRESH, val);
8614
8615                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8616                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8617
8618                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8619
8620                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8621                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8622         }
8623
8624         if (tg3_flag(tp, L1PLLPD_EN)) {
8625                 u32 grc_mode = tr32(GRC_MODE);
8626
8627                 /* Access the lower 1K of PL PCIE block registers. */
8628                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8629                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8630
8631                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8632                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8633                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8634
8635                 tw32(GRC_MODE, grc_mode);
8636         }
8637
8638         if (tg3_flag(tp, 57765_CLASS)) {
8639                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8640                         u32 grc_mode = tr32(GRC_MODE);
8641
8642                         /* Access the lower 1K of PL PCIE block registers. */
8643                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8644                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8645
8646                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8647                                    TG3_PCIE_PL_LO_PHYCTL5);
8648                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8649                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8650
8651                         tw32(GRC_MODE, grc_mode);
8652                 }
8653
8654                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8655                         u32 grc_mode = tr32(GRC_MODE);
8656
8657                         /* Access the lower 1K of DL PCIE block registers. */
8658                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8659                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8660
8661                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8662                                    TG3_PCIE_DL_LO_FTSMAX);
8663                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8664                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8665                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8666
8667                         tw32(GRC_MODE, grc_mode);
8668                 }
8669
8670                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8671                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8672                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8673                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8674         }
8675
8676         /* This works around an issue with Athlon chipsets on
8677          * B3 tigon3 silicon.  This bit has no effect on any
8678          * other revision.  But do not set this on PCI Express
8679          * chips and don't even touch the clocks if the CPMU is present.
8680          */
8681         if (!tg3_flag(tp, CPMU_PRESENT)) {
8682                 if (!tg3_flag(tp, PCI_EXPRESS))
8683                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8684                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8685         }
8686
8687         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8688             tg3_flag(tp, PCIX_MODE)) {
8689                 val = tr32(TG3PCI_PCISTATE);
8690                 val |= PCISTATE_RETRY_SAME_DMA;
8691                 tw32(TG3PCI_PCISTATE, val);
8692         }
8693
8694         if (tg3_flag(tp, ENABLE_APE)) {
8695                 /* Allow reads and writes to the
8696                  * APE register and memory space.
8697                  */
8698                 val = tr32(TG3PCI_PCISTATE);
8699                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8700                        PCISTATE_ALLOW_APE_SHMEM_WR |
8701                        PCISTATE_ALLOW_APE_PSPACE_WR;
8702                 tw32(TG3PCI_PCISTATE, val);
8703         }
8704
8705         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8706                 /* Enable some hw fixes.  */
8707                 val = tr32(TG3PCI_MSI_DATA);
8708                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8709                 tw32(TG3PCI_MSI_DATA, val);
8710         }
8711
8712         /* Descriptor ring init may make accesses to the
8713          * NIC SRAM area to set up the TX descriptors, so we
8714          * can only do this after the hardware has been
8715          * successfully reset.
8716          */
8717         err = tg3_init_rings(tp);
8718         if (err)
8719                 return err;
8720
8721         if (tg3_flag(tp, 57765_PLUS)) {
8722                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8723                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8724                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8725                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8726                 if (!tg3_flag(tp, 57765_CLASS) &&
8727                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8728                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8729                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8730         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8731                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8732                 /* This value is determined during the probe-time DMA
8733                  * engine test, tg3_test_dma().
8734                  */
8735                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8736         }
8737
8738         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8739                           GRC_MODE_4X_NIC_SEND_RINGS |
8740                           GRC_MODE_NO_TX_PHDR_CSUM |
8741                           GRC_MODE_NO_RX_PHDR_CSUM);
8742         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8743
8744         /* Pseudo-header checksum is done by hardware logic and not
8745          * the offload processors, so make the chip do the pseudo-
8746          * header checksums on receive.  For transmit it is more
8747          * convenient to do the pseudo-header checksum in software
8748          * as Linux does that on transmit for us in all cases.
8749          */
8750         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8751
8752         tw32(GRC_MODE,
8753              tp->grc_mode |
8754              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8755
8756         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
8757         val = tr32(GRC_MISC_CFG);
8758         val &= ~0xff;
8759         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8760         tw32(GRC_MISC_CFG, val);
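        /* Editor's note: a prescaler value of 65 presumably divides the
         * fixed 66 MHz clock by (65 + 1): 66 MHz / 66 = 1 MHz, i.e. a
         * 1 us timer tick.
         */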
8761
8762         /* Initialize MBUF/DESC pool. */
8763         if (tg3_flag(tp, 5750_PLUS)) {
8764                 /* Do nothing.  */
8765         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8766                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8767                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8768                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8769                 else
8770                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8771                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8772                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8773         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8774                 int fw_len;
8775
8776                 fw_len = tp->fw_len;
8777                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8778                 tw32(BUFMGR_MB_POOL_ADDR,
8779                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8780                 tw32(BUFMGR_MB_POOL_SIZE,
8781                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8782         }
8783
8784         if (tp->dev->mtu <= ETH_DATA_LEN) {
8785                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8786                      tp->bufmgr_config.mbuf_read_dma_low_water);
8787                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8788                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8789                 tw32(BUFMGR_MB_HIGH_WATER,
8790                      tp->bufmgr_config.mbuf_high_water);
8791         } else {
8792                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8793                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8794                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8795                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8796                 tw32(BUFMGR_MB_HIGH_WATER,
8797                      tp->bufmgr_config.mbuf_high_water_jumbo);
8798         }
8799         tw32(BUFMGR_DMA_LOW_WATER,
8800              tp->bufmgr_config.dma_low_water);
8801         tw32(BUFMGR_DMA_HIGH_WATER,
8802              tp->bufmgr_config.dma_high_water);
8803
8804         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8805         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8806                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8807         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8808             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8809             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8810                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8811         tw32(BUFMGR_MODE, val);
8812         for (i = 0; i < 2000; i++) {
8813                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8814                         break;
8815                 udelay(10);
8816         }
8817         if (i >= 2000) {
8818                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8819                 return -ENODEV;
8820         }
8821
8822         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8823                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8824
8825         tg3_setup_rxbd_thresholds(tp);
8826
8827         /* Initialize TG3_BDINFO's at:
8828          *  RCVDBDI_STD_BD:     standard eth size rx ring
8829          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8830          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8831          *
8832          * like so:
8833          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8834          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8835          *                              ring attribute flags
8836          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8837          *
8838          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8839          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8840          *
8841          * The size of each ring is fixed in the firmware, but the location is
8842          * configurable.
8843          */
8844         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8845              ((u64) tpr->rx_std_mapping >> 32));
8846         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8847              ((u64) tpr->rx_std_mapping & 0xffffffff));
8848         if (!tg3_flag(tp, 5717_PLUS))
8849                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8850                      NIC_SRAM_RX_BUFFER_DESC);
8851
8852         /* Disable the mini ring */
8853         if (!tg3_flag(tp, 5705_PLUS))
8854                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8855                      BDINFO_FLAGS_DISABLED);
8856
8857         /* Program the jumbo buffer descriptor ring control
8858          * blocks on those devices that have them.
8859          */
8860         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8861             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8862
8863                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8864                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8865                              ((u64) tpr->rx_jmb_mapping >> 32));
8866                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8867                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8868                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8869                               BDINFO_FLAGS_MAXLEN_SHIFT;
8870                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8871                              val | BDINFO_FLAGS_USE_EXT_RECV);
8872                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8873                             tg3_flag(tp, 57765_CLASS))
8874                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8875                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8876                 } else {
8877                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8878                              BDINFO_FLAGS_DISABLED);
8879                 }
8880
8881                 if (tg3_flag(tp, 57765_PLUS)) {
8882                         val = TG3_RX_STD_RING_SIZE(tp);
8883                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8884                         val |= (TG3_RX_STD_DMA_SZ << 2);
8885                 } else
8886                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8887         } else
8888                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8889
8890         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8891
8892         tpr->rx_std_prod_idx = tp->rx_pending;
8893         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8894
8895         tpr->rx_jmb_prod_idx =
8896                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8897         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8898
8899         tg3_rings_reset(tp);
8900
8901         /* Initialize MAC address and backoff seed. */
8902         __tg3_set_mac_addr(tp, 0);
8903
8904         /* MTU + ethernet header + FCS + optional VLAN tag */
8905         tw32(MAC_RX_MTU_SIZE,
8906              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
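        /* Editor's note: for the default 1500-byte MTU this works out to
         * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522
         * bytes, the classic maximum VLAN-tagged frame size.
         */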
8907
8908         /* The slot time is changed by tg3_setup_phy if we
8909          * run at gigabit with half duplex.
8910          */
8911         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8912               (6 << TX_LENGTHS_IPG_SHIFT) |
8913               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8914
8915         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8916                 val |= tr32(MAC_TX_LENGTHS) &
8917                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8918                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8919
8920         tw32(MAC_TX_LENGTHS, val);
8921
8922         /* Receive rules. */
8923         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8924         tw32(RCVLPC_CONFIG, 0x0181);
8925
8926         /* Calculate the RDMAC_MODE setting early; we need it to determine
8927          * the RCVLPC_STATE_ENABLE mask.
8928          */
8929         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8930                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8931                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8932                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8933                       RDMAC_MODE_LNGREAD_ENAB);
8934
8935         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8936                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8937
8938         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8939             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8940             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8941                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8942                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8943                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8944
8945         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8946             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8947                 if (tg3_flag(tp, TSO_CAPABLE) &&
8948                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8949                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8950                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8951                            !tg3_flag(tp, IS_5788)) {
8952                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8953                 }
8954         }
8955
8956         if (tg3_flag(tp, PCI_EXPRESS))
8957                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8958
8959         if (tg3_flag(tp, HW_TSO_1) ||
8960             tg3_flag(tp, HW_TSO_2) ||
8961             tg3_flag(tp, HW_TSO_3))
8962                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8963
8964         if (tg3_flag(tp, 57765_PLUS) ||
8965             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8966             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8967                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8968
8969         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8970                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8971
8972         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8973             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8974             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8975             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8976             tg3_flag(tp, 57765_PLUS)) {
8977                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8978                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8979                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8980                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8981                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8982                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8983                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8984                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8985                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8986                 }
8987                 tw32(TG3_RDMA_RSRVCTRL_REG,
8988                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8989         }
8990
8991         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8992             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8993                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8994                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8995                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8996                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8997         }
8998
8999         /* Receive/send statistics. */
9000         if (tg3_flag(tp, 5750_PLUS)) {
9001                 val = tr32(RCVLPC_STATS_ENABLE);
9002                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9003                 tw32(RCVLPC_STATS_ENABLE, val);
9004         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9005                    tg3_flag(tp, TSO_CAPABLE)) {
9006                 val = tr32(RCVLPC_STATS_ENABLE);
9007                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9008                 tw32(RCVLPC_STATS_ENABLE, val);
9009         } else {
9010                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9011         }
9012         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9013         tw32(SNDDATAI_STATSENAB, 0xffffff);
9014         tw32(SNDDATAI_STATSCTRL,
9015              (SNDDATAI_SCTRL_ENABLE |
9016               SNDDATAI_SCTRL_FASTUPD));
9017
9018         /* Set up the host coalescing engine. */
9019         tw32(HOSTCC_MODE, 0);
9020         for (i = 0; i < 2000; i++) {
9021                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9022                         break;
9023                 udelay(10);
9024         }
9025
9026         __tg3_set_coalesce(tp, &tp->coal);
9027
9028         if (!tg3_flag(tp, 5705_PLUS)) {
9029                 /* Status/statistics block address.  See tg3_timer,
9030                  * the tg3_periodic_fetch_stats call there, and
9031                  * tg3_get_stats to see how this works for 5705/5750 chips.
9032                  */
9033                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9034                      ((u64) tp->stats_mapping >> 32));
9035                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9036                      ((u64) tp->stats_mapping & 0xffffffff));
9037                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9038
9039                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9040
9041                 /* Clear statistics and status block memory areas */
9042                 for (i = NIC_SRAM_STATS_BLK;
9043                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9044                      i += sizeof(u32)) {
9045                         tg3_write_mem(tp, i, 0);
9046                         udelay(40);
9047                 }
9048         }
9049
9050         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9051
9052         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9053         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9054         if (!tg3_flag(tp, 5705_PLUS))
9055                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9056
9057         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9058                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9059                 /* Reset to avoid intermittently losing the first RX packet. */
9060                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9061                 udelay(10);
9062         }
9063
9064         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9065                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9066                         MAC_MODE_FHDE_ENABLE;
9067         if (tg3_flag(tp, ENABLE_APE))
9068                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9069         if (!tg3_flag(tp, 5705_PLUS) &&
9070             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9071             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9072                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9073         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9074         udelay(40);
9075
9076         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9077          * If TG3_FLAG_IS_NIC is zero, we should read the
9078          * register to preserve the GPIO settings for LOMs. The GPIOs,
9079          * whether used as inputs or outputs, are set by boot code after
9080          * reset.
9081          */
9082         if (!tg3_flag(tp, IS_NIC)) {
9083                 u32 gpio_mask;
9084
9085                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9086                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9087                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9088
9089                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9090                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9091                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9092
9093                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9094                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9095
9096                 tp->grc_local_ctrl &= ~gpio_mask;
9097                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9098
9099                 /* GPIO1 must be driven high for eeprom write protect */
9100                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9101                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9102                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9103         }
9104         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9105         udelay(100);
9106
9107         if (tg3_flag(tp, USING_MSIX)) {
9108                 val = tr32(MSGINT_MODE);
9109                 val |= MSGINT_MODE_ENABLE;
9110                 if (tp->irq_cnt > 1)
9111                         val |= MSGINT_MODE_MULTIVEC_EN;
9112                 if (!tg3_flag(tp, 1SHOT_MSI))
9113                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9114                 tw32(MSGINT_MODE, val);
9115         }
9116
9117         if (!tg3_flag(tp, 5705_PLUS)) {
9118                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9119                 udelay(40);
9120         }
9121
9122         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9123                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9124                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9125                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9126                WDMAC_MODE_LNGREAD_ENAB);
9127
9128         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9129             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9130                 if (tg3_flag(tp, TSO_CAPABLE) &&
9131                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9132                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9133                         /* nothing */
9134                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9135                            !tg3_flag(tp, IS_5788)) {
9136                         val |= WDMAC_MODE_RX_ACCEL;
9137                 }
9138         }
9139
9140         /* Enable host coalescing bug fix */
9141         if (tg3_flag(tp, 5755_PLUS))
9142                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9143
9144         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9145                 val |= WDMAC_MODE_BURST_ALL_DATA;
9146
9147         tw32_f(WDMAC_MODE, val);
9148         udelay(40);
9149
9150         if (tg3_flag(tp, PCIX_MODE)) {
9151                 u16 pcix_cmd;
9152
9153                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9154                                      &pcix_cmd);
9155                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9156                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9157                         pcix_cmd |= PCI_X_CMD_READ_2K;
9158                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9159                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9160                         pcix_cmd |= PCI_X_CMD_READ_2K;
9161                 }
9162                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9163                                       pcix_cmd);
9164         }
9165
9166         tw32_f(RDMAC_MODE, rdmac_mode);
9167         udelay(40);
9168
9169         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9170         if (!tg3_flag(tp, 5705_PLUS))
9171                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9172
9173         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9174                 tw32(SNDDATAC_MODE,
9175                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9176         else
9177                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9178
9179         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9180         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9181         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9182         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9183                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9184         tw32(RCVDBDI_MODE, val);
9185         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9186         if (tg3_flag(tp, HW_TSO_1) ||
9187             tg3_flag(tp, HW_TSO_2) ||
9188             tg3_flag(tp, HW_TSO_3))
9189                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9190         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9191         if (tg3_flag(tp, ENABLE_TSS))
9192                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9193         tw32(SNDBDI_MODE, val);
9194         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9195
9196         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9197                 err = tg3_load_5701_a0_firmware_fix(tp);
9198                 if (err)
9199                         return err;
9200         }
9201
9202         if (tg3_flag(tp, TSO_CAPABLE)) {
9203                 err = tg3_load_tso_firmware(tp);
9204                 if (err)
9205                         return err;
9206         }
9207
9208         tp->tx_mode = TX_MODE_ENABLE;
9209
9210         if (tg3_flag(tp, 5755_PLUS) ||
9211             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9212                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9213
9214         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9215                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9216                 tp->tx_mode &= ~val;
9217                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9218         }
9219
9220         tw32_f(MAC_TX_MODE, tp->tx_mode);
9221         udelay(100);
9222
9223         if (tg3_flag(tp, ENABLE_RSS)) {
9224                 tg3_rss_write_indir_tbl(tp);
9225
9226                 /* Set up the "secret" hash key. */
9227                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9228                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9229                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9230                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9231                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9232                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9233                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9234                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9235                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9236                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9237         }
9238
9239         tp->rx_mode = RX_MODE_ENABLE;
9240         if (tg3_flag(tp, 5755_PLUS))
9241                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9242
9243         if (tg3_flag(tp, ENABLE_RSS))
9244                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9245                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9246                                RX_MODE_RSS_IPV6_HASH_EN |
9247                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9248                                RX_MODE_RSS_IPV4_HASH_EN |
9249                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9250
9251         tw32_f(MAC_RX_MODE, tp->rx_mode);
9252         udelay(10);
9253
9254         tw32(MAC_LED_CTRL, tp->led_ctrl);
9255
9256         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9257         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9258                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9259                 udelay(10);
9260         }
9261         tw32_f(MAC_RX_MODE, tp->rx_mode);
9262         udelay(10);
9263
9264         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9265                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9266                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9267                         /* Set the drive transmission level to 1.2V,
9268                          * but only if the signal pre-emphasis bit is not set.  */
9269                         val = tr32(MAC_SERDES_CFG);
9270                         val &= 0xfffff000;
9271                         val |= 0x880;
9272                         tw32(MAC_SERDES_CFG, val);
9273                 }
9274                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9275                         tw32(MAC_SERDES_CFG, 0x616000);
9276         }
9277
9278         /* Prevent chip from dropping frames when flow control
9279          * is enabled.
9280          */
9281         if (tg3_flag(tp, 57765_CLASS))
9282                 val = 1;
9283         else
9284                 val = 2;
9285         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9286
9287         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9288             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9289                 /* Use hardware link auto-negotiation */
9290                 tg3_flag_set(tp, HW_AUTONEG);
9291         }
9292
9293         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9294             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9295                 u32 tmp;
9296
9297                 tmp = tr32(SERDES_RX_CTRL);
9298                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9299                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9300                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9301                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9302         }
9303
9304         if (!tg3_flag(tp, USE_PHYLIB)) {
9305                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9306                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9307
9308                 err = tg3_setup_phy(tp, 0);
9309                 if (err)
9310                         return err;
9311
9312                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9313                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9314                         u32 tmp;
9315
9316                         /* Clear CRC stats. */
9317                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9318                                 tg3_writephy(tp, MII_TG3_TEST1,
9319                                              tmp | MII_TG3_TEST1_CRC_EN);
9320                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9321                         }
9322                 }
9323         }
9324
9325         __tg3_set_rx_mode(tp->dev);
9326
9327         /* Initialize receive rules. */
9328         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9329         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9330         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9331         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9332
9333         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9334                 limit = 8;
9335         else
9336                 limit = 16;
9337         if (tg3_flag(tp, ENABLE_ASF))
9338                 limit -= 4;
9339         switch (limit) {
9340         case 16:
9341                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9342         case 15:
9343                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9344         case 14:
9345                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9346         case 13:
9347                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9348         case 12:
9349                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9350         case 11:
9351                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9352         case 10:
9353                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9354         case 9:
9355                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9356         case 8:
9357                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9358         case 7:
9359                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9360         case 6:
9361                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9362         case 5:
9363                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9364         case 4:
9365                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9366         case 3:
9367                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9368         case 2:
9369         case 1:
9370
9371         default:
9372                 break;
9373         }
9374
9375         if (tg3_flag(tp, ENABLE_APE))
9376                 /* Write our heartbeat update interval to APE. */
9377                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9378                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9379
9380         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9381
9382         return 0;
9383 }
9384
9385 /* Called at device open time to get the chip ready for
9386  * packet processing.  Invoked with tp->lock held.
9387  */
9388 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9389 {
9390         tg3_switch_clocks(tp);
9391
9392         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9393
9394         return tg3_reset_hw(tp, reset_phy);
9395 }
9396
9397 #define TG3_STAT_ADD32(PSTAT, REG) \
9398 do {    u32 __val = tr32(REG); \
9399         (PSTAT)->low += __val; \
9400         if ((PSTAT)->low < __val) \
9401                 (PSTAT)->high += 1; \
9402 } while (0)
9403
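/* Editor's note: TG3_STAT_ADD32 above maintains a 64-bit total from a
 * 32-bit hardware counter: the reading is added to ->low, and ->high is
 * bumped whenever the addition wraps (sum < addend).  A user-space sketch
 * of the same idiom follows; names are hypothetical and the #if 0 keeps
 * it out of the build.
 */
#if 0
#include <stdint.h>

struct stat64 {
        uint32_t low, high;
};

static void stat_add32(struct stat64 *s, uint32_t reading)
{
        s->low += reading;
        if (s->low < reading)   /* unsigned wrap => carry into high word */
                s->high += 1;
}
#endif
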
9404 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9405 {
9406         struct tg3_hw_stats *sp = tp->hw_stats;
9407
9408         if (!netif_carrier_ok(tp->dev))
9409                 return;
9410
9411         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9412         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9413         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9414         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9415         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9416         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9417         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9418         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9419         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9420         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9421         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9422         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9423         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9424
9425         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9426         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9427         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9428         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9429         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9430         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9431         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9432         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9433         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9434         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9435         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9436         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9437         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9438         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9439
9440         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9441         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9442             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9443             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9444                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9445         } else {
9446                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9447                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9448                 if (val) {
9449                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9450                         sp->rx_discards.low += val;
9451                         if (sp->rx_discards.low < val)
9452                                 sp->rx_discards.high += 1;
9453                 }
9454                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9455         }
9456         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9457 }
9458
9459 static void tg3_chk_missed_msi(struct tg3 *tp)
9460 {
9461         u32 i;
9462
9463         for (i = 0; i < tp->irq_cnt; i++) {
9464                 struct tg3_napi *tnapi = &tp->napi[i];
9465
9466                 if (tg3_has_work(tnapi)) {
9467                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9468                             tnapi->last_tx_cons == tnapi->tx_cons) {
9469                                 if (tnapi->chk_msi_cnt < 1) {
9470                                         tnapi->chk_msi_cnt++;
9471                                         return;
9472                                 }
9473                                 tg3_msi(0, tnapi);
9474                         }
9475                 }
9476                 tnapi->chk_msi_cnt = 0;
9477                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9478                 tnapi->last_tx_cons = tnapi->tx_cons;
9479         }
9480 }
9481
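/* Editor's note: tg3_chk_missed_msi() above works around occasionally
 * lost MSI interrupts.  If a vector reports pending work but its RX/TX
 * consumer indices have not moved since the previous timer tick, the
 * interrupt handler is invoked by hand via tg3_msi(); chk_msi_cnt grants
 * one grace tick so a merely slow vector is not falsely flagged.
 */
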
9482 static void tg3_timer(unsigned long __opaque)
9483 {
9484         struct tg3 *tp = (struct tg3 *) __opaque;
9485
9486         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9487                 goto restart_timer;
9488
9489         spin_lock(&tp->lock);
9490
9491         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9492             tg3_flag(tp, 57765_CLASS))
9493                 tg3_chk_missed_msi(tp);
9494
9495         if (!tg3_flag(tp, TAGGED_STATUS)) {
9496                 /* All of this garbage is because, when using non-tagged
9497                  * IRQ status, the mailbox/status_block protocol the chip
9498                  * uses with the CPU is race prone.
9499                  */
9500                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9501                         tw32(GRC_LOCAL_CTRL,
9502                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9503                 } else {
9504                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9505                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9506                 }
9507
9508                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9509                         spin_unlock(&tp->lock);
9510                         tg3_reset_task_schedule(tp);
9511                         goto restart_timer;
9512                 }
9513         }
9514
9515         /* This part only runs once per second. */
9516         if (!--tp->timer_counter) {
9517                 if (tg3_flag(tp, 5705_PLUS))
9518                         tg3_periodic_fetch_stats(tp);
9519
9520                 if (tp->setlpicnt && !--tp->setlpicnt)
9521                         tg3_phy_eee_enable(tp);
9522
9523                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9524                         u32 mac_stat;
9525                         int phy_event;
9526
9527                         mac_stat = tr32(MAC_STATUS);
9528
9529                         phy_event = 0;
9530                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9531                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9532                                         phy_event = 1;
9533                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9534                                 phy_event = 1;
9535
9536                         if (phy_event)
9537                                 tg3_setup_phy(tp, 0);
9538                 } else if (tg3_flag(tp, POLL_SERDES)) {
9539                         u32 mac_stat = tr32(MAC_STATUS);
9540                         int need_setup = 0;
9541
9542                         if (netif_carrier_ok(tp->dev) &&
9543                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9544                                 need_setup = 1;
9545                         }
9546                         if (!netif_carrier_ok(tp->dev) &&
9547                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9548                                          MAC_STATUS_SIGNAL_DET))) {
9549                                 need_setup = 1;
9550                         }
9551                         if (need_setup) {
9552                                 if (!tp->serdes_counter) {
9553                                         tw32_f(MAC_MODE,
9554                                              (tp->mac_mode &
9555                                               ~MAC_MODE_PORT_MODE_MASK));
9556                                         udelay(40);
9557                                         tw32_f(MAC_MODE, tp->mac_mode);
9558                                         udelay(40);
9559                                 }
9560                                 tg3_setup_phy(tp, 0);
9561                         }
9562                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9563                            tg3_flag(tp, 5780_CLASS)) {
9564                         tg3_serdes_parallel_detect(tp);
9565                 }
9566
9567                 tp->timer_counter = tp->timer_multiplier;
9568         }
9569
9570         /* The heartbeat is sent only once every 2 seconds.
9571          *
9572          * The heartbeat is to tell the ASF firmware that the host
9573          * driver is still alive.  In the event that the OS crashes,
9574          * ASF needs to reset the hardware to free up the FIFO space
9575          * that may be filled with rx packets destined for the host.
9576          * If the FIFO is full, ASF will no longer function properly.
9577          *
9578          * Unintended resets have been reported on real-time kernels
9579          * where the timer doesn't run on time.  Netpoll will also have
9580          * the same problem.
9581          *
9582          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9583          * to check the ring condition when the heartbeat is expiring
9584          * before doing the reset.  This will prevent most unintended
9585          * resets.
9586          */
9587         if (!--tp->asf_counter) {
9588                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9589                         tg3_wait_for_event_ack(tp);
9590
9591                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9592                                       FWCMD_NICDRV_ALIVE3);
9593                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9594                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9595                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9596
9597                         tg3_generate_fw_event(tp);
9598                 }
9599                 tp->asf_counter = tp->asf_multiplier;
9600         }
9601
9602         spin_unlock(&tp->lock);
9603
9604 restart_timer:
9605         tp->timer.expires = jiffies + tp->timer_offset;
9606         add_timer(&tp->timer);
9607 }
9608
9609 static void __devinit tg3_timer_init(struct tg3 *tp)
9610 {
9611         if (tg3_flag(tp, TAGGED_STATUS) &&
9612             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9613             !tg3_flag(tp, 57765_CLASS))
9614                 tp->timer_offset = HZ;
9615         else
9616                 tp->timer_offset = HZ / 10;
9617
9618         BUG_ON(tp->timer_offset > HZ);
9619
9620         tp->timer_multiplier = (HZ / tp->timer_offset);
9621         tp->asf_multiplier = (HZ / tp->timer_offset) *
9622                              TG3_FW_UPDATE_FREQ_SEC;
9623
9624         init_timer(&tp->timer);
9625         tp->timer.data = (unsigned long) tp;
9626         tp->timer.function = tg3_timer;
9627 }
9628
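/* Editor's note: a worked example of the bookkeeping in tg3_timer_init()
 * above, assuming HZ = 1000.  On a tagged-status device that is not a
 * 5717 or 57765:
 *
 *      timer_offset     = HZ            -> tg3_timer() runs at 1 Hz
 *      timer_multiplier = HZ / HZ = 1   -> once-per-second block every tick
 *
 * On all other devices the timer runs at 10 Hz (offset HZ / 10,
 * multiplier 10), so the once-per-second work in tg3_timer() still runs
 * at the same rate.  TG3_FW_UPDATE_FREQ_SEC is presumably 2, matching the
 * "once every 2 seconds" heartbeat comment in tg3_timer().
 */
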
9629 static void tg3_timer_start(struct tg3 *tp)
9630 {
9631         tp->asf_counter   = tp->asf_multiplier;
9632         tp->timer_counter = tp->timer_multiplier;
9633
9634         tp->timer.expires = jiffies + tp->timer_offset;
9635         add_timer(&tp->timer);
9636 }
9637
9638 static void tg3_timer_stop(struct tg3 *tp)
9639 {
9640         del_timer_sync(&tp->timer);
9641 }
9642
9643 /* Restart hardware after configuration changes, self-test, etc.
9644  * Invoked with tp->lock held.
9645  */
9646 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9647         __releases(tp->lock)
9648         __acquires(tp->lock)
9649 {
9650         int err;
9651
9652         err = tg3_init_hw(tp, reset_phy);
9653         if (err) {
9654                 netdev_err(tp->dev,
9655                            "Failed to re-initialize device, aborting\n");
9656                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9657                 tg3_full_unlock(tp);
9658                 tg3_timer_stop(tp);
9659                 tp->irq_sync = 0;
9660                 tg3_napi_enable(tp);
9661                 dev_close(tp->dev);
9662                 tg3_full_lock(tp, 0);
9663         }
9664         return err;
9665 }
9666
9667 static void tg3_reset_task(struct work_struct *work)
9668 {
9669         struct tg3 *tp = container_of(work, struct tg3, reset_task);
9670         int err;
9671
9672         tg3_full_lock(tp, 0);
9673
9674         if (!netif_running(tp->dev)) {
9675                 tg3_flag_clear(tp, RESET_TASK_PENDING);
9676                 tg3_full_unlock(tp);
9677                 return;
9678         }
9679
9680         tg3_full_unlock(tp);
9681
9682         tg3_phy_stop(tp);
9683
9684         tg3_netif_stop(tp);
9685
9686         tg3_full_lock(tp, 1);
9687
9688         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9689                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9690                 tp->write32_rx_mbox = tg3_write_flush_reg32;
9691                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9692                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9693         }
9694
9695         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9696         err = tg3_init_hw(tp, 1);
9697         if (err)
9698                 goto out;
9699
9700         tg3_netif_start(tp);
9701
9702 out:
9703         tg3_full_unlock(tp);
9704
9705         if (!err)
9706                 tg3_phy_start(tp);
9707
9708         tg3_flag_clear(tp, RESET_TASK_PENDING);
9709 }
9710
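/* Install the right ISR for vector @irq_num: MSI/MSI-X vectors get
 * tg3_msi (or the one-shot variant) and are not shared, while legacy
 * INTx uses tg3_interrupt (or the tagged-status variant) with
 * IRQF_SHARED.  Multi-vector setups label each vector "<ifname>-<n>".
 */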
9711 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9712 {
9713         irq_handler_t fn;
9714         unsigned long flags;
9715         char *name;
9716         struct tg3_napi *tnapi = &tp->napi[irq_num];
9717
9718         if (tp->irq_cnt == 1)
9719                 name = tp->dev->name;
9720         else {
9721                 name = &tnapi->irq_lbl[0];
9722                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9723                 name[IFNAMSIZ-1] = 0;
9724         }
9725
9726         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9727                 fn = tg3_msi;
9728                 if (tg3_flag(tp, 1SHOT_MSI))
9729                         fn = tg3_msi_1shot;
9730                 flags = 0;
9731         } else {
9732                 fn = tg3_interrupt;
9733                 if (tg3_flag(tp, TAGGED_STATUS))
9734                         fn = tg3_interrupt_tagged;
9735                 flags = IRQF_SHARED;
9736         }
9737
9738         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9739 }
9740
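/* Verify that the device can actually deliver an interrupt: swap in
 * tg3_test_isr, kick the coalescing engine, and poll up to five times,
 * 10 ms apart, for evidence of delivery (a non-zero interrupt mailbox
 * or a masked PCI INT), then restore the normal handler.
 */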
9741 static int tg3_test_interrupt(struct tg3 *tp)
9742 {
9743         struct tg3_napi *tnapi = &tp->napi[0];
9744         struct net_device *dev = tp->dev;
9745         int err, i, intr_ok = 0;
9746         u32 val;
9747
9748         if (!netif_running(dev))
9749                 return -ENODEV;
9750
9751         tg3_disable_ints(tp);
9752
9753         free_irq(tnapi->irq_vec, tnapi);
9754
9755         /*
9756          * Turn off MSI one shot mode.  Otherwise this test has no
9757          * observable way to know whether the interrupt was delivered.
9758          */
9759         if (tg3_flag(tp, 57765_PLUS)) {
9760                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9761                 tw32(MSGINT_MODE, val);
9762         }
9763
9764         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9765                           IRQF_SHARED, dev->name, tnapi);
9766         if (err)
9767                 return err;
9768
9769         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9770         tg3_enable_ints(tp);
9771
9772         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9773                tnapi->coal_now);
9774
9775         for (i = 0; i < 5; i++) {
9776                 u32 int_mbox, misc_host_ctrl;
9777
9778                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9779                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9780
9781                 if ((int_mbox != 0) ||
9782                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9783                         intr_ok = 1;
9784                         break;
9785                 }
9786
9787                 if (tg3_flag(tp, 57765_PLUS) &&
9788                     tnapi->hw_status->status_tag != tnapi->last_tag)
9789                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9790
9791                 msleep(10);
9792         }
9793
9794         tg3_disable_ints(tp);
9795
9796         free_irq(tnapi->irq_vec, tnapi);
9797
9798         err = tg3_request_irq(tp, 0);
9799
9800         if (err)
9801                 return err;
9802
9803         if (intr_ok) {
9804                 /* Reenable MSI one shot mode. */
9805                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9806                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9807                         tw32(MSGINT_MODE, val);
9808                 }
9809                 return 0;
9810         }
9811
9812         return -EIO;
9813 }
9814
9815 /* Returns 0 if the MSI test succeeds, or if the MSI test fails and
9816  * INTx mode is successfully restored.
9817  */
9818 static int tg3_test_msi(struct tg3 *tp)
9819 {
9820         int err;
9821         u16 pci_cmd;
9822
9823         if (!tg3_flag(tp, USING_MSI))
9824                 return 0;
9825
9826         /* Turn off SERR reporting in case MSI terminates with Master
9827          * Abort.
9828          */
9829         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9830         pci_write_config_word(tp->pdev, PCI_COMMAND,
9831                               pci_cmd & ~PCI_COMMAND_SERR);
9832
9833         err = tg3_test_interrupt(tp);
9834
9835         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9836
9837         if (!err)
9838                 return 0;
9839
9840         /* other failures */
9841         if (err != -EIO)
9842                 return err;
9843
9844         /* MSI test failed, go back to INTx mode */
9845         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9846                     "to INTx mode. Please report this failure to the PCI "
9847                     "maintainer and include system chipset information\n");
9848
9849         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9850
9851         pci_disable_msi(tp->pdev);
9852
9853         tg3_flag_clear(tp, USING_MSI);
9854         tp->napi[0].irq_vec = tp->pdev->irq;
9855
9856         err = tg3_request_irq(tp, 0);
9857         if (err)
9858                 return err;
9859
9860         /* Need to reset the chip because the MSI cycle may have terminated
9861          * with Master Abort.
9862          */
9863         tg3_full_lock(tp, 1);
9864
9865         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9866         err = tg3_init_hw(tp, 1);
9867
9868         tg3_full_unlock(tp);
9869
9870         if (err)
9871                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9872
9873         return err;
9874 }
9875
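/* Fetch the firmware image named in tp->fw_needed and sanity-check the
 * length field in its header.  On success, tp->fw_needed is cleared so
 * later opens do not re-request the image.
 */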
9876 static int tg3_request_firmware(struct tg3 *tp)
9877 {
9878         const __be32 *fw_data;
9879
9880         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9881                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9882                            tp->fw_needed);
9883                 return -ENOENT;
9884         }
9885
9886         fw_data = (void *)tp->fw->data;
9887
9888         /* Firmware blob starts with version numbers, followed by
9889          * start address and _full_ length including BSS sections
9890          * (which must be longer than the actual data, of course).
9891          */
9892
9893         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9894         if (tp->fw_len < (tp->fw->size - 12)) {
9895                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9896                            tp->fw_len, tp->fw_needed);
9897                 release_firmware(tp->fw);
9898                 tp->fw = NULL;
9899                 return -EINVAL;
9900         }
9901
9902         /* We no longer need firmware; we have it. */
9903         tp->fw_needed = NULL;
9904         return 0;
9905 }
9906
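/* Try to switch to MSI-X: request one vector per online CPU plus one,
 * since vector 0 only handles link and other miscellaneous events in
 * multiqueue mode.  Falls back gracefully if the PCI core grants fewer
 * vectors, and enables RSS (plus TSS on 5719/5720) whenever more than
 * one vector is available.
 */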
9907 static bool tg3_enable_msix(struct tg3 *tp)
9908 {
9909         int i, rc;
9910         struct msix_entry msix_ent[tp->irq_max];
9911
9912         tp->irq_cnt = num_online_cpus();
9913         if (tp->irq_cnt > 1) {
9914                 /* We want as many rx rings enabled as there are cpus.
9915                  * In multiqueue MSI-X mode, the first MSI-X vector
9916                  * only deals with link interrupts, etc, so we add
9917                  * one to the number of vectors we are requesting.
9918                  */
9919                 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9920         }
9921
9922         for (i = 0; i < tp->irq_max; i++) {
9923                 msix_ent[i].entry  = i;
9924                 msix_ent[i].vector = 0;
9925         }
9926
9927         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9928         if (rc < 0) {
9929                 return false;
9930         } else if (rc != 0) {
9931                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9932                         return false;
9933                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9934                               tp->irq_cnt, rc);
9935                 tp->irq_cnt = rc;
9936         }
9937
9938         for (i = 0; i < tp->irq_max; i++)
9939                 tp->napi[i].irq_vec = msix_ent[i].vector;
9940
9941         netif_set_real_num_tx_queues(tp->dev, 1);
9942         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9943         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9944                 pci_disable_msix(tp->pdev);
9945                 return false;
9946         }
9947
9948         if (tp->irq_cnt > 1) {
9949                 tg3_flag_set(tp, ENABLE_RSS);
9950
9951                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9952                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9953                         tg3_flag_set(tp, ENABLE_TSS);
9954                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9955                 }
9956         }
9957
9958         return true;
9959 }
9960
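/* Pick the interrupt mode in order of preference: MSI-X, then MSI,
 * then legacy INTx.  The MSI modes also require tagged status; without
 * it the driver refuses MSI and drops back to a single INTx vector.
 */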
9961 static void tg3_ints_init(struct tg3 *tp)
9962 {
9963         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9964             !tg3_flag(tp, TAGGED_STATUS)) {
9965                 /* All MSI-supporting chips should support tagged
9966                  * status.  Assert that this is the case.
9967                  */
9968                 netdev_warn(tp->dev,
9969                             "MSI without TAGGED_STATUS? Not using MSI\n");
9970                 goto defcfg;
9971         }
9972
9973         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9974                 tg3_flag_set(tp, USING_MSIX);
9975         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9976                 tg3_flag_set(tp, USING_MSI);
9977
9978         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9979                 u32 msi_mode = tr32(MSGINT_MODE);
9980                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9981                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9982                 if (!tg3_flag(tp, 1SHOT_MSI))
9983                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9984                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9985         }
9986 defcfg:
9987         if (!tg3_flag(tp, USING_MSIX)) {
9988                 tp->irq_cnt = 1;
9989                 tp->napi[0].irq_vec = tp->pdev->irq;
9990                 netif_set_real_num_tx_queues(tp->dev, 1);
9991                 netif_set_real_num_rx_queues(tp->dev, 1);
9992         }
9993 }
9994
9995 static void tg3_ints_fini(struct tg3 *tp)
9996 {
9997         if (tg3_flag(tp, USING_MSIX))
9998                 pci_disable_msix(tp->pdev);
9999         else if (tg3_flag(tp, USING_MSI))
10000                 pci_disable_msi(tp->pdev);
10001         tg3_flag_clear(tp, USING_MSI);
10002         tg3_flag_clear(tp, USING_MSIX);
10003         tg3_flag_clear(tp, ENABLE_RSS);
10004         tg3_flag_clear(tp, ENABLE_TSS);
10005 }
10006
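/* .ndo_open: bring the device up.  Ordering matters here; interrupt
 * vectors are configured first so the ring and NAPI allocations know
 * how many queues to size for, then IRQs are requested, the hardware
 * is initialized, MSI delivery is sanity-checked, and finally the
 * service timer and interrupts are enabled.
 */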
10007 static int tg3_open(struct net_device *dev)
10008 {
10009         struct tg3 *tp = netdev_priv(dev);
10010         int i, err;
10011
10012         if (tp->fw_needed) {
10013                 err = tg3_request_firmware(tp);
10014                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10015                         if (err)
10016                                 return err;
10017                 } else if (err) {
10018                         netdev_warn(tp->dev, "TSO capability disabled\n");
10019                         tg3_flag_clear(tp, TSO_CAPABLE);
10020                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10021                         netdev_notice(tp->dev, "TSO capability restored\n");
10022                         tg3_flag_set(tp, TSO_CAPABLE);
10023                 }
10024         }
10025
10026         netif_carrier_off(tp->dev);
10027
10028         err = tg3_power_up(tp);
10029         if (err)
10030                 return err;
10031
10032         tg3_full_lock(tp, 0);
10033
10034         tg3_disable_ints(tp);
10035         tg3_flag_clear(tp, INIT_COMPLETE);
10036
10037         tg3_full_unlock(tp);
10038
10039         /*
10040          * Set up interrupts first so we know how
10041          * many NAPI resources to allocate
10042          */
10043         tg3_ints_init(tp);
10044
10045         tg3_rss_check_indir_tbl(tp);
10046
10047         /* The placement of this call is tied
10048          * to the setup and use of Host TX descriptors.
10049          */
10050         err = tg3_alloc_consistent(tp);
10051         if (err)
10052                 goto err_out1;
10053
10054         tg3_napi_init(tp);
10055
10056         tg3_napi_enable(tp);
10057
10058         for (i = 0; i < tp->irq_cnt; i++) {
10059                 struct tg3_napi *tnapi = &tp->napi[i];
10060                 err = tg3_request_irq(tp, i);
10061                 if (err) {
10062                         for (i--; i >= 0; i--) {
10063                                 tnapi = &tp->napi[i];
10064                                 free_irq(tnapi->irq_vec, tnapi);
10065                         }
10066                         goto err_out2;
10067                 }
10068         }
10069
10070         tg3_full_lock(tp, 0);
10071
10072         err = tg3_init_hw(tp, 1);
10073         if (err) {
10074                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10075                 tg3_free_rings(tp);
10076         }
10077
10078         tg3_full_unlock(tp);
10079
10080         if (err)
10081                 goto err_out3;
10082
10083         if (tg3_flag(tp, USING_MSI)) {
10084                 err = tg3_test_msi(tp);
10085
10086                 if (err) {
10087                         tg3_full_lock(tp, 0);
10088                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10089                         tg3_free_rings(tp);
10090                         tg3_full_unlock(tp);
10091
10092                         goto err_out2;
10093                 }
10094
10095                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10096                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10097
10098                         tw32(PCIE_TRANSACTION_CFG,
10099                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10100                 }
10101         }
10102
10103         tg3_phy_start(tp);
10104
10105         tg3_full_lock(tp, 0);
10106
10107         tg3_timer_start(tp);
10108         tg3_flag_set(tp, INIT_COMPLETE);
10109         tg3_enable_ints(tp);
10110
10111         tg3_full_unlock(tp);
10112
10113         netif_tx_start_all_queues(dev);
10114
10115         /*
10116          * Reset the loopback feature if it was turned on while the device
10117          * was down; make sure that it is installed properly now.
10118          */
10119         if (dev->features & NETIF_F_LOOPBACK)
10120                 tg3_set_loopback(dev, dev->features);
10121
10122         return 0;
10123
10124 err_out3:
10125         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10126                 struct tg3_napi *tnapi = &tp->napi[i];
10127                 free_irq(tnapi->irq_vec, tnapi);
10128         }
10129
10130 err_out2:
10131         tg3_napi_disable(tp);
10132         tg3_napi_fini(tp);
10133         tg3_free_consistent(tp);
10134
10135 err_out1:
10136         tg3_ints_fini(tp);
10137         tg3_frob_aux_power(tp, false);
10138         pci_set_power_state(tp->pdev, PCI_D3hot);
10139         return err;
10140 }
10141
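/* .ndo_close: tear down in roughly the reverse order of tg3_open() --
 * quiesce NAPI, the timer and the hardware, release the IRQ vectors
 * and DMA rings, then power the device down.
 */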
10142 static int tg3_close(struct net_device *dev)
10143 {
10144         int i;
10145         struct tg3 *tp = netdev_priv(dev);
10146
10147         tg3_napi_disable(tp);
10148         tg3_reset_task_cancel(tp);
10149
10150         netif_tx_stop_all_queues(dev);
10151
10152         tg3_timer_stop(tp);
10153
10154         tg3_phy_stop(tp);
10155
10156         tg3_full_lock(tp, 1);
10157
10158         tg3_disable_ints(tp);
10159
10160         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10161         tg3_free_rings(tp);
10162         tg3_flag_clear(tp, INIT_COMPLETE);
10163
10164         tg3_full_unlock(tp);
10165
10166         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10167                 struct tg3_napi *tnapi = &tp->napi[i];
10168                 free_irq(tnapi->irq_vec, tnapi);
10169         }
10170
10171         tg3_ints_fini(tp);
10172
10173         /* Clear stats across close / open calls */
10174         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10175         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10176
10177         tg3_napi_fini(tp);
10178
10179         tg3_free_consistent(tp);
10180
10181         tg3_power_down(tp);
10182
10183         netif_carrier_off(tp->dev);
10184
10185         return 0;
10186 }
10187
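/* Hardware statistics counters are kept as {high, low} 32-bit pairs;
 * fold one into a plain u64.
 */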
10188 static inline u64 get_stat64(tg3_stat64_t *val)
10189 {
10190         return ((u64)val->high << 32) | ((u64)val->low);
10191 }
10192
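/* On 5700/5701 with a copper PHY, the CRC error count lives in a PHY
 * test register: enable the PHY's CRC counter and accumulate its
 * readings.  All other configurations use the MAC's rx_fcs_errors MIB
 * counter directly.
 */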
10193 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10194 {
10195         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10196
10197         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10198             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10199              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10200                 u32 val;
10201
10202                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10203                         tg3_writephy(tp, MII_TG3_TEST1,
10204                                      val | MII_TG3_TEST1_CRC_EN);
10205                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10206                 } else
10207                         val = 0;
10208
10209                 tp->phy_crc_errors += val;
10210
10211                 return tp->phy_crc_errors;
10212         }
10213
10214         return get_stat64(&hw_stats->rx_fcs_errors);
10215 }
10216
10217 #define ESTAT_ADD(member) \
10218         estats->member =        old_estats->member + \
10219                                 get_stat64(&hw_stats->member)
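/* For example, ESTAT_ADD(rx_octets) expands to:
 *
 *      estats->rx_octets = old_estats->rx_octets +
 *                          get_stat64(&hw_stats->rx_octets);
 */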
10220
10221 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10222 {
10223         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10224         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10225
10226         ESTAT_ADD(rx_octets);
10227         ESTAT_ADD(rx_fragments);
10228         ESTAT_ADD(rx_ucast_packets);
10229         ESTAT_ADD(rx_mcast_packets);
10230         ESTAT_ADD(rx_bcast_packets);
10231         ESTAT_ADD(rx_fcs_errors);
10232         ESTAT_ADD(rx_align_errors);
10233         ESTAT_ADD(rx_xon_pause_rcvd);
10234         ESTAT_ADD(rx_xoff_pause_rcvd);
10235         ESTAT_ADD(rx_mac_ctrl_rcvd);
10236         ESTAT_ADD(rx_xoff_entered);
10237         ESTAT_ADD(rx_frame_too_long_errors);
10238         ESTAT_ADD(rx_jabbers);
10239         ESTAT_ADD(rx_undersize_packets);
10240         ESTAT_ADD(rx_in_length_errors);
10241         ESTAT_ADD(rx_out_length_errors);
10242         ESTAT_ADD(rx_64_or_less_octet_packets);
10243         ESTAT_ADD(rx_65_to_127_octet_packets);
10244         ESTAT_ADD(rx_128_to_255_octet_packets);
10245         ESTAT_ADD(rx_256_to_511_octet_packets);
10246         ESTAT_ADD(rx_512_to_1023_octet_packets);
10247         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10248         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10249         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10250         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10251         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10252
10253         ESTAT_ADD(tx_octets);
10254         ESTAT_ADD(tx_collisions);
10255         ESTAT_ADD(tx_xon_sent);
10256         ESTAT_ADD(tx_xoff_sent);
10257         ESTAT_ADD(tx_flow_control);
10258         ESTAT_ADD(tx_mac_errors);
10259         ESTAT_ADD(tx_single_collisions);
10260         ESTAT_ADD(tx_mult_collisions);
10261         ESTAT_ADD(tx_deferred);
10262         ESTAT_ADD(tx_excessive_collisions);
10263         ESTAT_ADD(tx_late_collisions);
10264         ESTAT_ADD(tx_collide_2times);
10265         ESTAT_ADD(tx_collide_3times);
10266         ESTAT_ADD(tx_collide_4times);
10267         ESTAT_ADD(tx_collide_5times);
10268         ESTAT_ADD(tx_collide_6times);
10269         ESTAT_ADD(tx_collide_7times);
10270         ESTAT_ADD(tx_collide_8times);
10271         ESTAT_ADD(tx_collide_9times);
10272         ESTAT_ADD(tx_collide_10times);
10273         ESTAT_ADD(tx_collide_11times);
10274         ESTAT_ADD(tx_collide_12times);
10275         ESTAT_ADD(tx_collide_13times);
10276         ESTAT_ADD(tx_collide_14times);
10277         ESTAT_ADD(tx_collide_15times);
10278         ESTAT_ADD(tx_ucast_packets);
10279         ESTAT_ADD(tx_mcast_packets);
10280         ESTAT_ADD(tx_bcast_packets);
10281         ESTAT_ADD(tx_carrier_sense_errors);
10282         ESTAT_ADD(tx_discards);
10283         ESTAT_ADD(tx_errors);
10284
10285         ESTAT_ADD(dma_writeq_full);
10286         ESTAT_ADD(dma_write_prioq_full);
10287         ESTAT_ADD(rxbds_empty);
10288         ESTAT_ADD(rx_discards);
10289         ESTAT_ADD(rx_errors);
10290         ESTAT_ADD(rx_threshold_hit);
10291
10292         ESTAT_ADD(dma_readq_full);
10293         ESTAT_ADD(dma_read_prioq_full);
10294         ESTAT_ADD(tx_comp_queue_full);
10295
10296         ESTAT_ADD(ring_set_send_prod_index);
10297         ESTAT_ADD(ring_status_update);
10298         ESTAT_ADD(nic_irqs);
10299         ESTAT_ADD(nic_avoided_irqs);
10300         ESTAT_ADD(nic_tx_threshold_hit);
10301
10302         ESTAT_ADD(mbuf_lwm_thresh_hit);
10303 }
10304
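/* Fill a struct rtnl_link_stats64 from the hardware MIB counters,
 * adding in the running totals the driver keeps in the *_prev copies.
 */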
10305 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10306 {
10307         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10308         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10309
10310         stats->rx_packets = old_stats->rx_packets +
10311                 get_stat64(&hw_stats->rx_ucast_packets) +
10312                 get_stat64(&hw_stats->rx_mcast_packets) +
10313                 get_stat64(&hw_stats->rx_bcast_packets);
10314
10315         stats->tx_packets = old_stats->tx_packets +
10316                 get_stat64(&hw_stats->tx_ucast_packets) +
10317                 get_stat64(&hw_stats->tx_mcast_packets) +
10318                 get_stat64(&hw_stats->tx_bcast_packets);
10319
10320         stats->rx_bytes = old_stats->rx_bytes +
10321                 get_stat64(&hw_stats->rx_octets);
10322         stats->tx_bytes = old_stats->tx_bytes +
10323                 get_stat64(&hw_stats->tx_octets);
10324
10325         stats->rx_errors = old_stats->rx_errors +
10326                 get_stat64(&hw_stats->rx_errors);
10327         stats->tx_errors = old_stats->tx_errors +
10328                 get_stat64(&hw_stats->tx_errors) +
10329                 get_stat64(&hw_stats->tx_mac_errors) +
10330                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10331                 get_stat64(&hw_stats->tx_discards);
10332
10333         stats->multicast = old_stats->multicast +
10334                 get_stat64(&hw_stats->rx_mcast_packets);
10335         stats->collisions = old_stats->collisions +
10336                 get_stat64(&hw_stats->tx_collisions);
10337
10338         stats->rx_length_errors = old_stats->rx_length_errors +
10339                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10340                 get_stat64(&hw_stats->rx_undersize_packets);
10341
10342         stats->rx_over_errors = old_stats->rx_over_errors +
10343                 get_stat64(&hw_stats->rxbds_empty);
10344         stats->rx_frame_errors = old_stats->rx_frame_errors +
10345                 get_stat64(&hw_stats->rx_align_errors);
10346         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10347                 get_stat64(&hw_stats->tx_discards);
10348         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10349                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10350
10351         stats->rx_crc_errors = old_stats->rx_crc_errors +
10352                 tg3_calc_crc_errors(tp);
10353
10354         stats->rx_missed_errors = old_stats->rx_missed_errors +
10355                 get_stat64(&hw_stats->rx_discards);
10356
10357         stats->rx_dropped = tp->rx_dropped;
10358         stats->tx_dropped = tp->tx_dropped;
10359 }
10360
10361 static int tg3_get_regs_len(struct net_device *dev)
10362 {
10363         return TG3_REG_BLK_SIZE;
10364 }
10365
10366 static void tg3_get_regs(struct net_device *dev,
10367                 struct ethtool_regs *regs, void *_p)
10368 {
10369         struct tg3 *tp = netdev_priv(dev);
10370
10371         regs->version = 0;
10372
10373         memset(_p, 0, TG3_REG_BLK_SIZE);
10374
10375         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10376                 return;
10377
10378         tg3_full_lock(tp, 0);
10379
10380         tg3_dump_legacy_regs(tp, (u32 *)_p);
10381
10382         tg3_full_unlock(tp);
10383 }
10384
10385 static int tg3_get_eeprom_len(struct net_device *dev)
10386 {
10387         struct tg3 *tp = netdev_priv(dev);
10388
10389         return tp->nvram_size;
10390 }
10391
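/* ethtool EEPROM read.  NVRAM is only addressable in 4-byte words, so
 * an unaligned head or tail is handled by reading the enclosing word
 * and copying out just the requested bytes.
 */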
10392 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10393 {
10394         struct tg3 *tp = netdev_priv(dev);
10395         int ret;
10396         u8  *pd;
10397         u32 i, offset, len, b_offset, b_count;
10398         __be32 val;
10399
10400         if (tg3_flag(tp, NO_NVRAM))
10401                 return -EINVAL;
10402
10403         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10404                 return -EAGAIN;
10405
10406         offset = eeprom->offset;
10407         len = eeprom->len;
10408         eeprom->len = 0;
10409
10410         eeprom->magic = TG3_EEPROM_MAGIC;
10411
10412         if (offset & 3) {
10413                 /* adjustments to start on required 4 byte boundary */
10414                 b_offset = offset & 3;
10415                 b_count = 4 - b_offset;
10416                 if (b_count > len) {
10417                         /* i.e. offset=1 len=2 */
10418                         b_count = len;
10419                 }
10420                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10421                 if (ret)
10422                         return ret;
10423                 memcpy(data, ((char *)&val) + b_offset, b_count);
10424                 len -= b_count;
10425                 offset += b_count;
10426                 eeprom->len += b_count;
10427         }
10428
10429         /* read bytes up to the last 4 byte boundary */
10430         pd = &data[eeprom->len];
10431         for (i = 0; i < (len - (len & 3)); i += 4) {
10432                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10433                 if (ret) {
10434                         eeprom->len += i;
10435                         return ret;
10436                 }
10437                 memcpy(pd + i, &val, 4);
10438         }
10439         eeprom->len += i;
10440
10441         if (len & 3) {
10442                 /* read last bytes not ending on 4 byte boundary */
10443                 pd = &data[eeprom->len];
10444                 b_count = len & 3;
10445                 b_offset = offset + len - b_count;
10446                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10447                 if (ret)
10448                         return ret;
10449                 memcpy(pd, &val, b_count);
10450                 eeprom->len += b_count;
10451         }
10452         return 0;
10453 }
10454
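/* ethtool EEPROM write.  As with reads, writes must cover whole 4-byte
 * words; unaligned edges are read back first and merged into a bounce
 * buffer before tg3_nvram_write_block() is called.
 */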
10455 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10456 {
10457         struct tg3 *tp = netdev_priv(dev);
10458         int ret;
10459         u32 offset, len, b_offset, odd_len;
10460         u8 *buf;
10461         __be32 start, end;
10462
10463         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10464                 return -EAGAIN;
10465
10466         if (tg3_flag(tp, NO_NVRAM) ||
10467             eeprom->magic != TG3_EEPROM_MAGIC)
10468                 return -EINVAL;
10469
10470         offset = eeprom->offset;
10471         len = eeprom->len;
10472
10473         if ((b_offset = (offset & 3))) {
10474                 /* adjustments to start on required 4 byte boundary */
10475                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10476                 if (ret)
10477                         return ret;
10478                 len += b_offset;
10479                 offset &= ~3;
10480                 if (len < 4)
10481                         len = 4;
10482         }
10483
10484         odd_len = 0;
10485         if (len & 3) {
10486                 /* adjustments to end on required 4 byte boundary */
10487                 odd_len = 1;
10488                 len = (len + 3) & ~3;
10489                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10490                 if (ret)
10491                         return ret;
10492         }
10493
10494         buf = data;
10495         if (b_offset || odd_len) {
10496                 buf = kmalloc(len, GFP_KERNEL);
10497                 if (!buf)
10498                         return -ENOMEM;
10499                 if (b_offset)
10500                         memcpy(buf, &start, 4);
10501                 if (odd_len)
10502                         memcpy(buf+len-4, &end, 4);
10503                 memcpy(buf + b_offset, data, eeprom->len);
10504         }
10505
10506         ret = tg3_nvram_write_block(tp, offset, len, buf);
10507
10508         if (buf != data)
10509                 kfree(buf);
10510
10511         return ret;
10512 }
10513
10514 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10515 {
10516         struct tg3 *tp = netdev_priv(dev);
10517
10518         if (tg3_flag(tp, USE_PHYLIB)) {
10519                 struct phy_device *phydev;
10520                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10521                         return -EAGAIN;
10522                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10523                 return phy_ethtool_gset(phydev, cmd);
10524         }
10525
10526         cmd->supported = (SUPPORTED_Autoneg);
10527
10528         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10529                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10530                                    SUPPORTED_1000baseT_Full);
10531
10532         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10533                 cmd->supported |= (SUPPORTED_100baseT_Half |
10534                                   SUPPORTED_100baseT_Full |
10535                                   SUPPORTED_10baseT_Half |
10536                                   SUPPORTED_10baseT_Full |
10537                                   SUPPORTED_TP);
10538                 cmd->port = PORT_TP;
10539         } else {
10540                 cmd->supported |= SUPPORTED_FIBRE;
10541                 cmd->port = PORT_FIBRE;
10542         }
10543
10544         cmd->advertising = tp->link_config.advertising;
10545         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10546                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10547                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10548                                 cmd->advertising |= ADVERTISED_Pause;
10549                         } else {
10550                                 cmd->advertising |= ADVERTISED_Pause |
10551                                                     ADVERTISED_Asym_Pause;
10552                         }
10553                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10554                         cmd->advertising |= ADVERTISED_Asym_Pause;
10555                 }
10556         }
10557         if (netif_running(dev) && netif_carrier_ok(dev)) {
10558                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10559                 cmd->duplex = tp->link_config.active_duplex;
10560                 cmd->lp_advertising = tp->link_config.rmt_adv;
10561                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10562                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10563                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10564                         else
10565                                 cmd->eth_tp_mdix = ETH_TP_MDI;
10566                 }
10567         } else {
10568                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10569                 cmd->duplex = DUPLEX_UNKNOWN;
10570                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10571         }
10572         cmd->phy_address = tp->phy_addr;
10573         cmd->transceiver = XCVR_INTERNAL;
10574         cmd->autoneg = tp->link_config.autoneg;
10575         cmd->maxtxpkt = 0;
10576         cmd->maxrxpkt = 0;
10577         return 0;
10578 }
10579
10580 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10581 {
10582         struct tg3 *tp = netdev_priv(dev);
10583         u32 speed = ethtool_cmd_speed(cmd);
10584
10585         if (tg3_flag(tp, USE_PHYLIB)) {
10586                 struct phy_device *phydev;
10587                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10588                         return -EAGAIN;
10589                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10590                 return phy_ethtool_sset(phydev, cmd);
10591         }
10592
10593         if (cmd->autoneg != AUTONEG_ENABLE &&
10594             cmd->autoneg != AUTONEG_DISABLE)
10595                 return -EINVAL;
10596
10597         if (cmd->autoneg == AUTONEG_DISABLE &&
10598             cmd->duplex != DUPLEX_FULL &&
10599             cmd->duplex != DUPLEX_HALF)
10600                 return -EINVAL;
10601
10602         if (cmd->autoneg == AUTONEG_ENABLE) {
10603                 u32 mask = ADVERTISED_Autoneg |
10604                            ADVERTISED_Pause |
10605                            ADVERTISED_Asym_Pause;
10606
10607                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10608                         mask |= ADVERTISED_1000baseT_Half |
10609                                 ADVERTISED_1000baseT_Full;
10610
10611                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10612                         mask |= ADVERTISED_100baseT_Half |
10613                                 ADVERTISED_100baseT_Full |
10614                                 ADVERTISED_10baseT_Half |
10615                                 ADVERTISED_10baseT_Full |
10616                                 ADVERTISED_TP;
10617                 else
10618                         mask |= ADVERTISED_FIBRE;
10619
10620                 if (cmd->advertising & ~mask)
10621                         return -EINVAL;
10622
10623                 mask &= (ADVERTISED_1000baseT_Half |
10624                          ADVERTISED_1000baseT_Full |
10625                          ADVERTISED_100baseT_Half |
10626                          ADVERTISED_100baseT_Full |
10627                          ADVERTISED_10baseT_Half |
10628                          ADVERTISED_10baseT_Full);
10629
10630                 cmd->advertising &= mask;
10631         } else {
10632                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10633                         if (speed != SPEED_1000)
10634                                 return -EINVAL;
10635
10636                         if (cmd->duplex != DUPLEX_FULL)
10637                                 return -EINVAL;
10638                 } else {
10639                         if (speed != SPEED_100 &&
10640                             speed != SPEED_10)
10641                                 return -EINVAL;
10642                 }
10643         }
10644
10645         tg3_full_lock(tp, 0);
10646
10647         tp->link_config.autoneg = cmd->autoneg;
10648         if (cmd->autoneg == AUTONEG_ENABLE) {
10649                 tp->link_config.advertising = (cmd->advertising |
10650                                               ADVERTISED_Autoneg);
10651                 tp->link_config.speed = SPEED_UNKNOWN;
10652                 tp->link_config.duplex = DUPLEX_UNKNOWN;
10653         } else {
10654                 tp->link_config.advertising = 0;
10655                 tp->link_config.speed = speed;
10656                 tp->link_config.duplex = cmd->duplex;
10657         }
10658
10659         if (netif_running(dev))
10660                 tg3_setup_phy(tp, 1);
10661
10662         tg3_full_unlock(tp);
10663
10664         return 0;
10665 }
10666
10667 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10668 {
10669         struct tg3 *tp = netdev_priv(dev);
10670
10671         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10672         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10673         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10674         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10675 }
10676
10677 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10678 {
10679         struct tg3 *tp = netdev_priv(dev);
10680
10681         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10682                 wol->supported = WAKE_MAGIC;
10683         else
10684                 wol->supported = 0;
10685         wol->wolopts = 0;
10686         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10687                 wol->wolopts = WAKE_MAGIC;
10688         memset(&wol->sopass, 0, sizeof(wol->sopass));
10689 }
10690
10691 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10692 {
10693         struct tg3 *tp = netdev_priv(dev);
10694         struct device *dp = &tp->pdev->dev;
10695
10696         if (wol->wolopts & ~WAKE_MAGIC)
10697                 return -EINVAL;
10698         if ((wol->wolopts & WAKE_MAGIC) &&
10699             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10700                 return -EINVAL;
10701
10702         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10703
10704         spin_lock_bh(&tp->lock);
10705         if (device_may_wakeup(dp))
10706                 tg3_flag_set(tp, WOL_ENABLE);
10707         else
10708                 tg3_flag_clear(tp, WOL_ENABLE);
10709         spin_unlock_bh(&tp->lock);
10710
10711         return 0;
10712 }
10713
10714 static u32 tg3_get_msglevel(struct net_device *dev)
10715 {
10716         struct tg3 *tp = netdev_priv(dev);
10717         return tp->msg_enable;
10718 }
10719
10720 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10721 {
10722         struct tg3 *tp = netdev_priv(dev);
10723         tp->msg_enable = value;
10724 }
10725
10726 static int tg3_nway_reset(struct net_device *dev)
10727 {
10728         struct tg3 *tp = netdev_priv(dev);
10729         int r;
10730
10731         if (!netif_running(dev))
10732                 return -EAGAIN;
10733
10734         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10735                 return -EINVAL;
10736
10737         if (tg3_flag(tp, USE_PHYLIB)) {
10738                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10739                         return -EAGAIN;
10740                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10741         } else {
10742                 u32 bmcr;
10743
10744                 spin_lock_bh(&tp->lock);
10745                 r = -EINVAL;
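                /* BMCR is read twice here; the result of the first
                 * read is discarded, presumably to flush a stale
                 * value before the checked read below.
                 */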
10746                 tg3_readphy(tp, MII_BMCR, &bmcr);
10747                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10748                     ((bmcr & BMCR_ANENABLE) ||
10749                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10750                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10751                                                    BMCR_ANENABLE);
10752                         r = 0;
10753                 }
10754                 spin_unlock_bh(&tp->lock);
10755         }
10756
10757         return r;
10758 }
10759
10760 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10761 {
10762         struct tg3 *tp = netdev_priv(dev);
10763
10764         ering->rx_max_pending = tp->rx_std_ring_mask;
10765         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10766                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10767         else
10768                 ering->rx_jumbo_max_pending = 0;
10769
10770         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10771
10772         ering->rx_pending = tp->rx_pending;
10773         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10774                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10775         else
10776                 ering->rx_jumbo_pending = 0;
10777
10778         ering->tx_pending = tp->napi[0].tx_pending;
10779 }
10780
10781 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10782 {
10783         struct tg3 *tp = netdev_priv(dev);
10784         int i, irq_sync = 0, err = 0;
10785
10786         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10787             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10788             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10789             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10790             (tg3_flag(tp, TSO_BUG) &&
10791              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10792                 return -EINVAL;
10793
10794         if (netif_running(dev)) {
10795                 tg3_phy_stop(tp);
10796                 tg3_netif_stop(tp);
10797                 irq_sync = 1;
10798         }
10799
10800         tg3_full_lock(tp, irq_sync);
10801
10802         tp->rx_pending = ering->rx_pending;
10803
10804         if (tg3_flag(tp, MAX_RXPEND_64) &&
10805             tp->rx_pending > 63)
10806                 tp->rx_pending = 63;
10807         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10808
10809         for (i = 0; i < tp->irq_max; i++)
10810                 tp->napi[i].tx_pending = ering->tx_pending;
10811
10812         if (netif_running(dev)) {
10813                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10814                 err = tg3_restart_hw(tp, 1);
10815                 if (!err)
10816                         tg3_netif_start(tp);
10817         }
10818
10819         tg3_full_unlock(tp);
10820
10821         if (irq_sync && !err)
10822                 tg3_phy_start(tp);
10823
10824         return err;
10825 }
10826
10827 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10828 {
10829         struct tg3 *tp = netdev_priv(dev);
10830
10831         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10832
10833         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10834                 epause->rx_pause = 1;
10835         else
10836                 epause->rx_pause = 0;
10837
10838         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10839                 epause->tx_pause = 1;
10840         else
10841                 epause->tx_pause = 0;
10842 }
10843
10844 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10845 {
10846         struct tg3 *tp = netdev_priv(dev);
10847         int err = 0;
10848
10849         if (tg3_flag(tp, USE_PHYLIB)) {
10850                 u32 newadv;
10851                 struct phy_device *phydev;
10852
10853                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10854
10855                 if (!(phydev->supported & SUPPORTED_Pause) ||
10856                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10857                      (epause->rx_pause != epause->tx_pause)))
10858                         return -EINVAL;
10859
10860                 tp->link_config.flowctrl = 0;
10861                 if (epause->rx_pause) {
10862                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10863
10864                         if (epause->tx_pause) {
10865                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10866                                 newadv = ADVERTISED_Pause;
10867                         } else
10868                                 newadv = ADVERTISED_Pause |
10869                                          ADVERTISED_Asym_Pause;
10870                 } else if (epause->tx_pause) {
10871                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10872                         newadv = ADVERTISED_Asym_Pause;
10873                 } else
10874                         newadv = 0;
10875
10876                 if (epause->autoneg)
10877                         tg3_flag_set(tp, PAUSE_AUTONEG);
10878                 else
10879                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10880
10881                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10882                         u32 oldadv = phydev->advertising &
10883                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10884                         if (oldadv != newadv) {
10885                                 phydev->advertising &=
10886                                         ~(ADVERTISED_Pause |
10887                                           ADVERTISED_Asym_Pause);
10888                                 phydev->advertising |= newadv;
10889                                 if (phydev->autoneg) {
10890                                         /*
10891                                          * Always renegotiate the link to
10892                                          * inform our link partner of our
10893                                          * flow control settings, even if the
10894                                          * flow control is forced.  Let
10895                                          * tg3_adjust_link() do the final
10896                                          * flow control setup.
10897                                          */
10898                                         return phy_start_aneg(phydev);
10899                                 }
10900                         }
10901
10902                         if (!epause->autoneg)
10903                                 tg3_setup_flow_control(tp, 0, 0);
10904                 } else {
10905                         tp->link_config.advertising &=
10906                                         ~(ADVERTISED_Pause |
10907                                           ADVERTISED_Asym_Pause);
10908                         tp->link_config.advertising |= newadv;
10909                 }
10910         } else {
10911                 int irq_sync = 0;
10912
10913                 if (netif_running(dev)) {
10914                         tg3_netif_stop(tp);
10915                         irq_sync = 1;
10916                 }
10917
10918                 tg3_full_lock(tp, irq_sync);
10919
10920                 if (epause->autoneg)
10921                         tg3_flag_set(tp, PAUSE_AUTONEG);
10922                 else
10923                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10924                 if (epause->rx_pause)
10925                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10926                 else
10927                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10928                 if (epause->tx_pause)
10929                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10930                 else
10931                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10932
10933                 if (netif_running(dev)) {
10934                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10935                         err = tg3_restart_hw(tp, 1);
10936                         if (!err)
10937                                 tg3_netif_start(tp);
10938                 }
10939
10940                 tg3_full_unlock(tp);
10941         }
10942
10943         return err;
10944 }
10945
10946 static int tg3_get_sset_count(struct net_device *dev, int sset)
10947 {
10948         switch (sset) {
10949         case ETH_SS_TEST:
10950                 return TG3_NUM_TEST;
10951         case ETH_SS_STATS:
10952                 return TG3_NUM_STATS;
10953         default:
10954                 return -EOPNOTSUPP;
10955         }
10956 }
10957
10958 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10959                          u32 *rules __always_unused)
10960 {
10961         struct tg3 *tp = netdev_priv(dev);
10962
10963         if (!tg3_flag(tp, SUPPORT_MSIX))
10964                 return -EOPNOTSUPP;
10965
10966         switch (info->cmd) {
10967         case ETHTOOL_GRXRINGS:
10968                 if (netif_running(tp->dev))
10969                         info->data = tp->irq_cnt;
10970                 else {
10971                         info->data = num_online_cpus();
10972                         if (info->data > TG3_IRQ_MAX_VECS_RSS)
10973                                 info->data = TG3_IRQ_MAX_VECS_RSS;
10974                 }
10975
10976                 /* The first interrupt vector only
10977                  * handles link interrupts.
10978                  */
10979                 info->data -= 1;
10980                 return 0;
10981
10982         default:
10983                 return -EOPNOTSUPP;
10984         }
10985 }
10986
10987 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10988 {
10989         u32 size = 0;
10990         struct tg3 *tp = netdev_priv(dev);
10991
10992         if (tg3_flag(tp, SUPPORT_MSIX))
10993                 size = TG3_RSS_INDIR_TBL_SIZE;
10994
10995         return size;
10996 }
10997
10998 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10999 {
11000         struct tg3 *tp = netdev_priv(dev);
11001         int i;
11002
11003         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11004                 indir[i] = tp->rss_ind_tbl[i];
11005
11006         return 0;
11007 }
11008
11009 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11010 {
11011         struct tg3 *tp = netdev_priv(dev);
11012         size_t i;
11013
11014         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11015                 tp->rss_ind_tbl[i] = indir[i];
11016
11017         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11018                 return 0;
11019
11020         /* It is legal to write the indirection
11021          * table while the device is running.
11022          */
11023         tg3_full_lock(tp, 0);
11024         tg3_rss_write_indir_tbl(tp);
11025         tg3_full_unlock(tp);
11026
11027         return 0;
11028 }
11029
11030 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11031 {
11032         switch (stringset) {
11033         case ETH_SS_STATS:
11034                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11035                 break;
11036         case ETH_SS_TEST:
11037                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11038                 break;
11039         default:
11040                 WARN_ON(1);     /* we need a WARN() */
11041                 break;
11042         }
11043 }
11044
11045 static int tg3_set_phys_id(struct net_device *dev,
11046                             enum ethtool_phys_id_state state)
11047 {
11048         struct tg3 *tp = netdev_priv(dev);
11049
11050         if (!netif_running(tp->dev))
11051                 return -EAGAIN;
11052
11053         switch (state) {
11054         case ETHTOOL_ID_ACTIVE:
11055                 return 1;       /* cycle on/off once per second */
11056
11057         case ETHTOOL_ID_ON:
11058                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11059                      LED_CTRL_1000MBPS_ON |
11060                      LED_CTRL_100MBPS_ON |
11061                      LED_CTRL_10MBPS_ON |
11062                      LED_CTRL_TRAFFIC_OVERRIDE |
11063                      LED_CTRL_TRAFFIC_BLINK |
11064                      LED_CTRL_TRAFFIC_LED);
11065                 break;
11066
11067         case ETHTOOL_ID_OFF:
11068                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11069                      LED_CTRL_TRAFFIC_OVERRIDE);
11070                 break;
11071
11072         case ETHTOOL_ID_INACTIVE:
11073                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11074                 break;
11075         }
11076
11077         return 0;
11078 }
11079
11080 static void tg3_get_ethtool_stats(struct net_device *dev,
11081                                    struct ethtool_stats *estats, u64 *tmp_stats)
11082 {
11083         struct tg3 *tp = netdev_priv(dev);
11084
11085         if (tp->hw_stats)
11086                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11087         else
11088                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11089 }
11090
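/* Read the VPD block for diagnostics: look for an extended-VPD entry
 * in the NVRAM directory first, fall back to the fixed NVRAM VPD
 * window, or (for non-EEPROM parts) read via the PCI VPD capability.
 * Returns a kmalloc'd buffer the caller must kfree, or NULL on error.
 */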
11091 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11092 {
11093         int i;
11094         __be32 *buf;
11095         u32 offset = 0, len = 0;
11096         u32 magic, val;
11097
11098         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11099                 return NULL;
11100
11101         if (magic == TG3_EEPROM_MAGIC) {
11102                 for (offset = TG3_NVM_DIR_START;
11103                      offset < TG3_NVM_DIR_END;
11104                      offset += TG3_NVM_DIRENT_SIZE) {
11105                         if (tg3_nvram_read(tp, offset, &val))
11106                                 return NULL;
11107
11108                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11109                             TG3_NVM_DIRTYPE_EXTVPD)
11110                                 break;
11111                 }
11112
11113                 if (offset != TG3_NVM_DIR_END) {
11114                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11115                         if (tg3_nvram_read(tp, offset + 4, &offset))
11116                                 return NULL;
11117
11118                         offset = tg3_nvram_logical_addr(tp, offset);
11119                 }
11120         }
11121
11122         if (!offset || !len) {
11123                 offset = TG3_NVM_VPD_OFF;
11124                 len = TG3_NVM_VPD_LEN;
11125         }
11126
11127         buf = kmalloc(len, GFP_KERNEL);
11128         if (buf == NULL)
11129                 return NULL;
11130
11131         if (magic == TG3_EEPROM_MAGIC) {
11132                 for (i = 0; i < len; i += 4) {
11133                         /* The data is in little-endian format in NVRAM.
11134                          * Use the big-endian read routines to preserve
11135                          * the byte order as it exists in NVRAM.
11136                          */
11137                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11138                                 goto error;
11139                 }
11140         } else {
11141                 u8 *ptr;
11142                 ssize_t cnt;
11143                 unsigned int pos = 0;
11144
11145                 ptr = (u8 *)&buf[0];
11146                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11147                         cnt = pci_read_vpd(tp->pdev, pos,
11148                                            len - pos, ptr);
11149                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11150                                 cnt = 0;
11151                         else if (cnt < 0)
11152                                 goto error;
11153                 }
11154                 if (pos != len)
11155                         goto error;
11156         }
11157
11158         *vpdlen = len;
11159
11160         return buf;
11161
11162 error:
11163         kfree(buf);
11164         return NULL;
11165 }
11166
11167 #define NVRAM_TEST_SIZE 0x100
11168 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11169 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11170 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11171 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11172 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11173 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11174 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11175 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11176
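/* ethtool self-test helper: determine the expected image size from the
 * NVRAM magic and revision, read the image, and verify its checksum
 * (or, for hardware-selfboot images, the per-byte parity bits).
 */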
11177 static int tg3_test_nvram(struct tg3 *tp)
11178 {
11179         u32 csum, magic, len;
11180         __be32 *buf;
11181         int i, j, k, err = 0, size;
11182
11183         if (tg3_flag(tp, NO_NVRAM))
11184                 return 0;
11185
11186         if (tg3_nvram_read(tp, 0, &magic) != 0)
11187                 return -EIO;
11188
11189         if (magic == TG3_EEPROM_MAGIC)
11190                 size = NVRAM_TEST_SIZE;
11191         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11192                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11193                     TG3_EEPROM_SB_FORMAT_1) {
11194                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11195                         case TG3_EEPROM_SB_REVISION_0:
11196                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11197                                 break;
11198                         case TG3_EEPROM_SB_REVISION_2:
11199                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11200                                 break;
11201                         case TG3_EEPROM_SB_REVISION_3:
11202                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11203                                 break;
11204                         case TG3_EEPROM_SB_REVISION_4:
11205                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11206                                 break;
11207                         case TG3_EEPROM_SB_REVISION_5:
11208                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11209                                 break;
11210                         case TG3_EEPROM_SB_REVISION_6:
11211                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11212                                 break;
11213                         default:
11214                                 return -EIO;
11215                         }
11216                 } else
11217                         return 0;
11218         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11219                 size = NVRAM_SELFBOOT_HW_SIZE;
11220         else
11221                 return -EIO;
11222
11223         buf = kmalloc(size, GFP_KERNEL);
11224         if (buf == NULL)
11225                 return -ENOMEM;
11226
11227         err = -EIO;
11228         for (i = 0, j = 0; i < size; i += 4, j++) {
11229                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11230                 if (err)
11231                         break;
11232         }
11233         if (i < size)
11234                 goto out;
11235
11236         /* Selfboot format */
11237         magic = be32_to_cpu(buf[0]);
11238         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11239             TG3_EEPROM_MAGIC_FW) {
11240                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11241
11242                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11243                     TG3_EEPROM_SB_REVISION_2) {
11244                         /* For rev 2, the csum doesn't include the MBA. */
11245                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11246                                 csum8 += buf8[i];
11247                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11248                                 csum8 += buf8[i];
11249                 } else {
11250                         for (i = 0; i < size; i++)
11251                                 csum8 += buf8[i];
11252                 }
11253
11254                 if (csum8 == 0) {
11255                         err = 0;
11256                         goto out;
11257                 }
11258
11259                 err = -EIO;
11260                 goto out;
11261         }
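
        /* Illustrative sketch (not driver code): the selfboot branch above
         * passes when the byte-wise sum over the image is zero modulo 256
         * (rev-2 images skip the 4-byte word at TG3_EEPROM_SB_F1R2_MBA_OFF).
         * Reduced to its essence:
         *
         *      static bool tg3_csum8_ok(const u8 *p, size_t n)
         *      {
         *              u8 sum = 0;
         *
         *              while (n--)
         *                      sum += *p++;
         *              return sum == 0;   // valid iff the sum wraps to zero
         *      }
         */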
11262
11263         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11264             TG3_EEPROM_MAGIC_HW) {
11265                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11266                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11267                 u8 *buf8 = (u8 *) buf;
11268
11269                 /* Separate the parity bits and the data bytes.  */
11270                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11271                         if ((i == 0) || (i == 8)) {
11272                                 int l;
11273                                 u8 msk;
11274
11275                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11276                                         parity[k++] = buf8[i] & msk;
11277                                 i++;
11278                         } else if (i == 16) {
11279                                 int l;
11280                                 u8 msk;
11281
11282                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11283                                         parity[k++] = buf8[i] & msk;
11284                                 i++;
11285
11286                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11287                                         parity[k++] = buf8[i] & msk;
11288                                 i++;
11289                         }
11290                         data[j++] = buf8[i];
11291                 }
11292
11293                 err = -EIO;
11294                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11295                         u8 hw8 = hweight8(data[i]);
11296
11297                         if ((hw8 & 0x1) && parity[i])
11298                                 goto out;
11299                         else if (!(hw8 & 0x1) && !parity[i])
11300                                 goto out;
11301                 }
11302                 err = 0;
11303                 goto out;
11304         }
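
        /* Illustrative sketch (not driver code): the parity loop above
         * enforces odd parity - each data byte plus its stored parity bit
         * must contain an odd number of set bits:
         *
         *      static bool tg3_odd_parity_ok(u8 data, u8 parity_bit)
         *      {
         *              return (hweight8(data) + (parity_bit ? 1 : 0)) & 1;
         *      }
         */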
11305
11306         err = -EIO;
11307
11308         /* Bootstrap checksum at offset 0x10 */
11309         csum = calc_crc((unsigned char *) buf, 0x10);
11310         if (csum != le32_to_cpu(buf[0x10/4]))
11311                 goto out;
11312
11313         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11314         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11315         if (csum != le32_to_cpu(buf[0xfc/4]))
11316                 goto out;
11317
11318         kfree(buf);
11319
11320         buf = tg3_vpd_readblock(tp, &len);
11321         if (!buf)
11322                 return -ENOMEM;
11323
11324         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11325         if (i > 0) {
11326                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11327                 if (j < 0)
11328                         goto out;
11329
11330                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11331                         goto out;
11332
11333                 i += PCI_VPD_LRDT_TAG_SIZE;
11334                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11335                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11336                 if (j > 0) {
11337                         u8 csum8 = 0;
11338
11339                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11340
11341                         for (i = 0; i <= j; i++)
11342                                 csum8 += ((u8 *)buf)[i];
11343
11344                         if (csum8)
11345                                 goto out;
11346                 }
11347         }
11348
11349         err = 0;
11350
11351 out:
11352         kfree(buf);
11353         return err;
11354 }
11355
11356 #define TG3_SERDES_TIMEOUT_SEC  2
11357 #define TG3_COPPER_TIMEOUT_SEC  6
11358
11359 static int tg3_test_link(struct tg3 *tp)
11360 {
11361         int i, max;
11362
11363         if (!netif_running(tp->dev))
11364                 return -ENODEV;
11365
11366         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11367                 max = TG3_SERDES_TIMEOUT_SEC;
11368         else
11369                 max = TG3_COPPER_TIMEOUT_SEC;
11370
11371         for (i = 0; i < max; i++) {
11372                 if (netif_carrier_ok(tp->dev))
11373                         return 0;
11374
11375                 if (msleep_interruptible(1000))
11376                         break;
11377         }
11378
11379         return -EIO;
11380 }
11381
11382 /* Only test the commonly used registers */
11383 static int tg3_test_registers(struct tg3 *tp)
11384 {
11385         int i, is_5705, is_5750;
11386         u32 offset, read_mask, write_mask, val, save_val, read_val;
11387         static struct {
11388                 u16 offset;
11389                 u16 flags;
11390 #define TG3_FL_5705     0x1
11391 #define TG3_FL_NOT_5705 0x2
11392 #define TG3_FL_NOT_5788 0x4
11393 #define TG3_FL_NOT_5750 0x8
11394                 u32 read_mask;
11395                 u32 write_mask;
11396         } reg_tbl[] = {
11397                 /* MAC Control Registers */
11398                 { MAC_MODE, TG3_FL_NOT_5705,
11399                         0x00000000, 0x00ef6f8c },
11400                 { MAC_MODE, TG3_FL_5705,
11401                         0x00000000, 0x01ef6b8c },
11402                 { MAC_STATUS, TG3_FL_NOT_5705,
11403                         0x03800107, 0x00000000 },
11404                 { MAC_STATUS, TG3_FL_5705,
11405                         0x03800100, 0x00000000 },
11406                 { MAC_ADDR_0_HIGH, 0x0000,
11407                         0x00000000, 0x0000ffff },
11408                 { MAC_ADDR_0_LOW, 0x0000,
11409                         0x00000000, 0xffffffff },
11410                 { MAC_RX_MTU_SIZE, 0x0000,
11411                         0x00000000, 0x0000ffff },
11412                 { MAC_TX_MODE, 0x0000,
11413                         0x00000000, 0x00000070 },
11414                 { MAC_TX_LENGTHS, 0x0000,
11415                         0x00000000, 0x00003fff },
11416                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11417                         0x00000000, 0x000007fc },
11418                 { MAC_RX_MODE, TG3_FL_5705,
11419                         0x00000000, 0x000007dc },
11420                 { MAC_HASH_REG_0, 0x0000,
11421                         0x00000000, 0xffffffff },
11422                 { MAC_HASH_REG_1, 0x0000,
11423                         0x00000000, 0xffffffff },
11424                 { MAC_HASH_REG_2, 0x0000,
11425                         0x00000000, 0xffffffff },
11426                 { MAC_HASH_REG_3, 0x0000,
11427                         0x00000000, 0xffffffff },
11428
11429                 /* Receive Data and Receive BD Initiator Control Registers. */
11430                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11431                         0x00000000, 0xffffffff },
11432                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11433                         0x00000000, 0xffffffff },
11434                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11435                         0x00000000, 0x00000003 },
11436                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11437                         0x00000000, 0xffffffff },
11438                 { RCVDBDI_STD_BD+0, 0x0000,
11439                         0x00000000, 0xffffffff },
11440                 { RCVDBDI_STD_BD+4, 0x0000,
11441                         0x00000000, 0xffffffff },
11442                 { RCVDBDI_STD_BD+8, 0x0000,
11443                         0x00000000, 0xffff0002 },
11444                 { RCVDBDI_STD_BD+0xc, 0x0000,
11445                         0x00000000, 0xffffffff },
11446
11447                 /* Receive BD Initiator Control Registers. */
11448                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11449                         0x00000000, 0xffffffff },
11450                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11451                         0x00000000, 0x000003ff },
11452                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11453                         0x00000000, 0xffffffff },
11454
11455                 /* Host Coalescing Control Registers. */
11456                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11457                         0x00000000, 0x00000004 },
11458                 { HOSTCC_MODE, TG3_FL_5705,
11459                         0x00000000, 0x000000f6 },
11460                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11461                         0x00000000, 0xffffffff },
11462                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11463                         0x00000000, 0x000003ff },
11464                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11465                         0x00000000, 0xffffffff },
11466                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11467                         0x00000000, 0x000003ff },
11468                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11469                         0x00000000, 0xffffffff },
11470                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11471                         0x00000000, 0x000000ff },
11472                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11473                         0x00000000, 0xffffffff },
11474                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11475                         0x00000000, 0x000000ff },
11476                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11477                         0x00000000, 0xffffffff },
11478                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11479                         0x00000000, 0xffffffff },
11480                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11481                         0x00000000, 0xffffffff },
11482                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11483                         0x00000000, 0x000000ff },
11484                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11485                         0x00000000, 0xffffffff },
11486                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11487                         0x00000000, 0x000000ff },
11488                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11489                         0x00000000, 0xffffffff },
11490                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11491                         0x00000000, 0xffffffff },
11492                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11493                         0x00000000, 0xffffffff },
11494                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11495                         0x00000000, 0xffffffff },
11496                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11497                         0x00000000, 0xffffffff },
11498                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11499                         0xffffffff, 0x00000000 },
11500                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11501                         0xffffffff, 0x00000000 },
11502
11503                 /* Buffer Manager Control Registers. */
11504                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11505                         0x00000000, 0x007fff80 },
11506                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11507                         0x00000000, 0x007fffff },
11508                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11509                         0x00000000, 0x0000003f },
11510                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11511                         0x00000000, 0x000001ff },
11512                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11513                         0x00000000, 0x000001ff },
11514                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11515                         0xffffffff, 0x00000000 },
11516                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11517                         0xffffffff, 0x00000000 },
11518
11519                 /* Mailbox Registers */
11520                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11521                         0x00000000, 0x000001ff },
11522                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11523                         0x00000000, 0x000001ff },
11524                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11525                         0x00000000, 0x000007ff },
11526                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11527                         0x00000000, 0x000001ff },
11528
11529                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11530         };
11531
11532         is_5705 = is_5750 = 0;
11533         if (tg3_flag(tp, 5705_PLUS)) {
11534                 is_5705 = 1;
11535                 if (tg3_flag(tp, 5750_PLUS))
11536                         is_5750 = 1;
11537         }
11538
11539         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11540                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11541                         continue;
11542
11543                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11544                         continue;
11545
11546                 if (tg3_flag(tp, IS_5788) &&
11547                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11548                         continue;
11549
11550                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11551                         continue;
11552
11553                 offset = (u32) reg_tbl[i].offset;
11554                 read_mask = reg_tbl[i].read_mask;
11555                 write_mask = reg_tbl[i].write_mask;
11556
11557                 /* Save the original register content */
11558                 save_val = tr32(offset);
11559
11560                 /* Determine the read-only value. */
11561                 read_val = save_val & read_mask;
11562
11563                 /* Write zero to the register, then make sure the read-only bits
11564                  * are not changed and the read/write bits are all zeros.
11565                  */
11566                 tw32(offset, 0);
11567
11568                 val = tr32(offset);
11569
11570                 /* Test the read-only and read/write bits. */
11571                 if (((val & read_mask) != read_val) || (val & write_mask))
11572                         goto out;
11573
11574                 /* Write ones to all the bits defined by RdMask and WrMask, then
11575                  * make sure the read-only bits are not changed and the
11576                  * read/write bits are all ones.
11577                  */
11578                 tw32(offset, read_mask | write_mask);
11579
11580                 val = tr32(offset);
11581
11582                 /* Test the read-only bits. */
11583                 if ((val & read_mask) != read_val)
11584                         goto out;
11585
11586                 /* Test the read/write bits. */
11587                 if ((val & write_mask) != write_mask)
11588                         goto out;
11589
11590                 tw32(offset, save_val);
11591         }
11592
11593         return 0;
11594
11595 out:
11596         if (netif_msg_hw(tp))
11597                 netdev_err(tp->dev,
11598                            "Register test failed at offset %x\n", offset);
11599         tw32(offset, save_val);
11600         return -EIO;
11601 }
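
/* Condensed sketch (not driver code) of the invariant the loop above checks
 * for each table entry, given its read-only mask (rd) and read/write mask
 * (wr): writing 0 must leave the read-only bits alone and clear the r/w
 * bits; writing rd | wr must leave the read-only bits alone and set the r/w
 * bits.
 */
#if 0
static bool tg3_reg_masks_ok(struct tg3 *tp, u32 off, u32 rd, u32 wr)
{
        u32 save = tr32(off), ro = save & rd, val;
        bool ok;

        tw32(off, 0);
        val = tr32(off);
        ok = (val & rd) == ro && !(val & wr);

        tw32(off, rd | wr);
        val = tr32(off);
        ok = ok && (val & rd) == ro && (val & wr) == wr;

        tw32(off, save);        /* always restore the original contents */
        return ok;
}
#endif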
11602
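/* The three patterns below cover the stuck-at cases (all bits clear, all
 * bits set) plus an alternating 0xaa55a55a pattern that flips neighboring
 * bits and helps catch coupling faults between adjacent cells.
 */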
11603 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11604 {
11605         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11606         int i;
11607         u32 j;
11608
11609         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11610                 for (j = 0; j < len; j += 4) {
11611                         u32 val;
11612
11613                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11614                         tg3_read_mem(tp, offset + j, &val);
11615                         if (val != test_pattern[i])
11616                                 return -EIO;
11617                 }
11618         }
11619         return 0;
11620 }
11621
11622 static int tg3_test_memory(struct tg3 *tp)
11623 {
11624         static struct mem_entry {
11625                 u32 offset;
11626                 u32 len;
11627         } mem_tbl_570x[] = {
11628                 { 0x00000000, 0x00b50},
11629                 { 0x00002000, 0x1c000},
11630                 { 0xffffffff, 0x00000}
11631         }, mem_tbl_5705[] = {
11632                 { 0x00000100, 0x0000c},
11633                 { 0x00000200, 0x00008},
11634                 { 0x00004000, 0x00800},
11635                 { 0x00006000, 0x01000},
11636                 { 0x00008000, 0x02000},
11637                 { 0x00010000, 0x0e000},
11638                 { 0xffffffff, 0x00000}
11639         }, mem_tbl_5755[] = {
11640                 { 0x00000200, 0x00008},
11641                 { 0x00004000, 0x00800},
11642                 { 0x00006000, 0x00800},
11643                 { 0x00008000, 0x02000},
11644                 { 0x00010000, 0x0c000},
11645                 { 0xffffffff, 0x00000}
11646         }, mem_tbl_5906[] = {
11647                 { 0x00000200, 0x00008},
11648                 { 0x00004000, 0x00400},
11649                 { 0x00006000, 0x00400},
11650                 { 0x00008000, 0x01000},
11651                 { 0x00010000, 0x01000},
11652                 { 0xffffffff, 0x00000}
11653         }, mem_tbl_5717[] = {
11654                 { 0x00000200, 0x00008},
11655                 { 0x00010000, 0x0a000},
11656                 { 0x00020000, 0x13c00},
11657                 { 0xffffffff, 0x00000}
11658         }, mem_tbl_57765[] = {
11659                 { 0x00000200, 0x00008},
11660                 { 0x00004000, 0x00800},
11661                 { 0x00006000, 0x09800},
11662                 { 0x00010000, 0x0a000},
11663                 { 0xffffffff, 0x00000}
11664         };
11665         struct mem_entry *mem_tbl;
11666         int err = 0;
11667         int i;
11668
11669         if (tg3_flag(tp, 5717_PLUS))
11670                 mem_tbl = mem_tbl_5717;
11671         else if (tg3_flag(tp, 57765_CLASS))
11672                 mem_tbl = mem_tbl_57765;
11673         else if (tg3_flag(tp, 5755_PLUS))
11674                 mem_tbl = mem_tbl_5755;
11675         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11676                 mem_tbl = mem_tbl_5906;
11677         else if (tg3_flag(tp, 5705_PLUS))
11678                 mem_tbl = mem_tbl_5705;
11679         else
11680                 mem_tbl = mem_tbl_570x;
11681
11682         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11683                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11684                 if (err)
11685                         break;
11686         }
11687
11688         return err;
11689 }
11690
11691 #define TG3_TSO_MSS             500
11692
11693 #define TG3_TSO_IP_HDR_LEN      20
11694 #define TG3_TSO_TCP_HDR_LEN     20
11695 #define TG3_TSO_TCP_OPT_LEN     12
11696
11697 static const u8 tg3_tso_header[] = {
11698 0x08, 0x00,
11699 0x45, 0x00, 0x00, 0x00,
11700 0x00, 0x00, 0x40, 0x00,
11701 0x40, 0x06, 0x00, 0x00,
11702 0x0a, 0x00, 0x00, 0x01,
11703 0x0a, 0x00, 0x00, 0x02,
11704 0x0d, 0x00, 0xe0, 0x00,
11705 0x00, 0x00, 0x01, 0x00,
11706 0x00, 0x00, 0x02, 0x00,
11707 0x80, 0x10, 0x10, 0x00,
11708 0x14, 0x09, 0x00, 0x00,
11709 0x01, 0x01, 0x08, 0x0a,
11710 0x11, 0x11, 0x11, 0x11,
11711 0x11, 0x11, 0x11, 0x11,
11712 };
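
/* Decoded, this template is: ethertype 0x0800 (IPv4); a 20-byte IPv4 header
 * (DF set, TTL 0x40, protocol 6/TCP, 10.0.0.1 -> 10.0.0.2; tot_len is zero
 * here and is set by tg3_run_loopback()); and a 32-byte TCP header (data
 * offset 8, ACK set, 12 bytes of NOP/NOP/timestamp options) - matching the
 * TG3_TSO_*_LEN constants above.
 */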
11713
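/* The loopback run below builds a test frame (optionally a TSO super-frame
 * using the template above), posts it on the tx ring, kicks the host
 * coalescing engine, polls until the tx consumer and rx producer indices
 * show the packet(s) came back, and finally byte-compares the received
 * payload against the pattern that was transmitted.
 */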
11714 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11715 {
11716         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11717         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11718         u32 budget;
11719         struct sk_buff *skb;
11720         u8 *tx_data, *rx_data;
11721         dma_addr_t map;
11722         int num_pkts, tx_len, rx_len, i, err;
11723         struct tg3_rx_buffer_desc *desc;
11724         struct tg3_napi *tnapi, *rnapi;
11725         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11726
11727         tnapi = &tp->napi[0];
11728         rnapi = &tp->napi[0];
11729         if (tp->irq_cnt > 1) {
11730                 if (tg3_flag(tp, ENABLE_RSS))
11731                         rnapi = &tp->napi[1];
11732                 if (tg3_flag(tp, ENABLE_TSS))
11733                         tnapi = &tp->napi[1];
11734         }
11735         coal_now = tnapi->coal_now | rnapi->coal_now;
11736
11737         err = -EIO;
11738
11739         tx_len = pktsz;
11740         skb = netdev_alloc_skb(tp->dev, tx_len);
11741         if (!skb)
11742                 return -ENOMEM;
11743
11744         tx_data = skb_put(skb, tx_len);
11745         memcpy(tx_data, tp->dev->dev_addr, 6);
11746         memset(tx_data + 6, 0x0, 8);
11747
11748         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11749
11750         if (tso_loopback) {
11751                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11752
11753                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11754                               TG3_TSO_TCP_OPT_LEN;
11755
11756                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11757                        sizeof(tg3_tso_header));
11758                 mss = TG3_TSO_MSS;
11759
11760                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11761                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11762
11763                 /* Set the total length field in the IP header */
11764                 iph->tot_len = htons((u16)(mss + hdr_len));
11765
11766                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11767                               TXD_FLAG_CPU_POST_DMA);
11768
11769                 if (tg3_flag(tp, HW_TSO_1) ||
11770                     tg3_flag(tp, HW_TSO_2) ||
11771                     tg3_flag(tp, HW_TSO_3)) {
11772                         struct tcphdr *th;
11773                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11774                         th = (struct tcphdr *)&tx_data[val];
11775                         th->check = 0;
11776                 } else
11777                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11778
11779                 if (tg3_flag(tp, HW_TSO_3)) {
11780                         mss |= (hdr_len & 0xc) << 12;
11781                         if (hdr_len & 0x10)
11782                                 base_flags |= 0x00000010;
11783                         base_flags |= (hdr_len & 0x3e0) << 5;
11784                 } else if (tg3_flag(tp, HW_TSO_2))
11785                         mss |= hdr_len << 9;
11786                 else if (tg3_flag(tp, HW_TSO_1) ||
11787                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11788                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11789                 } else {
11790                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11791                 }
11792
11793                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11794         } else {
11795                 num_pkts = 1;
11796                 data_off = ETH_HLEN;
11797
11798                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
11799                     tx_len > VLAN_ETH_FRAME_LEN)
11800                         base_flags |= TXD_FLAG_JMB_PKT;
11801         }
11802
11803         for (i = data_off; i < tx_len; i++)
11804                 tx_data[i] = (u8) (i & 0xff);
11805
11806         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11807         if (pci_dma_mapping_error(tp->pdev, map)) {
11808                 dev_kfree_skb(skb);
11809                 return -EIO;
11810         }
11811
11812         val = tnapi->tx_prod;
11813         tnapi->tx_buffers[val].skb = skb;
11814         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11815
11816         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11817                rnapi->coal_now);
11818
11819         udelay(10);
11820
11821         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11822
11823         budget = tg3_tx_avail(tnapi);
11824         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11825                             base_flags | TXD_FLAG_END, mss, 0)) {
11826                 tnapi->tx_buffers[val].skb = NULL;
11827                 dev_kfree_skb(skb);
11828                 return -EIO;
11829         }
11830
11831         tnapi->tx_prod++;
11832
11833         /* Sync BD data before updating mailbox */
11834         wmb();
11835
11836         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11837         tr32_mailbox(tnapi->prodmbox);
11838
11839         udelay(10);
11840
11841         /* Poll for up to 350 usec; some 10/100 Mbps devices need that long.  */
11842         for (i = 0; i < 35; i++) {
11843                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11844                        coal_now);
11845
11846                 udelay(10);
11847
11848                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11849                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11850                 if ((tx_idx == tnapi->tx_prod) &&
11851                     (rx_idx == (rx_start_idx + num_pkts)))
11852                         break;
11853         }
11854
11855         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11856         dev_kfree_skb(skb);
11857
11858         if (tx_idx != tnapi->tx_prod)
11859                 goto out;
11860
11861         if (rx_idx != rx_start_idx + num_pkts)
11862                 goto out;
11863
11864         val = data_off;
11865         while (rx_idx != rx_start_idx) {
11866                 desc = &rnapi->rx_rcb[rx_start_idx++];
11867                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11868                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11869
11870                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11871                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11872                         goto out;
11873
11874                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11875                          - ETH_FCS_LEN;
11876
11877                 if (!tso_loopback) {
11878                         if (rx_len != tx_len)
11879                                 goto out;
11880
11881                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11882                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11883                                         goto out;
11884                         } else {
11885                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11886                                         goto out;
11887                         }
11888                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11889                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11890                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11891                         goto out;
11892                 }
11893
11894                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11895                         rx_data = tpr->rx_std_buffers[desc_idx].data;
11896                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11897                                              mapping);
11898                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11899                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11900                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11901                                              mapping);
11902                 } else
11903                         goto out;
11904
11905                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11906                                             PCI_DMA_FROMDEVICE);
11907
11908                 rx_data += TG3_RX_OFFSET(tp);
11909                 for (i = data_off; i < rx_len; i++, val++) {
11910                         if (*(rx_data + i) != (u8) (val & 0xff))
11911                                 goto out;
11912                 }
11913         }
11914
11915         err = 0;
11916
11917         /* tg3_free_rings will unmap and free the rx_data */
11918 out:
11919         return err;
11920 }
11921
11922 #define TG3_STD_LOOPBACK_FAILED         1
11923 #define TG3_JMB_LOOPBACK_FAILED         2
11924 #define TG3_TSO_LOOPBACK_FAILED         4
11925 #define TG3_LOOPBACK_FAILED \
11926         (TG3_STD_LOOPBACK_FAILED | \
11927          TG3_JMB_LOOPBACK_FAILED | \
11928          TG3_TSO_LOOPBACK_FAILED)
11929
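/* Each u64 filled in by tg3_test_loopback() is a bitmask of the
 * TG3_*_LOOPBACK_FAILED flags above: data[0] covers MAC loopback, data[1]
 * internal PHY loopback, and data[2] (only when requested) external
 * loopback.
 */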
11930 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11931 {
11932         int err = -EIO;
11933         u32 eee_cap;
11934         u32 jmb_pkt_sz = 9000;
11935
11936         if (tp->dma_limit)
11937                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
11938
11939         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11940         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11941
11942         if (!netif_running(tp->dev)) {
11943                 data[0] = TG3_LOOPBACK_FAILED;
11944                 data[1] = TG3_LOOPBACK_FAILED;
11945                 if (do_extlpbk)
11946                         data[2] = TG3_LOOPBACK_FAILED;
11947                 goto done;
11948         }
11949
11950         err = tg3_reset_hw(tp, 1);
11951         if (err) {
11952                 data[0] = TG3_LOOPBACK_FAILED;
11953                 data[1] = TG3_LOOPBACK_FAILED;
11954                 if (do_extlpbk)
11955                         data[2] = TG3_LOOPBACK_FAILED;
11956                 goto done;
11957         }
11958
11959         if (tg3_flag(tp, ENABLE_RSS)) {
11960                 int i;
11961
11962                 /* Reroute all rx packets to the 1st queue */
11963                 for (i = MAC_RSS_INDIR_TBL_0;
11964                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11965                         tw32(i, 0x0);
11966         }
11967
11968         /* HW errata - mac loopback fails in some cases on 5780.
11969          * Normal traffic and PHY loopback are not affected by
11970          * errata.  Also, the MAC loopback test is deprecated for
11971          * all newer ASIC revisions.
11972          */
11973         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11974             !tg3_flag(tp, CPMU_PRESENT)) {
11975                 tg3_mac_loopback(tp, true);
11976
11977                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11978                         data[0] |= TG3_STD_LOOPBACK_FAILED;
11979
11980                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11981                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11982                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
11983
11984                 tg3_mac_loopback(tp, false);
11985         }
11986
11987         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11988             !tg3_flag(tp, USE_PHYLIB)) {
11989                 int i;
11990
11991                 tg3_phy_lpbk_set(tp, 0, false);
11992
11993                 /* Wait for link */
11994                 for (i = 0; i < 100; i++) {
11995                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11996                                 break;
11997                         mdelay(1);
11998                 }
11999
12000                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12001                         data[1] |= TG3_STD_LOOPBACK_FAILED;
12002                 if (tg3_flag(tp, TSO_CAPABLE) &&
12003                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12004                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
12005                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12006                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12007                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
12008
12009                 if (do_extlpbk) {
12010                         tg3_phy_lpbk_set(tp, 0, true);
12011
12012                         /* All link indications report up, but the hardware
12013                          * isn't really ready for about 20 msec.  Double it
12014                          * to be sure.
12015                          */
12016                         mdelay(40);
12017
12018                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12019                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
12020                         if (tg3_flag(tp, TSO_CAPABLE) &&
12021                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12022                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
12023                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12024                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12025                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
12026                 }
12027
12028                 /* Re-enable gphy autopowerdown. */
12029                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12030                         tg3_phy_toggle_apd(tp, true);
12031         }
12032
12033         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12034
12035 done:
12036         tp->phy_flags |= eee_cap;
12037
12038         return err;
12039 }
12040
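/* ethtool self-test result layout: data[0] NVRAM, data[1] link, data[2]
 * registers, data[3] memory, data[4]..data[6] the loopback masks from
 * tg3_test_loopback(), and data[7] the interrupt test.
 */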
12041 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12042                           u64 *data)
12043 {
12044         struct tg3 *tp = netdev_priv(dev);
12045         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12046
12047         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12048             tg3_power_up(tp)) {
12049                 etest->flags |= ETH_TEST_FL_FAILED;
12050                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12051                 return;
12052         }
12053
12054         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12055
12056         if (tg3_test_nvram(tp) != 0) {
12057                 etest->flags |= ETH_TEST_FL_FAILED;
12058                 data[0] = 1;
12059         }
12060         if (!doextlpbk && tg3_test_link(tp)) {
12061                 etest->flags |= ETH_TEST_FL_FAILED;
12062                 data[1] = 1;
12063         }
12064         if (etest->flags & ETH_TEST_FL_OFFLINE) {
12065                 int err, err2 = 0, irq_sync = 0;
12066
12067                 if (netif_running(dev)) {
12068                         tg3_phy_stop(tp);
12069                         tg3_netif_stop(tp);
12070                         irq_sync = 1;
12071                 }
12072
12073                 tg3_full_lock(tp, irq_sync);
12074
12075                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12076                 err = tg3_nvram_lock(tp);
12077                 tg3_halt_cpu(tp, RX_CPU_BASE);
12078                 if (!tg3_flag(tp, 5705_PLUS))
12079                         tg3_halt_cpu(tp, TX_CPU_BASE);
12080                 if (!err)
12081                         tg3_nvram_unlock(tp);
12082
12083                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12084                         tg3_phy_reset(tp);
12085
12086                 if (tg3_test_registers(tp) != 0) {
12087                         etest->flags |= ETH_TEST_FL_FAILED;
12088                         data[2] = 1;
12089                 }
12090
12091                 if (tg3_test_memory(tp) != 0) {
12092                         etest->flags |= ETH_TEST_FL_FAILED;
12093                         data[3] = 1;
12094                 }
12095
12096                 if (doextlpbk)
12097                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12098
12099                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12100                         etest->flags |= ETH_TEST_FL_FAILED;
12101
12102                 tg3_full_unlock(tp);
12103
12104                 if (tg3_test_interrupt(tp) != 0) {
12105                         etest->flags |= ETH_TEST_FL_FAILED;
12106                         data[7] = 1;
12107                 }
12108
12109                 tg3_full_lock(tp, 0);
12110
12111                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12112                 if (netif_running(dev)) {
12113                         tg3_flag_set(tp, INIT_COMPLETE);
12114                         err2 = tg3_restart_hw(tp, 1);
12115                         if (!err2)
12116                                 tg3_netif_start(tp);
12117                 }
12118
12119                 tg3_full_unlock(tp);
12120
12121                 if (irq_sync && !err2)
12122                         tg3_phy_start(tp);
12123         }
12124         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12125                 tg3_power_down(tp);
12126
12127 }
12128
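/* Userspace MII tools (mii-tool, for example) typically issue SIOCGMIIPHY
 * to discover the PHY address and SIOCGMIIREG to read registers; the
 * fallthrough below lets a single SIOCGMIIPHY call return both the PHY id
 * and the requested register value.
 */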
12129 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12130 {
12131         struct mii_ioctl_data *data = if_mii(ifr);
12132         struct tg3 *tp = netdev_priv(dev);
12133         int err;
12134
12135         if (tg3_flag(tp, USE_PHYLIB)) {
12136                 struct phy_device *phydev;
12137                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12138                         return -EAGAIN;
12139                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12140                 return phy_mii_ioctl(phydev, ifr, cmd);
12141         }
12142
12143         switch (cmd) {
12144         case SIOCGMIIPHY:
12145                 data->phy_id = tp->phy_addr;
12146
12147                 /* fallthru */
12148         case SIOCGMIIREG: {
12149                 u32 mii_regval;
12150
12151                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12152                         break;                  /* We have no PHY */
12153
12154                 if (!netif_running(dev))
12155                         return -EAGAIN;
12156
12157                 spin_lock_bh(&tp->lock);
12158                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12159                 spin_unlock_bh(&tp->lock);
12160
12161                 data->val_out = mii_regval;
12162
12163                 return err;
12164         }
12165
12166         case SIOCSMIIREG:
12167                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12168                         break;                  /* We have no PHY */
12169
12170                 if (!netif_running(dev))
12171                         return -EAGAIN;
12172
12173                 spin_lock_bh(&tp->lock);
12174                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12175                 spin_unlock_bh(&tp->lock);
12176
12177                 return err;
12178
12179         default:
12180                 /* do nothing */
12181                 break;
12182         }
12183         return -EOPNOTSUPP;
12184 }
12185
12186 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12187 {
12188         struct tg3 *tp = netdev_priv(dev);
12189
12190         memcpy(ec, &tp->coal, sizeof(*ec));
12191         return 0;
12192 }
12193
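/* Hypothetical usage: a command such as "ethtool -C eth0 rx-usecs 18
 * rx-frames 12" arrives here through the ethtool core.  Zeroing both the
 * usecs and frames value for a direction is rejected below, since the
 * hardware would then never raise a completion interrupt for it.
 */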
12194 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12195 {
12196         struct tg3 *tp = netdev_priv(dev);
12197         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12198         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12199
12200         if (!tg3_flag(tp, 5705_PLUS)) {
12201                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12202                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12203                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12204                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12205         }
12206
12207         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12208             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12209             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12210             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12211             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12212             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12213             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12214             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12215             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12216             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12217                 return -EINVAL;
12218
12219         /* No rx interrupts will be generated if both are zero */
12220         if ((ec->rx_coalesce_usecs == 0) &&
12221             (ec->rx_max_coalesced_frames == 0))
12222                 return -EINVAL;
12223
12224         /* No tx interrupts will be generated if both are zero */
12225         if ((ec->tx_coalesce_usecs == 0) &&
12226             (ec->tx_max_coalesced_frames == 0))
12227                 return -EINVAL;
12228
12229         /* Only copy relevant parameters, ignore all others. */
12230         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12231         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12232         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12233         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12234         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12235         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12236         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12237         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12238         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12239
12240         if (netif_running(dev)) {
12241                 tg3_full_lock(tp, 0);
12242                 __tg3_set_coalesce(tp, &tp->coal);
12243                 tg3_full_unlock(tp);
12244         }
12245         return 0;
12246 }
12247
12248 static const struct ethtool_ops tg3_ethtool_ops = {
12249         .get_settings           = tg3_get_settings,
12250         .set_settings           = tg3_set_settings,
12251         .get_drvinfo            = tg3_get_drvinfo,
12252         .get_regs_len           = tg3_get_regs_len,
12253         .get_regs               = tg3_get_regs,
12254         .get_wol                = tg3_get_wol,
12255         .set_wol                = tg3_set_wol,
12256         .get_msglevel           = tg3_get_msglevel,
12257         .set_msglevel           = tg3_set_msglevel,
12258         .nway_reset             = tg3_nway_reset,
12259         .get_link               = ethtool_op_get_link,
12260         .get_eeprom_len         = tg3_get_eeprom_len,
12261         .get_eeprom             = tg3_get_eeprom,
12262         .set_eeprom             = tg3_set_eeprom,
12263         .get_ringparam          = tg3_get_ringparam,
12264         .set_ringparam          = tg3_set_ringparam,
12265         .get_pauseparam         = tg3_get_pauseparam,
12266         .set_pauseparam         = tg3_set_pauseparam,
12267         .self_test              = tg3_self_test,
12268         .get_strings            = tg3_get_strings,
12269         .set_phys_id            = tg3_set_phys_id,
12270         .get_ethtool_stats      = tg3_get_ethtool_stats,
12271         .get_coalesce           = tg3_get_coalesce,
12272         .set_coalesce           = tg3_set_coalesce,
12273         .get_sset_count         = tg3_get_sset_count,
12274         .get_rxnfc              = tg3_get_rxnfc,
12275         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12276         .get_rxfh_indir         = tg3_get_rxfh_indir,
12277         .set_rxfh_indir         = tg3_set_rxfh_indir,
12278         .get_ts_info            = ethtool_op_get_ts_info,
12279 };
12280
12281 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12282                                                 struct rtnl_link_stats64 *stats)
12283 {
12284         struct tg3 *tp = netdev_priv(dev);
12285
12286         if (!tp->hw_stats)
12287                 return &tp->net_stats_prev;
12288
12289         spin_lock_bh(&tp->lock);
12290         tg3_get_nstats(tp, stats);
12291         spin_unlock_bh(&tp->lock);
12292
12293         return stats;
12294 }
12295
12296 static void tg3_set_rx_mode(struct net_device *dev)
12297 {
12298         struct tg3 *tp = netdev_priv(dev);
12299
12300         if (!netif_running(dev))
12301                 return;
12302
12303         tg3_full_lock(tp, 0);
12304         __tg3_set_rx_mode(dev);
12305         tg3_full_unlock(tp);
12306 }
12307
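/* On 5780-class devices TSO and jumbo MTUs are mutually exclusive here:
 * raising the MTU above ETH_DATA_LEN clears TSO_CAPABLE (the jumbo ring
 * flag is used only on the other chips), and shrinking it back restores
 * TSO.
 */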
12308 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12309                                int new_mtu)
12310 {
12311         dev->mtu = new_mtu;
12312
12313         if (new_mtu > ETH_DATA_LEN) {
12314                 if (tg3_flag(tp, 5780_CLASS)) {
12315                         netdev_update_features(dev);
12316                         tg3_flag_clear(tp, TSO_CAPABLE);
12317                 } else {
12318                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
12319                 }
12320         } else {
12321                 if (tg3_flag(tp, 5780_CLASS)) {
12322                         tg3_flag_set(tp, TSO_CAPABLE);
12323                         netdev_update_features(dev);
12324                 }
12325                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12326         }
12327 }
12328
12329 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12330 {
12331         struct tg3 *tp = netdev_priv(dev);
12332         int err, reset_phy = 0;
12333
12334         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12335                 return -EINVAL;
12336
12337         if (!netif_running(dev)) {
12338                 /* We'll just catch it later when the
12339                  * device is brought up.
12340                  */
12341                 tg3_set_mtu(dev, tp, new_mtu);
12342                 return 0;
12343         }
12344
12345         tg3_phy_stop(tp);
12346
12347         tg3_netif_stop(tp);
12348
12349         tg3_full_lock(tp, 1);
12350
12351         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12352
12353         tg3_set_mtu(dev, tp, new_mtu);
12354
12355         /* Reset the PHY, otherwise the read DMA engine will be left in a
12356          * mode that breaks all DMA requests down to 256 bytes.
12357          */
12358         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12359                 reset_phy = 1;
12360
12361         err = tg3_restart_hw(tp, reset_phy);
12362
12363         if (!err)
12364                 tg3_netif_start(tp);
12365
12366         tg3_full_unlock(tp);
12367
12368         if (!err)
12369                 tg3_phy_start(tp);
12370
12371         return err;
12372 }
12373
12374 static const struct net_device_ops tg3_netdev_ops = {
12375         .ndo_open               = tg3_open,
12376         .ndo_stop               = tg3_close,
12377         .ndo_start_xmit         = tg3_start_xmit,
12378         .ndo_get_stats64        = tg3_get_stats64,
12379         .ndo_validate_addr      = eth_validate_addr,
12380         .ndo_set_rx_mode        = tg3_set_rx_mode,
12381         .ndo_set_mac_address    = tg3_set_mac_addr,
12382         .ndo_do_ioctl           = tg3_ioctl,
12383         .ndo_tx_timeout         = tg3_tx_timeout,
12384         .ndo_change_mtu         = tg3_change_mtu,
12385         .ndo_fix_features       = tg3_fix_features,
12386         .ndo_set_features       = tg3_set_features,
12387 #ifdef CONFIG_NET_POLL_CONTROLLER
12388         .ndo_poll_controller    = tg3_poll_controller,
12389 #endif
12390 };
12391
12392 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12393 {
12394         u32 cursize, val, magic;
12395
12396         tp->nvram_size = EEPROM_CHIP_SIZE;
12397
12398         if (tg3_nvram_read(tp, 0, &magic) != 0)
12399                 return;
12400
12401         if ((magic != TG3_EEPROM_MAGIC) &&
12402             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12403             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12404                 return;
12405
12406         /*
12407          * Size the chip by reading offsets at increasing powers of two.
12408          * When we encounter our validation signature, we know the addressing
12409          * has wrapped around, and thus have our chip size.
12410          */
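        /* Worked example (hypothetical 4 KB part): reads at 0x10, 0x20, ...
         * 0x800 return ordinary data; the read at 0x1000 wraps around to
         * offset 0 and returns the magic value, so nvram_size becomes 0x1000.
         */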
12411         cursize = 0x10;
12412
12413         while (cursize < tp->nvram_size) {
12414                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12415                         return;
12416
12417                 if (val == magic)
12418                         break;
12419
12420                 cursize <<= 1;
12421         }
12422
12423         tp->nvram_size = cursize;
12424 }
12425
12426 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12427 {
12428         u32 val;
12429
12430         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12431                 return;
12432
12433         /* Selfboot format */
12434         if (val != TG3_EEPROM_MAGIC) {
12435                 tg3_get_eeprom_size(tp);
12436                 return;
12437         }
12438
12439         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12440                 if (val != 0) {
12441                         /* This is confusing.  We want to operate on the
12442                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12443                          * call will read from NVRAM and byteswap the data
12444                          * according to the byteswapping settings for all
12445                          * other register accesses.  This ensures the data we
12446                          * want will always reside in the lower 16-bits.
12447                          * However, the data in NVRAM is in LE format, which
12448                          * means the data from the NVRAM read will always be
12449                          * opposite the endianness of the CPU.  The 16-bit
12450                          * byteswap then brings the data to CPU endianness.
12451                          */
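                        /* Worked example: the 16-bit count at 0xf2 is a size
                         * in KB, so a stored value of 512 yields
                         * 512 * 1024 bytes (TG3_NVRAM_SIZE_512KB).
                         */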
12452                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12453                         return;
12454                 }
12455         }
12456         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12457 }
12458
12459 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12460 {
12461         u32 nvcfg1;
12462
12463         nvcfg1 = tr32(NVRAM_CFG1);
12464         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12465                 tg3_flag_set(tp, FLASH);
12466         } else {
12467                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12468                 tw32(NVRAM_CFG1, nvcfg1);
12469         }
12470
12471         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12472             tg3_flag(tp, 5780_CLASS)) {
12473                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12474                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12475                         tp->nvram_jedecnum = JEDEC_ATMEL;
12476                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12477                         tg3_flag_set(tp, NVRAM_BUFFERED);
12478                         break;
12479                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12480                         tp->nvram_jedecnum = JEDEC_ATMEL;
12481                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12482                         break;
12483                 case FLASH_VENDOR_ATMEL_EEPROM:
12484                         tp->nvram_jedecnum = JEDEC_ATMEL;
12485                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12486                         tg3_flag_set(tp, NVRAM_BUFFERED);
12487                         break;
12488                 case FLASH_VENDOR_ST:
12489                         tp->nvram_jedecnum = JEDEC_ST;
12490                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12491                         tg3_flag_set(tp, NVRAM_BUFFERED);
12492                         break;
12493                 case FLASH_VENDOR_SAIFUN:
12494                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12495                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12496                         break;
12497                 case FLASH_VENDOR_SST_SMALL:
12498                 case FLASH_VENDOR_SST_LARGE:
12499                         tp->nvram_jedecnum = JEDEC_SST;
12500                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12501                         break;
12502                 }
12503         } else {
12504                 tp->nvram_jedecnum = JEDEC_ATMEL;
12505                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12506                 tg3_flag_set(tp, NVRAM_BUFFERED);
12507         }
12508 }
12509
12510 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12511 {
12512         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12513         case FLASH_5752PAGE_SIZE_256:
12514                 tp->nvram_pagesize = 256;
12515                 break;
12516         case FLASH_5752PAGE_SIZE_512:
12517                 tp->nvram_pagesize = 512;
12518                 break;
12519         case FLASH_5752PAGE_SIZE_1K:
12520                 tp->nvram_pagesize = 1024;
12521                 break;
12522         case FLASH_5752PAGE_SIZE_2K:
12523                 tp->nvram_pagesize = 2048;
12524                 break;
12525         case FLASH_5752PAGE_SIZE_4K:
12526                 tp->nvram_pagesize = 4096;
12527                 break;
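        /* The 264- and 528-byte cases below match Atmel DataFlash-style
         * page sizes (256 + 8 and 512 + 16 bytes respectively).
         */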
12528         case FLASH_5752PAGE_SIZE_264:
12529                 tp->nvram_pagesize = 264;
12530                 break;
12531         case FLASH_5752PAGE_SIZE_528:
12532                 tp->nvram_pagesize = 528;
12533                 break;
12534         }
12535 }
12536
12537 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12538 {
12539         u32 nvcfg1;
12540
12541         nvcfg1 = tr32(NVRAM_CFG1);
12542
12543         /* NVRAM protection for TPM */
12544         if (nvcfg1 & (1 << 27))
12545                 tg3_flag_set(tp, PROTECTED_NVRAM);
12546
12547         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12548         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12549         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12550                 tp->nvram_jedecnum = JEDEC_ATMEL;
12551                 tg3_flag_set(tp, NVRAM_BUFFERED);
12552                 break;
12553         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12554                 tp->nvram_jedecnum = JEDEC_ATMEL;
12555                 tg3_flag_set(tp, NVRAM_BUFFERED);
12556                 tg3_flag_set(tp, FLASH);
12557                 break;
12558         case FLASH_5752VENDOR_ST_M45PE10:
12559         case FLASH_5752VENDOR_ST_M45PE20:
12560         case FLASH_5752VENDOR_ST_M45PE40:
12561                 tp->nvram_jedecnum = JEDEC_ST;
12562                 tg3_flag_set(tp, NVRAM_BUFFERED);
12563                 tg3_flag_set(tp, FLASH);
12564                 break;
12565         }
12566
12567         if (tg3_flag(tp, FLASH)) {
12568                 tg3_nvram_get_pagesize(tp, nvcfg1);
12569         } else {
12570                 /* For eeprom, set pagesize to maximum eeprom size */
12571                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12572
12573                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12574                 tw32(NVRAM_CFG1, nvcfg1);
12575         }
12576 }
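
/* Illustrative sketch: the EEPROM branch above recurs verbatim in several
 * handlers below: treat the whole part as a single "page" and clear the
 * compatibility-bypass bit with a read-modify-write of NVRAM_CFG1.  A
 * hypothetical helper consolidating the pattern (tr32()/tw32() are the
 * driver's register accessors):
 */
static void tg3_nvram_setup_eeprom(struct tg3 *tp)
{
        u32 nvcfg1 = tr32(NVRAM_CFG1);

        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
        nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
        tw32(NVRAM_CFG1, nvcfg1);
}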
12577
12578 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12579 {
12580         u32 nvcfg1, protect = 0;
12581
12582         nvcfg1 = tr32(NVRAM_CFG1);
12583
12584         /* NVRAM protection for TPM */
12585         if (nvcfg1 & (1 << 27)) {
12586                 tg3_flag_set(tp, PROTECTED_NVRAM);
12587                 protect = 1;
12588         }
12589
12590         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12591         switch (nvcfg1) {
12592         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12593         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12594         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12595         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12596                 tp->nvram_jedecnum = JEDEC_ATMEL;
12597                 tg3_flag_set(tp, NVRAM_BUFFERED);
12598                 tg3_flag_set(tp, FLASH);
12599                 tp->nvram_pagesize = 264;
12600                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12601                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12602                         tp->nvram_size = (protect ? 0x3e200 :
12603                                           TG3_NVRAM_SIZE_512KB);
12604                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12605                         tp->nvram_size = (protect ? 0x1f200 :
12606                                           TG3_NVRAM_SIZE_256KB);
12607                 else
12608                         tp->nvram_size = (protect ? 0x1f200 :
12609                                           TG3_NVRAM_SIZE_128KB);
12610                 break;
12611         case FLASH_5752VENDOR_ST_M45PE10:
12612         case FLASH_5752VENDOR_ST_M45PE20:
12613         case FLASH_5752VENDOR_ST_M45PE40:
12614                 tp->nvram_jedecnum = JEDEC_ST;
12615                 tg3_flag_set(tp, NVRAM_BUFFERED);
12616                 tg3_flag_set(tp, FLASH);
12617                 tp->nvram_pagesize = 256;
12618                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12619                         tp->nvram_size = (protect ?
12620                                           TG3_NVRAM_SIZE_64KB :
12621                                           TG3_NVRAM_SIZE_128KB);
12622                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12623                         tp->nvram_size = (protect ?
12624                                           TG3_NVRAM_SIZE_64KB :
12625                                           TG3_NVRAM_SIZE_256KB);
12626                 else
12627                         tp->nvram_size = (protect ?
12628                                           TG3_NVRAM_SIZE_128KB :
12629                                           TG3_NVRAM_SIZE_512KB);
12630                 break;
12631         }
12632 }
12633
12634 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12635 {
12636         u32 nvcfg1;
12637
12638         nvcfg1 = tr32(NVRAM_CFG1);
12639
12640         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12641         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12642         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12643         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12644         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12645                 tp->nvram_jedecnum = JEDEC_ATMEL;
12646                 tg3_flag_set(tp, NVRAM_BUFFERED);
12647                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12648
12649                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12650                 tw32(NVRAM_CFG1, nvcfg1);
12651                 break;
12652         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12653         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12654         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12655         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12656                 tp->nvram_jedecnum = JEDEC_ATMEL;
12657                 tg3_flag_set(tp, NVRAM_BUFFERED);
12658                 tg3_flag_set(tp, FLASH);
12659                 tp->nvram_pagesize = 264;
12660                 break;
12661         case FLASH_5752VENDOR_ST_M45PE10:
12662         case FLASH_5752VENDOR_ST_M45PE20:
12663         case FLASH_5752VENDOR_ST_M45PE40:
12664                 tp->nvram_jedecnum = JEDEC_ST;
12665                 tg3_flag_set(tp, NVRAM_BUFFERED);
12666                 tg3_flag_set(tp, FLASH);
12667                 tp->nvram_pagesize = 256;
12668                 break;
12669         }
12670 }
12671
12672 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12673 {
12674         u32 nvcfg1, protect = 0;
12675
12676         nvcfg1 = tr32(NVRAM_CFG1);
12677
12678         /* NVRAM protection for TPM */
12679         if (nvcfg1 & (1 << 27)) {
12680                 tg3_flag_set(tp, PROTECTED_NVRAM);
12681                 protect = 1;
12682         }
12683
12684         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12685         switch (nvcfg1) {
12686         case FLASH_5761VENDOR_ATMEL_ADB021D:
12687         case FLASH_5761VENDOR_ATMEL_ADB041D:
12688         case FLASH_5761VENDOR_ATMEL_ADB081D:
12689         case FLASH_5761VENDOR_ATMEL_ADB161D:
12690         case FLASH_5761VENDOR_ATMEL_MDB021D:
12691         case FLASH_5761VENDOR_ATMEL_MDB041D:
12692         case FLASH_5761VENDOR_ATMEL_MDB081D:
12693         case FLASH_5761VENDOR_ATMEL_MDB161D:
12694                 tp->nvram_jedecnum = JEDEC_ATMEL;
12695                 tg3_flag_set(tp, NVRAM_BUFFERED);
12696                 tg3_flag_set(tp, FLASH);
12697                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12698                 tp->nvram_pagesize = 256;
12699                 break;
12700         case FLASH_5761VENDOR_ST_A_M45PE20:
12701         case FLASH_5761VENDOR_ST_A_M45PE40:
12702         case FLASH_5761VENDOR_ST_A_M45PE80:
12703         case FLASH_5761VENDOR_ST_A_M45PE16:
12704         case FLASH_5761VENDOR_ST_M_M45PE20:
12705         case FLASH_5761VENDOR_ST_M_M45PE40:
12706         case FLASH_5761VENDOR_ST_M_M45PE80:
12707         case FLASH_5761VENDOR_ST_M_M45PE16:
12708                 tp->nvram_jedecnum = JEDEC_ST;
12709                 tg3_flag_set(tp, NVRAM_BUFFERED);
12710                 tg3_flag_set(tp, FLASH);
12711                 tp->nvram_pagesize = 256;
12712                 break;
12713         }
12714
12715         if (protect) {
12716                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12717         } else {
12718                 switch (nvcfg1) {
12719                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12720                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12721                 case FLASH_5761VENDOR_ST_A_M45PE16:
12722                 case FLASH_5761VENDOR_ST_M_M45PE16:
12723                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12724                         break;
12725                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12726                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12727                 case FLASH_5761VENDOR_ST_A_M45PE80:
12728                 case FLASH_5761VENDOR_ST_M_M45PE80:
12729                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12730                         break;
12731                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12732                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12733                 case FLASH_5761VENDOR_ST_A_M45PE40:
12734                 case FLASH_5761VENDOR_ST_M_M45PE40:
12735                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12736                         break;
12737                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12738                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12739                 case FLASH_5761VENDOR_ST_A_M45PE20:
12740                 case FLASH_5761VENDOR_ST_M_M45PE20:
12741                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12742                         break;
12743                 }
12744         }
12745 }
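
/* Illustrative sketch: unlike the 5755 handler, which hard-codes reduced
 * sizes when the TPM bit is set, the 5761 path above trusts the
 * NVRAM_ADDR_LOCKOUT register: presumably bootcode programs it with the
 * first protected offset, so everything below it is usable.  As a
 * hypothetical helper:
 */
static u32 tg3_5761_usable_nvram_size(struct tg3 *tp)
{
        return tr32(NVRAM_ADDR_LOCKOUT); /* first locked byte == usable size */
}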
12746
12747 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12748 {
12749         tp->nvram_jedecnum = JEDEC_ATMEL;
12750         tg3_flag_set(tp, NVRAM_BUFFERED);
12751         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12752 }
12753
12754 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12755 {
12756         u32 nvcfg1;
12757
12758         nvcfg1 = tr32(NVRAM_CFG1);
12759
12760         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12761         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12762         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12763                 tp->nvram_jedecnum = JEDEC_ATMEL;
12764                 tg3_flag_set(tp, NVRAM_BUFFERED);
12765                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12766
12767                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12768                 tw32(NVRAM_CFG1, nvcfg1);
12769                 return;
12770         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12771         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12772         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12773         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12774         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12775         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12776         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12777                 tp->nvram_jedecnum = JEDEC_ATMEL;
12778                 tg3_flag_set(tp, NVRAM_BUFFERED);
12779                 tg3_flag_set(tp, FLASH);
12780
12781                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12782                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12783                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12784                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12785                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12786                         break;
12787                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12788                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12789                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12790                         break;
12791                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12792                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12793                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12794                         break;
12795                 }
12796                 break;
12797         case FLASH_5752VENDOR_ST_M45PE10:
12798         case FLASH_5752VENDOR_ST_M45PE20:
12799         case FLASH_5752VENDOR_ST_M45PE40:
12800                 tp->nvram_jedecnum = JEDEC_ST;
12801                 tg3_flag_set(tp, NVRAM_BUFFERED);
12802                 tg3_flag_set(tp, FLASH);
12803
12804                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12805                 case FLASH_5752VENDOR_ST_M45PE10:
12806                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12807                         break;
12808                 case FLASH_5752VENDOR_ST_M45PE20:
12809                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12810                         break;
12811                 case FLASH_5752VENDOR_ST_M45PE40:
12812                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12813                         break;
12814                 }
12815                 break;
12816         default:
12817                 tg3_flag_set(tp, NO_NVRAM);
12818                 return;
12819         }
12820
12821         tg3_nvram_get_pagesize(tp, nvcfg1);
12822         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12823                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12824 }
12825
12826
12827 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12828 {
12829         u32 nvcfg1;
12830
12831         nvcfg1 = tr32(NVRAM_CFG1);
12832
12833         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12834         case FLASH_5717VENDOR_ATMEL_EEPROM:
12835         case FLASH_5717VENDOR_MICRO_EEPROM:
12836                 tp->nvram_jedecnum = JEDEC_ATMEL;
12837                 tg3_flag_set(tp, NVRAM_BUFFERED);
12838                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12839
12840                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12841                 tw32(NVRAM_CFG1, nvcfg1);
12842                 return;
12843         case FLASH_5717VENDOR_ATMEL_MDB011D:
12844         case FLASH_5717VENDOR_ATMEL_ADB011B:
12845         case FLASH_5717VENDOR_ATMEL_ADB011D:
12846         case FLASH_5717VENDOR_ATMEL_MDB021D:
12847         case FLASH_5717VENDOR_ATMEL_ADB021B:
12848         case FLASH_5717VENDOR_ATMEL_ADB021D:
12849         case FLASH_5717VENDOR_ATMEL_45USPT:
12850                 tp->nvram_jedecnum = JEDEC_ATMEL;
12851                 tg3_flag_set(tp, NVRAM_BUFFERED);
12852                 tg3_flag_set(tp, FLASH);
12853
12854                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12855                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12856                         /* Detect size with tg3_nvram_get_size() */
12857                         break;
12858                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12859                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12860                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12861                         break;
12862                 default:
12863                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12864                         break;
12865                 }
12866                 break;
12867         case FLASH_5717VENDOR_ST_M_M25PE10:
12868         case FLASH_5717VENDOR_ST_A_M25PE10:
12869         case FLASH_5717VENDOR_ST_M_M45PE10:
12870         case FLASH_5717VENDOR_ST_A_M45PE10:
12871         case FLASH_5717VENDOR_ST_M_M25PE20:
12872         case FLASH_5717VENDOR_ST_A_M25PE20:
12873         case FLASH_5717VENDOR_ST_M_M45PE20:
12874         case FLASH_5717VENDOR_ST_A_M45PE20:
12875         case FLASH_5717VENDOR_ST_25USPT:
12876         case FLASH_5717VENDOR_ST_45USPT:
12877                 tp->nvram_jedecnum = JEDEC_ST;
12878                 tg3_flag_set(tp, NVRAM_BUFFERED);
12879                 tg3_flag_set(tp, FLASH);
12880
12881                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12882                 case FLASH_5717VENDOR_ST_M_M25PE20:
12883                 case FLASH_5717VENDOR_ST_M_M45PE20:
12884                         /* Detect size with tg3_nvram_get_size() */
12885                         break;
12886                 case FLASH_5717VENDOR_ST_A_M25PE20:
12887                 case FLASH_5717VENDOR_ST_A_M45PE20:
12888                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12889                         break;
12890                 default:
12891                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12892                         break;
12893                 }
12894                 break;
12895         default:
12896                 tg3_flag_set(tp, NO_NVRAM);
12897                 return;
12898         }
12899
12900         tg3_nvram_get_pagesize(tp, nvcfg1);
12901         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12902                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12903 }
12904
12905 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12906 {
12907         u32 nvcfg1, nvmpinstrp;
12908
12909         nvcfg1 = tr32(NVRAM_CFG1);
12910         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12911
12912         switch (nvmpinstrp) {
12913         case FLASH_5720_EEPROM_HD:
12914         case FLASH_5720_EEPROM_LD:
12915                 tp->nvram_jedecnum = JEDEC_ATMEL;
12916                 tg3_flag_set(tp, NVRAM_BUFFERED);
12917
12918                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12919                 tw32(NVRAM_CFG1, nvcfg1);
12920                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12921                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12922                 else
12923                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12924                 return;
12925         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12926         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12927         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12928         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12929         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12930         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12931         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12932         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12933         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12934         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12935         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12936         case FLASH_5720VENDOR_ATMEL_45USPT:
12937                 tp->nvram_jedecnum = JEDEC_ATMEL;
12938                 tg3_flag_set(tp, NVRAM_BUFFERED);
12939                 tg3_flag_set(tp, FLASH);
12940
12941                 switch (nvmpinstrp) {
12942                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12943                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12944                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12945                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12946                         break;
12947                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12948                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12949                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12950                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12951                         break;
12952                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12953                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12954                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12955                         break;
12956                 default:
12957                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12958                         break;
12959                 }
12960                 break;
12961         case FLASH_5720VENDOR_M_ST_M25PE10:
12962         case FLASH_5720VENDOR_M_ST_M45PE10:
12963         case FLASH_5720VENDOR_A_ST_M25PE10:
12964         case FLASH_5720VENDOR_A_ST_M45PE10:
12965         case FLASH_5720VENDOR_M_ST_M25PE20:
12966         case FLASH_5720VENDOR_M_ST_M45PE20:
12967         case FLASH_5720VENDOR_A_ST_M25PE20:
12968         case FLASH_5720VENDOR_A_ST_M45PE20:
12969         case FLASH_5720VENDOR_M_ST_M25PE40:
12970         case FLASH_5720VENDOR_M_ST_M45PE40:
12971         case FLASH_5720VENDOR_A_ST_M25PE40:
12972         case FLASH_5720VENDOR_A_ST_M45PE40:
12973         case FLASH_5720VENDOR_M_ST_M25PE80:
12974         case FLASH_5720VENDOR_M_ST_M45PE80:
12975         case FLASH_5720VENDOR_A_ST_M25PE80:
12976         case FLASH_5720VENDOR_A_ST_M45PE80:
12977         case FLASH_5720VENDOR_ST_25USPT:
12978         case FLASH_5720VENDOR_ST_45USPT:
12979                 tp->nvram_jedecnum = JEDEC_ST;
12980                 tg3_flag_set(tp, NVRAM_BUFFERED);
12981                 tg3_flag_set(tp, FLASH);
12982
12983                 switch (nvmpinstrp) {
12984                 case FLASH_5720VENDOR_M_ST_M25PE20:
12985                 case FLASH_5720VENDOR_M_ST_M45PE20:
12986                 case FLASH_5720VENDOR_A_ST_M25PE20:
12987                 case FLASH_5720VENDOR_A_ST_M45PE20:
12988                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12989                         break;
12990                 case FLASH_5720VENDOR_M_ST_M25PE40:
12991                 case FLASH_5720VENDOR_M_ST_M45PE40:
12992                 case FLASH_5720VENDOR_A_ST_M25PE40:
12993                 case FLASH_5720VENDOR_A_ST_M45PE40:
12994                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12995                         break;
12996                 case FLASH_5720VENDOR_M_ST_M25PE80:
12997                 case FLASH_5720VENDOR_M_ST_M45PE80:
12998                 case FLASH_5720VENDOR_A_ST_M25PE80:
12999                 case FLASH_5720VENDOR_A_ST_M45PE80:
13000                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13001                         break;
13002                 default:
13003                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13004                         break;
13005                 }
13006                 break;
13007         default:
13008                 tg3_flag_set(tp, NO_NVRAM);
13009                 return;
13010         }
13011
13012         tg3_nvram_get_pagesize(tp, nvcfg1);
13013         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13014                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13015 }
13016
13017 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13018 static void __devinit tg3_nvram_init(struct tg3 *tp)
13019 {
13020         tw32_f(GRC_EEPROM_ADDR,
13021              (EEPROM_ADDR_FSM_RESET |
13022               (EEPROM_DEFAULT_CLOCK_PERIOD <<
13023                EEPROM_ADDR_CLKPERD_SHIFT)));
13024
13025         msleep(1);
13026
13027         /* Enable seeprom accesses. */
13028         tw32_f(GRC_LOCAL_CTRL,
13029              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13030         udelay(100);
13031
13032         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13033             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13034                 tg3_flag_set(tp, NVRAM);
13035
13036                 if (tg3_nvram_lock(tp)) {
13037                         netdev_warn(tp->dev,
13038                                     "Cannot get nvram lock, %s failed\n",
13039                                     __func__);
13040                         return;
13041                 }
13042                 tg3_enable_nvram_access(tp);
13043
13044                 tp->nvram_size = 0;
13045
13046                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13047                         tg3_get_5752_nvram_info(tp);
13048                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13049                         tg3_get_5755_nvram_info(tp);
13050                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13051                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13052                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13053                         tg3_get_5787_nvram_info(tp);
13054                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13055                         tg3_get_5761_nvram_info(tp);
13056                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13057                         tg3_get_5906_nvram_info(tp);
13058                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13059                          tg3_flag(tp, 57765_CLASS))
13060                         tg3_get_57780_nvram_info(tp);
13061                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13062                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13063                         tg3_get_5717_nvram_info(tp);
13064                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13065                         tg3_get_5720_nvram_info(tp);
13066                 else
13067                         tg3_get_nvram_info(tp);
13068
13069                 if (tp->nvram_size == 0)
13070                         tg3_get_nvram_size(tp);
13071
13072                 tg3_disable_nvram_access(tp);
13073                 tg3_nvram_unlock(tp);
13074
13075         } else {
13076                 tg3_flag_clear(tp, NVRAM);
13077                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13078
13079                 tg3_get_eeprom_size(tp);
13080         }
13081 }
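
/* Illustrative sketch: the dispatch above keys on GET_ASIC_REV() from
 * tg3.h.  Assuming the usual tg3 encoding, the ASIC revision occupies the
 * upper bits of the chip revision ID, so the macro amounts to a shift:
 */
static inline u32 example_get_asic_rev(u32 chip_rev_id)
{
        return chip_rev_id >> 12; /* assumed layout; see tg3.h */
}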
13082
13083 struct subsys_tbl_ent {
13084         u16 subsys_vendor, subsys_devid;
13085         u32 phy_id;
13086 };
13087
13088 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13089         /* Broadcom boards. */
13090         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13091           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13092         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13093           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13094         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13095           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13096         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13097           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13098         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13099           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13100         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13101           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13102         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13103           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13104         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13105           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13106         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13107           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13108         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13109           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13110         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13111           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13112
13113         /* 3com boards. */
13114         { TG3PCI_SUBVENDOR_ID_3COM,
13115           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13116         { TG3PCI_SUBVENDOR_ID_3COM,
13117           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13118         { TG3PCI_SUBVENDOR_ID_3COM,
13119           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13120         { TG3PCI_SUBVENDOR_ID_3COM,
13121           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13122         { TG3PCI_SUBVENDOR_ID_3COM,
13123           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13124
13125         /* DELL boards. */
13126         { TG3PCI_SUBVENDOR_ID_DELL,
13127           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13128         { TG3PCI_SUBVENDOR_ID_DELL,
13129           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13130         { TG3PCI_SUBVENDOR_ID_DELL,
13131           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13132         { TG3PCI_SUBVENDOR_ID_DELL,
13133           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13134
13135         /* Compaq boards. */
13136         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13137           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13138         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13139           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13140         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13141           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13142         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13143           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13144         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13145           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13146
13147         /* IBM boards. */
13148         { TG3PCI_SUBVENDOR_ID_IBM,
13149           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13150 };
13151
13152 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13153 {
13154         int i;
13155
13156         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13157                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13158                      tp->pdev->subsystem_vendor) &&
13159                     (subsys_id_to_phy_id[i].subsys_devid ==
13160                      tp->pdev->subsystem_device))
13161                         return &subsys_id_to_phy_id[i];
13162         }
13163         return NULL;
13164 }
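
/* Usage sketch (hypothetical caller): tg3_phy_probe() below consults this
 * table only when neither the hardware nor the EEPROM yields a usable
 * PHY ID:
 */
static int example_phy_id_from_subsys(struct tg3 *tp)
{
        struct subsys_tbl_ent *p = tg3_lookup_by_subsys(tp);

        if (!p)
                return -ENODEV; /* board not in the table */
        tp->phy_id = p->phy_id;
        return 0;
}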
13165
13166 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13167 {
13168         u32 val;
13169
13170         tp->phy_id = TG3_PHY_ID_INVALID;
13171         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13172
13173         /* Assume an onboard, WOL-capable device by default. */
13174         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13175         tg3_flag_set(tp, WOL_CAP);
13176
13177         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13178                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13179                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13180                         tg3_flag_set(tp, IS_NIC);
13181                 }
13182                 val = tr32(VCPU_CFGSHDW);
13183                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13184                         tg3_flag_set(tp, ASPM_WORKAROUND);
13185                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13186                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13187                         tg3_flag_set(tp, WOL_ENABLE);
13188                         device_set_wakeup_enable(&tp->pdev->dev, true);
13189                 }
13190                 goto done;
13191         }
13192
13193         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13194         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13195                 u32 nic_cfg, led_cfg;
13196                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13197                 int eeprom_phy_serdes = 0;
13198
13199                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13200                 tp->nic_sram_data_cfg = nic_cfg;
13201
13202                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13203                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13204                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13205                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13206                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13207                     (ver > 0) && (ver < 0x100))
13208                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13209
13210                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13211                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13212
13213                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13214                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13215                         eeprom_phy_serdes = 1;
13216
13217                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13218                 if (nic_phy_id != 0) {
13219                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13220                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13221
13222                         eeprom_phy_id  = (id1 >> 16) << 10;
13223                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13224                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13225                 } else
13226                         eeprom_phy_id = 0;
13227
13228                 tp->phy_id = eeprom_phy_id;
13229                 if (eeprom_phy_serdes) {
13230                         if (!tg3_flag(tp, 5705_PLUS))
13231                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13232                         else
13233                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13234                 }
13235
13236                 if (tg3_flag(tp, 5750_PLUS))
13237                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13238                                     SHASTA_EXT_LED_MODE_MASK);
13239                 else
13240                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13241
13242                 switch (led_cfg) {
13243                 default:
13244                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13245                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13246                         break;
13247
13248                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13249                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13250                         break;
13251
13252                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13253                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13254
13255                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13256                          * read from some older 5700/5701 bootcode.
13257                          */
13258                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13259                             ASIC_REV_5700 ||
13260                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13261                             ASIC_REV_5701)
13262                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13263
13264                         break;
13265
13266                 case SHASTA_EXT_LED_SHARED:
13267                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13268                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13269                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13270                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13271                                                  LED_CTRL_MODE_PHY_2);
13272                         break;
13273
13274                 case SHASTA_EXT_LED_MAC:
13275                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13276                         break;
13277
13278                 case SHASTA_EXT_LED_COMBO:
13279                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13280                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13281                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13282                                                  LED_CTRL_MODE_PHY_2);
13283                         break;
13284
13285                 }
13286
13287                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13288                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13289                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13290                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13291
13292                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13293                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13294
13295                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13296                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13297                         if ((tp->pdev->subsystem_vendor ==
13298                              PCI_VENDOR_ID_ARIMA) &&
13299                             (tp->pdev->subsystem_device == 0x205a ||
13300                              tp->pdev->subsystem_device == 0x2063))
13301                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13302                 } else {
13303                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13304                         tg3_flag_set(tp, IS_NIC);
13305                 }
13306
13307                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13308                         tg3_flag_set(tp, ENABLE_ASF);
13309                         if (tg3_flag(tp, 5750_PLUS))
13310                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13311                 }
13312
13313                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13314                     tg3_flag(tp, 5750_PLUS))
13315                         tg3_flag_set(tp, ENABLE_APE);
13316
13317                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13318                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13319                         tg3_flag_clear(tp, WOL_CAP);
13320
13321                 if (tg3_flag(tp, WOL_CAP) &&
13322                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13323                         tg3_flag_set(tp, WOL_ENABLE);
13324                         device_set_wakeup_enable(&tp->pdev->dev, true);
13325                 }
13326
13327                 if (cfg2 & (1 << 17))
13328                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13329
13330                 /* Serdes signal pre-emphasis in register 0x590 is set
13331                  * by the bootcode if bit 18 is set. */
13332                 if (cfg2 & (1 << 18))
13333                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13334
13335                 if ((tg3_flag(tp, 57765_PLUS) ||
13336                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13337                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13338                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13339                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13340
13341                 if (tg3_flag(tp, PCI_EXPRESS) &&
13342                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13343                     !tg3_flag(tp, 57765_PLUS)) {
13344                         u32 cfg3;
13345
13346                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13347                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13348                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13349                 }
13350
13351                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13352                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13353                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13354                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13355                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13356                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13357         }
13358 done:
13359         if (tg3_flag(tp, WOL_CAP))
13360                 device_set_wakeup_enable(&tp->pdev->dev,
13361                                          tg3_flag(tp, WOL_ENABLE));
13362         else
13363                 device_set_wakeup_capable(&tp->pdev->dev, false);
13364 }
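
/* Illustrative sketch of the PHY ID packing used in tg3_get_eeprom_hw_cfg()
 * above and again in tg3_phy_probe() below: the two 16-bit MII ID words are
 * folded into tg3's own layout, and the three fields do not overlap:
 */
static u32 example_pack_tg3_phy_id(u32 id1, u32 id2)
{
        u32 id;

        id  = (id1 & 0xffff) << 10; /* bits 10..25 */
        id |= (id2 & 0xfc00) << 16; /* bits 26..31 */
        id |= (id2 & 0x03ff) <<  0; /* bits  0..9  */
        return id;
}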
13365
13366 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13367 {
13368         int i;
13369         u32 val;
13370
13371         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13372         tw32(OTP_CTRL, cmd);
13373
13374         /* Wait for up to 1 ms for command to execute. */
13375         for (i = 0; i < 100; i++) {
13376                 val = tr32(OTP_STATUS);
13377                 if (val & OTP_STATUS_CMD_DONE)
13378                         break;
13379                 udelay(10);
13380         }
13381
13382         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13383 }
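
/* Illustrative sketch: the loop above is the usual poll-with-timeout idiom.
 * One hundred polls at 10 us each gives the "up to 1 ms" the comment
 * promises, and the post-loop re-test distinguishes success on the final
 * iteration from a genuine timeout.  A generic, hypothetical form:
 */
static int example_poll_reg_bit(struct tg3 *tp, u32 reg, u32 bit, int usecs)
{
        int i;

        for (i = 0; i < usecs / 10; i++) {
                if (tr32(reg) & bit)
                        return 0;
                udelay(10);
        }
        return (tr32(reg) & bit) ? 0 : -EBUSY;
}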
13384
13385 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13386  * configuration is a 32-bit value that straddles the alignment boundary.
13387  * We do two 32-bit reads and then shift and merge the results.
13388  */
13389 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13390 {
13391         u32 bhalf_otp, thalf_otp;
13392
13393         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13394
13395         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13396                 return 0;
13397
13398         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13399
13400         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13401                 return 0;
13402
13403         thalf_otp = tr32(OTP_READ_DATA);
13404
13405         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13406
13407         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13408                 return 0;
13409
13410         bhalf_otp = tr32(OTP_READ_DATA);
13411
13412         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13413 }
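
/* Worked example with made-up values: the gphy word straddles the 32-bit
 * OTP alignment, so its high half is the low half of the first read and
 * its low half the high half of the second:
 *
 *      thalf_otp = 0x11112222, bhalf_otp = 0x33334444
 *      --> (0x2222 << 16) | 0x3333 = 0x22223333
 */
static u32 example_merge_otp_halves(u32 thalf, u32 bhalf)
{
        return ((thalf & 0x0000ffff) << 16) | (bhalf >> 16);
}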
13414
13415 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13416 {
13417         u32 adv = ADVERTISED_Autoneg;
13418
13419         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13420                 adv |= ADVERTISED_1000baseT_Half |
13421                        ADVERTISED_1000baseT_Full;
13422
13423         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13424                 adv |= ADVERTISED_100baseT_Half |
13425                        ADVERTISED_100baseT_Full |
13426                        ADVERTISED_10baseT_Half |
13427                        ADVERTISED_10baseT_Full |
13428                        ADVERTISED_TP;
13429         else
13430                 adv |= ADVERTISED_FIBRE;
13431
13432         tp->link_config.advertising = adv;
13433         tp->link_config.speed = SPEED_UNKNOWN;
13434         tp->link_config.duplex = DUPLEX_UNKNOWN;
13435         tp->link_config.autoneg = AUTONEG_ENABLE;
13436         tp->link_config.active_speed = SPEED_UNKNOWN;
13437         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13438
13439         tp->old_link = -1;
13440 }
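
/* Illustrative sketch: for an ordinary copper PHY (neither 10/100-only nor
 * any serdes flag), the function above leaves the advertising mask as:
 */
static u32 example_default_copper_advertising(void)
{
        return ADVERTISED_Autoneg |
               ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
               ADVERTISED_100baseT_Half  | ADVERTISED_100baseT_Full  |
               ADVERTISED_10baseT_Half   | ADVERTISED_10baseT_Full   |
               ADVERTISED_TP;
}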
13441
13442 static int __devinit tg3_phy_probe(struct tg3 *tp)
13443 {
13444         u32 hw_phy_id_1, hw_phy_id_2;
13445         u32 hw_phy_id, hw_phy_id_masked;
13446         int err;
13447
13448         /* flow control autonegotiation is default behavior */
13449         tg3_flag_set(tp, PAUSE_AUTONEG);
13450         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13451
13452         if (tg3_flag(tp, USE_PHYLIB))
13453                 return tg3_phy_init(tp);
13454
13455         /* Reading the PHY ID register can conflict with ASF
13456          * firmware access to the PHY hardware.
13457          */
13458         err = 0;
13459         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13460                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13461         } else {
13462                 /* Now read the physical PHY_ID from the chip and verify
13463                  * that it is sane.  If it doesn't look good, we fall back
13464                  * to the PHY_ID found in the eeprom area, and failing
13465                  * that, the hard-coded subsystem-ID table.
13466                  */
13467                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13468                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13469
13470                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13471                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13472                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13473
13474                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13475         }
13476
13477         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13478                 tp->phy_id = hw_phy_id;
13479                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13480                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13481                 else
13482                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13483         } else {
13484                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13485                         /* Do nothing, phy ID already set up in
13486                          * tg3_get_eeprom_hw_cfg().
13487                          */
13488                 } else {
13489                         struct subsys_tbl_ent *p;
13490
13491                         /* No eeprom signature?  Try the hardcoded
13492                          * subsys device table.
13493                          */
13494                         p = tg3_lookup_by_subsys(tp);
13495                         if (!p)
13496                                 return -ENODEV;
13497
13498                         tp->phy_id = p->phy_id;
13499                         if (!tp->phy_id ||
13500                             tp->phy_id == TG3_PHY_ID_BCM8002)
13501                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13502                 }
13503         }
13504
13505         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13506             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13507              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13508              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13509               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13510              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13511               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13512                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13513
13514         tg3_phy_init_link_config(tp);
13515
13516         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13517             !tg3_flag(tp, ENABLE_APE) &&
13518             !tg3_flag(tp, ENABLE_ASF)) {
13519                 u32 bmsr, dummy;
13520
13521                 tg3_readphy(tp, MII_BMSR, &bmsr);
13522                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13523                     (bmsr & BMSR_LSTATUS))
13524                         goto skip_phy_reset;
13525
13526                 err = tg3_phy_reset(tp);
13527                 if (err)
13528                         return err;
13529
13530                 tg3_phy_set_wirespeed(tp);
13531
13532                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13533                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13534                                             tp->link_config.flowctrl);
13535
13536                         tg3_writephy(tp, MII_BMCR,
13537                                      BMCR_ANENABLE | BMCR_ANRESTART);
13538                 }
13539         }
13540
13541 skip_phy_reset:
13542         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13543                 err = tg3_init_5401phy_dsp(tp);
13544                 if (err)
13545                         return err;
13546
13547                 err = tg3_init_5401phy_dsp(tp);
13548         }
13549
13550         return err;
13551 }
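
/* Illustrative sketch: the back-to-back BMSR reads in tg3_phy_probe() above
 * are deliberate: the MII link-status bit is latched-low per 802.3, so the
 * first read flushes a stale latch and the second reflects the current
 * state.  A hypothetical helper making the idiom explicit:
 */
static int example_phy_link_up(struct tg3 *tp)
{
        u32 bmsr;

        tg3_readphy(tp, MII_BMSR, &bmsr); /* discard latched value */
        if (tg3_readphy(tp, MII_BMSR, &bmsr))
                return 0; /* read failed; assume link down */
        return !!(bmsr & BMSR_LSTATUS);
}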
13552
13553 static void __devinit tg3_read_vpd(struct tg3 *tp)
13554 {
13555         u8 *vpd_data;
13556         unsigned int block_end, rosize, len;
13557         u32 vpdlen;
13558         int j, i = 0;
13559
13560         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13561         if (!vpd_data)
13562                 goto out_no_vpd;
13563
13564         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13565         if (i < 0)
13566                 goto out_not_found;
13567
13568         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13569         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13570         i += PCI_VPD_LRDT_TAG_SIZE;
13571
13572         if (block_end > vpdlen)
13573                 goto out_not_found;
13574
13575         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13576                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13577         if (j > 0) {
13578                 len = pci_vpd_info_field_size(&vpd_data[j]);
13579
13580                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13581                 if (j + len > block_end || len != 4 ||
13582                     memcmp(&vpd_data[j], "1028", 4))
13583                         goto partno;
13584
13585                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13586                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13587                 if (j < 0)
13588                         goto partno;
13589
13590                 len = pci_vpd_info_field_size(&vpd_data[j]);
13591
13592                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13593                 if (j + len > block_end)
13594                         goto partno;
13595
13596                 memcpy(tp->fw_ver, &vpd_data[j], len);
13597                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13598         }
13599
13600 partno:
13601         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13602                                       PCI_VPD_RO_KEYWORD_PARTNO);
13603         if (i < 0)
13604                 goto out_not_found;
13605
13606         len = pci_vpd_info_field_size(&vpd_data[i]);
13607
13608         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13609         if (len > TG3_BPN_SIZE ||
13610             (len + i) > vpdlen)
13611                 goto out_not_found;
13612
13613         memcpy(tp->board_part_number, &vpd_data[i], len);
13614
13615 out_not_found:
13616         kfree(vpd_data);
13617         if (tp->board_part_number[0])
13618                 return;
13619
13620 out_no_vpd:
13621         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13622                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13623                         strcpy(tp->board_part_number, "BCM5717");
13624                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13625                         strcpy(tp->board_part_number, "BCM5718");
13626                 else
13627                         goto nomatch;
13628         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13629                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13630                         strcpy(tp->board_part_number, "BCM57780");
13631                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13632                         strcpy(tp->board_part_number, "BCM57760");
13633                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13634                         strcpy(tp->board_part_number, "BCM57790");
13635                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13636                         strcpy(tp->board_part_number, "BCM57788");
13637                 else
13638                         goto nomatch;
13639         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13640                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13641                         strcpy(tp->board_part_number, "BCM57761");
13642                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13643                         strcpy(tp->board_part_number, "BCM57765");
13644                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13645                         strcpy(tp->board_part_number, "BCM57781");
13646                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13647                         strcpy(tp->board_part_number, "BCM57785");
13648                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13649                         strcpy(tp->board_part_number, "BCM57791");
13650                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13651                         strcpy(tp->board_part_number, "BCM57795");
13652                 else
13653                         goto nomatch;
13654         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13655                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13656                         strcpy(tp->board_part_number, "BCM57762");
13657                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13658                         strcpy(tp->board_part_number, "BCM57766");
13659                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13660                         strcpy(tp->board_part_number, "BCM57782");
13661                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13662                         strcpy(tp->board_part_number, "BCM57786");
13663                 else
13664                         goto nomatch;
13665         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13666                 strcpy(tp->board_part_number, "BCM95906");
13667         } else {
13668 nomatch:
13669                 strcpy(tp->board_part_number, "none");
13670         }
13671 }
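
/* Illustrative sketch: tg3_read_vpd() above follows the standard PCI VPD
 * shape, i.e. locate the read-only LRDT tag, then search keywords inside
 * it.  Stripped of tg3's bounds checks and the Dell special case, the walk
 * reduces to the following (same pci_vpd_* helpers as above; the example_*
 * name is hypothetical):
 */
static int example_vpd_field(const u8 *vpd, unsigned int vpdlen,
                             const char *keyword, unsigned int *field_len)
{
        int i, rosize;

        i = pci_vpd_find_tag(vpd, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
        if (i < 0)
                return i;

        rosize = pci_vpd_lrdt_size(&vpd[i]);
        i += PCI_VPD_LRDT_TAG_SIZE;

        i = pci_vpd_find_info_keyword(vpd, i, rosize, keyword);
        if (i < 0)
                return i;

        *field_len = pci_vpd_info_field_size(&vpd[i]);
        return i + PCI_VPD_INFO_FLD_HDR_SIZE; /* offset of the field data */
}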
13672
13673 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13674 {
13675         u32 val;
13676
13677         if (tg3_nvram_read(tp, offset, &val) ||
13678             (val & 0xfc000000) != 0x0c000000 ||
13679             tg3_nvram_read(tp, offset + 4, &val) ||
13680             val != 0)
13681                 return 0;
13682
13683         return 1;
13684 }
13685
13686 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13687 {
13688         u32 val, offset, start, ver_offset;
13689         int i, dst_off;
13690         bool newver = false;
13691
13692         if (tg3_nvram_read(tp, 0xc, &offset) ||
13693             tg3_nvram_read(tp, 0x4, &start))
13694                 return;
13695
13696         offset = tg3_nvram_logical_addr(tp, offset);
13697
13698         if (tg3_nvram_read(tp, offset, &val))
13699                 return;
13700
13701         if ((val & 0xfc000000) == 0x0c000000) {
13702                 if (tg3_nvram_read(tp, offset + 4, &val))
13703                         return;
13704
13705                 if (val == 0)
13706                         newver = true;
13707         }
13708
13709         dst_off = strlen(tp->fw_ver);
13710
13711         if (newver) {
13712                 if (TG3_VER_SIZE - dst_off < 16 ||
13713                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13714                         return;
13715
13716                 offset = offset + ver_offset - start;
13717                 for (i = 0; i < 16; i += 4) {
13718                         __be32 v;
13719                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13720                                 return;
13721
13722                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13723                 }
13724         } else {
13725                 u32 major, minor;
13726
13727                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13728                         return;
13729
13730                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13731                         TG3_NVM_BCVER_MAJSFT;
13732                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13733                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13734                          "v%d.%02d", major, minor);
13735         }
13736 }
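
/* Illustrative sketch: the "newver" path above pulls a 16-byte ASCII
 * version string out of NVRAM as four big-endian words; keeping the data
 * __be32 until the memcpy() preserves byte order on either host
 * endianness.  Generalized (hypothetical helper, n assumed a multiple
 * of 4):
 */
static int example_copy_nvram_string(struct tg3 *tp, u32 offset,
                                     char *dst, int n)
{
        int i;

        for (i = 0; i < n; i += 4) {
                __be32 v;

                if (tg3_nvram_read_be32(tp, offset + i, &v))
                        return -EIO;
                memcpy(dst + i, &v, sizeof(v));
        }
        return 0;
}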
13737
13738 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13739 {
13740         u32 val, major, minor;
13741
13742         /* Use native endian representation */
13743         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13744                 return;
13745
13746         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13747                 TG3_NVM_HWSB_CFG1_MAJSFT;
13748         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13749                 TG3_NVM_HWSB_CFG1_MINSFT;
13750
13751         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13752 }
13753
13754 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13755 {
13756         u32 offset, major, minor, build;
13757
13758         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13759
13760         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13761                 return;
13762
13763         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13764         case TG3_EEPROM_SB_REVISION_0:
13765                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13766                 break;
13767         case TG3_EEPROM_SB_REVISION_2:
13768                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13769                 break;
13770         case TG3_EEPROM_SB_REVISION_3:
13771                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13772                 break;
13773         case TG3_EEPROM_SB_REVISION_4:
13774                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13775                 break;
13776         case TG3_EEPROM_SB_REVISION_5:
13777                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13778                 break;
13779         case TG3_EEPROM_SB_REVISION_6:
13780                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13781                 break;
13782         default:
13783                 return;
13784         }
13785
13786         if (tg3_nvram_read(tp, offset, &val))
13787                 return;
13788
13789         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13790                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13791         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13792                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13793         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13794
13795         if (minor > 99 || build > 26)
13796                 return;
13797
13798         offset = strlen(tp->fw_ver);
13799         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13800                  " v%d.%02d", major, minor);
13801
13802         if (build > 0) {
13803                 offset = strlen(tp->fw_ver);
13804                 if (offset < TG3_VER_SIZE - 1)
13805                         tp->fw_ver[offset] = 'a' + build - 1;
13806         }
13807 }
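
/* Worked example: the suffix logic above maps build 1 to 'a' and build 26
 * to 'z', which is exactly why builds above 26 are rejected earlier; minor
 * is printed with %02d, hence the minor > 99 check.  As a tiny helper:
 */
static char example_sb_build_suffix(u32 build)
{
        return build ? 'a' + build - 1 : '\0';
}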
13808
13809 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13810 {
13811         u32 val, offset, start;
13812         int i, vlen;
13813
13814         for (offset = TG3_NVM_DIR_START;
13815              offset < TG3_NVM_DIR_END;
13816              offset += TG3_NVM_DIRENT_SIZE) {
13817                 if (tg3_nvram_read(tp, offset, &val))
13818                         return;
13819
13820                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13821                         break;
13822         }
13823
13824         if (offset == TG3_NVM_DIR_END)
13825                 return;
13826
13827         if (!tg3_flag(tp, 5705_PLUS))
13828                 start = 0x08000000;
13829         else if (tg3_nvram_read(tp, offset - 4, &start))
13830                 return;
13831
13832         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13833             !tg3_fw_img_is_valid(tp, offset) ||
13834             tg3_nvram_read(tp, offset + 8, &val))
13835                 return;
13836
13837         offset += val - start;
13838
13839         vlen = strlen(tp->fw_ver);
13840
13841         tp->fw_ver[vlen++] = ',';
13842         tp->fw_ver[vlen++] = ' ';
13843
13844         for (i = 0; i < 4; i++) {
13845                 __be32 v;
13846                 if (tg3_nvram_read_be32(tp, offset, &v))
13847                         return;
13848
13849                 offset += sizeof(v);
13850
13851                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13852                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13853                         break;
13854                 }
13855
13856                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13857                 vlen += sizeof(v);
13858         }
13859 }
13860
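/* Orientation comment (added): query the APE for its firmware
 * version.  The APE segment signature and FW_STATUS_READY must check
 * out first; the feature bits then tell us whether the firmware is
 * NCSI or DASH, and the version string is tagged accordingly.
 */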
13861 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13862 {
13863         int vlen;
13864         u32 apedata;
13865         char *fwtype;
13866
13867         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13868                 return;
13869
13870         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13871         if (apedata != APE_SEG_SIG_MAGIC)
13872                 return;
13873
13874         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13875         if (!(apedata & APE_FW_STATUS_READY))
13876                 return;
13877
13878         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13879
13880         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13881                 tg3_flag_set(tp, APE_HAS_NCSI);
13882                 fwtype = "NCSI";
13883         } else {
13884                 fwtype = "DASH";
13885         }
13886
13887         vlen = strlen(tp->fw_ver);
13888
13889         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13890                  fwtype,
13891                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13892                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13893                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13894                  (apedata & APE_FW_VERSION_BLDMSK));
13895 }
13896
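/* Orientation comment (added): assemble the complete firmware version
 * string.  Any VPD version read earlier takes precedence; otherwise
 * the NVRAM magic value selects the bootcode, self-boot, or hardware
 * self-boot decoder, and ASF/APE management firmware versions are
 * appended when present.  The string is always NUL-terminated.
 */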
13897 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13898 {
13899         u32 val;
13900         bool vpd_vers = false;
13901
13902         if (tp->fw_ver[0] != 0)
13903                 vpd_vers = true;
13904
13905         if (tg3_flag(tp, NO_NVRAM)) {
13906                 strcat(tp->fw_ver, "sb");
13907                 return;
13908         }
13909
13910         if (tg3_nvram_read(tp, 0, &val))
13911                 return;
13912
13913         if (val == TG3_EEPROM_MAGIC)
13914                 tg3_read_bc_ver(tp);
13915         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13916                 tg3_read_sb_ver(tp, val);
13917         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13918                 tg3_read_hwsb_ver(tp);
13919         else
13920                 return;
13921
13922         if (vpd_vers)
13923                 goto done;
13924
13925         if (tg3_flag(tp, ENABLE_APE)) {
13926                 if (tg3_flag(tp, ENABLE_ASF))
13927                         tg3_read_dash_ver(tp);
13928         } else if (tg3_flag(tp, ENABLE_ASF)) {
13929                 tg3_read_mgmtfw_ver(tp);
13930         }
13931
13932 done:
13933         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13934 }
13935
13936 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13937 {
13938         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13939                 return TG3_RX_RET_MAX_SIZE_5717;
13940         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13941                 return TG3_RX_RET_MAX_SIZE_5700;
13942         else
13943                 return TG3_RX_RET_MAX_SIZE_5705;
13944 }
13945
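/* Orientation comment (added): host bridges known to reorder posted
 * writes to the mailbox registers.  If one of these is present (and
 * we are not on PCI Express), MBOX_WRITE_REORDER is set below and
 * every mailbox write is flushed with a read-back.
 */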
13946 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13947         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13948         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13949         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13950         { },
13951 };
13952
13953 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13954 {
13955         struct pci_dev *peer;
13956         unsigned int func, devnr = tp->pdev->devfn & ~7;
13957
13958         for (func = 0; func < 8; func++) {
13959                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13960                 if (peer && peer != tp->pdev)
13961                         break;
13962                 pci_dev_put(peer);
13963         }
13964         /* The 5704 can be configured in single-port mode; set peer to
13965          * tp->pdev in that case.
13966          */
13967         if (!peer) {
13968                 peer = tp->pdev;
13969                 return peer;
13970         }
13971
13972         /*
13973          * We don't need to keep the refcount elevated; there's no way
13974          * to remove one half of this device without removing the other.
13975          */
13976         pci_dev_put(peer);
13977
13978         return peer;
13979 }
13980
13981 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
13982 {
13983         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
13984         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13985                 u32 reg;
13986
13987                 /* All devices that use the alternate
13988                  * ASIC REV location have a CPMU.
13989                  */
13990                 tg3_flag_set(tp, CPMU_PRESENT);
13991
13992                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13993                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13994                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13995                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13996                         reg = TG3PCI_GEN2_PRODID_ASICREV;
13997                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13998                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13999                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14000                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14001                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14002                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14003                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14004                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14005                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14006                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14007                         reg = TG3PCI_GEN15_PRODID_ASICREV;
14008                 else
14009                         reg = TG3PCI_PRODID_ASICREV;
14010
14011                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14012         }
14013
14014         /* Wrong chip ID in 5752 A0. This code can be removed later
14015          * as A0 is not in production.
14016          */
14017         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14018                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14019
14020         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14021             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14022             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14023                 tg3_flag_set(tp, 5717_PLUS);
14024
14025         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14026             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14027                 tg3_flag_set(tp, 57765_CLASS);
14028
14029         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14030                 tg3_flag_set(tp, 57765_PLUS);
14031
14032         /* Intentionally exclude ASIC_REV_5906 */
14033         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14034             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14035             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14036             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14037             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14038             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14039             tg3_flag(tp, 57765_PLUS))
14040                 tg3_flag_set(tp, 5755_PLUS);
14041
14042         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14043             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14044                 tg3_flag_set(tp, 5780_CLASS);
14045
14046         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14047             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14048             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14049             tg3_flag(tp, 5755_PLUS) ||
14050             tg3_flag(tp, 5780_CLASS))
14051                 tg3_flag_set(tp, 5750_PLUS);
14052
14053         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14054             tg3_flag(tp, 5750_PLUS))
14055                 tg3_flag_set(tp, 5705_PLUS);
14056 }
14057
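/* Orientation comment (added): one-time probe-path discovery of
 * everything that varies by chip: ASIC revision, chipset workarounds,
 * TSO and MSI/MSI-X capability, register access methods, EEPROM
 * configuration, PHY quirks, and ring sizing.  As noted inside, the
 * PCI-X workaround decision must be made before the first MMIO access.
 */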
14058 static int __devinit tg3_get_invariants(struct tg3 *tp)
14059 {
14060         u32 misc_ctrl_reg;
14061         u32 pci_state_reg, grc_misc_cfg;
14062         u32 val;
14063         u16 pci_cmd;
14064         int err;
14065
14066         /* Force memory write invalidate off.  If we leave it on,
14067          * then on 5700_BX chips we have to enable a workaround.
14068          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14069          * to match the cacheline size.  The Broadcom driver has this
14070          * workaround but turns MWI off all the time and so never uses
14071          * it.  This suggests that the workaround is insufficient.
14072          */
14073         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14074         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14075         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14076
14077         /* Important! -- Make sure register accesses are byteswapped
14078          * correctly.  Also, for those chips that require it, make
14079          * sure that indirect register accesses are enabled before
14080          * the first operation.
14081          */
14082         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14083                               &misc_ctrl_reg);
14084         tp->misc_host_ctrl |= (misc_ctrl_reg &
14085                                MISC_HOST_CTRL_CHIPREV);
14086         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14087                                tp->misc_host_ctrl);
14088
14089         tg3_detect_asic_rev(tp, misc_ctrl_reg);
14090
14091         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14092          * we need to disable memory and use config. cycles
14093          * only to access all registers. The 5702/03 chips
14094          * can mistakenly decode the special cycles from the
14095          * ICH chipsets as memory write cycles, causing corruption
14096          * of register and memory space. Only certain ICH bridges
14097          * will drive special cycles with non-zero data during the
14098          * address phase which can fall within the 5703's address
14099          * range. This is not an ICH bug as the PCI spec allows
14100          * non-zero address during special cycles. However, only
14101          * these ICH bridges are known to drive non-zero addresses
14102          * during special cycles.
14103          *
14104          * Since special cycles do not cross PCI bridges, we only
14105          * enable this workaround if the 5703 is on the secondary
14106          * bus of these ICH bridges.
14107          */
14108         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14109             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14110                 static struct tg3_dev_id {
14111                         u32     vendor;
14112                         u32     device;
14113                         u32     rev;
14114                 } ich_chipsets[] = {
14115                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14116                           PCI_ANY_ID },
14117                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14118                           PCI_ANY_ID },
14119                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14120                           0xa },
14121                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14122                           PCI_ANY_ID },
14123                         { },
14124                 };
14125                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14126                 struct pci_dev *bridge = NULL;
14127
14128                 while (pci_id->vendor != 0) {
14129                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14130                                                 bridge);
14131                         if (!bridge) {
14132                                 pci_id++;
14133                                 continue;
14134                         }
14135                         if (pci_id->rev != PCI_ANY_ID) {
14136                                 if (bridge->revision > pci_id->rev)
14137                                         continue;
14138                         }
14139                         if (bridge->subordinate &&
14140                             (bridge->subordinate->number ==
14141                              tp->pdev->bus->number)) {
14142                                 tg3_flag_set(tp, ICH_WORKAROUND);
14143                                 pci_dev_put(bridge);
14144                                 break;
14145                         }
14146                 }
14147         }
14148
14149         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14150                 static struct tg3_dev_id {
14151                         u32     vendor;
14152                         u32     device;
14153                 } bridge_chipsets[] = {
14154                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14155                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14156                         { },
14157                 };
14158                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14159                 struct pci_dev *bridge = NULL;
14160
14161                 while (pci_id->vendor != 0) {
14162                         bridge = pci_get_device(pci_id->vendor,
14163                                                 pci_id->device,
14164                                                 bridge);
14165                         if (!bridge) {
14166                                 pci_id++;
14167                                 continue;
14168                         }
14169                         if (bridge->subordinate &&
14170                             (bridge->subordinate->number <=
14171                              tp->pdev->bus->number) &&
14172                             (bridge->subordinate->subordinate >=
14173                              tp->pdev->bus->number)) {
14174                                 tg3_flag_set(tp, 5701_DMA_BUG);
14175                                 pci_dev_put(bridge);
14176                                 break;
14177                         }
14178                 }
14179         }
14180
14181         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14182          * DMA addresses > 40 bits.  The bridge may also have other
14183          * 57xx devices behind it, e.g. in some 4-port NIC designs.
14184          * Any tg3 device found behind the bridge needs the 40-bit
14185          * DMA workaround as well.
14186          */
14187         if (tg3_flag(tp, 5780_CLASS)) {
14188                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14189                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14190         } else {
14191                 struct pci_dev *bridge = NULL;
14192
14193                 do {
14194                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14195                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14196                                                 bridge);
14197                         if (bridge && bridge->subordinate &&
14198                             (bridge->subordinate->number <=
14199                              tp->pdev->bus->number) &&
14200                             (bridge->subordinate->subordinate >=
14201                              tp->pdev->bus->number)) {
14202                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14203                                 pci_dev_put(bridge);
14204                                 break;
14205                         }
14206                 } while (bridge);
14207         }
14208
14209         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14210             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14211                 tp->pdev_peer = tg3_find_peer(tp);
14212
14213         /* Determine TSO capabilities */
14214         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14215                 ; /* Do nothing. HW bug. */
14216         else if (tg3_flag(tp, 57765_PLUS))
14217                 tg3_flag_set(tp, HW_TSO_3);
14218         else if (tg3_flag(tp, 5755_PLUS) ||
14219                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14220                 tg3_flag_set(tp, HW_TSO_2);
14221         else if (tg3_flag(tp, 5750_PLUS)) {
14222                 tg3_flag_set(tp, HW_TSO_1);
14223                 tg3_flag_set(tp, TSO_BUG);
14224                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14225                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14226                         tg3_flag_clear(tp, TSO_BUG);
14227         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14228                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14229                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14230                 tg3_flag_set(tp, TSO_BUG);
14231                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14232                         tp->fw_needed = FIRMWARE_TG3TSO5;
14233                 else
14234                         tp->fw_needed = FIRMWARE_TG3TSO;
14235         }
14236
14237         /* Selectively allow TSO based on operating conditions */
14238         if (tg3_flag(tp, HW_TSO_1) ||
14239             tg3_flag(tp, HW_TSO_2) ||
14240             tg3_flag(tp, HW_TSO_3) ||
14241             tp->fw_needed) {
14242                 /* For firmware TSO, assume ASF is disabled.
14243                  * We'll disable TSO later if we discover ASF
14244                  * is enabled in tg3_get_eeprom_hw_cfg().
14245                  */
14246                 tg3_flag_set(tp, TSO_CAPABLE);
14247         } else {
14248                 tg3_flag_clear(tp, TSO_CAPABLE);
14249                 tg3_flag_clear(tp, TSO_BUG);
14250                 tp->fw_needed = NULL;
14251         }
14252
14253         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14254                 tp->fw_needed = FIRMWARE_TG3;
14255
14256         tp->irq_max = 1;
14257
14258         if (tg3_flag(tp, 5750_PLUS)) {
14259                 tg3_flag_set(tp, SUPPORT_MSI);
14260                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14261                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14262                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14263                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14264                      tp->pdev_peer == tp->pdev))
14265                         tg3_flag_clear(tp, SUPPORT_MSI);
14266
14267                 if (tg3_flag(tp, 5755_PLUS) ||
14268                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14269                         tg3_flag_set(tp, 1SHOT_MSI);
14270                 }
14271
14272                 if (tg3_flag(tp, 57765_PLUS)) {
14273                         tg3_flag_set(tp, SUPPORT_MSIX);
14274                         tp->irq_max = TG3_IRQ_MAX_VECS;
14275                         tg3_rss_init_dflt_indir_tbl(tp);
14276                 }
14277         }
14278
14279         if (tg3_flag(tp, 5755_PLUS) ||
14280             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14281                 tg3_flag_set(tp, SHORT_DMA_BUG);
14282
14283         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14284                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14285
14286         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14287             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14288             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14289                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14290
14291         if (tg3_flag(tp, 57765_PLUS) &&
14292             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14293                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14294
14295         if (!tg3_flag(tp, 5705_PLUS) ||
14296             tg3_flag(tp, 5780_CLASS) ||
14297             tg3_flag(tp, USE_JUMBO_BDFLAG))
14298                 tg3_flag_set(tp, JUMBO_CAPABLE);
14299
14300         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14301                               &pci_state_reg);
14302
14303         if (pci_is_pcie(tp->pdev)) {
14304                 u16 lnkctl;
14305
14306                 tg3_flag_set(tp, PCI_EXPRESS);
14307
14308                 pci_read_config_word(tp->pdev,
14309                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14310                                      &lnkctl);
14311                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14312                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14313                             ASIC_REV_5906) {
14314                                 tg3_flag_clear(tp, HW_TSO_2);
14315                                 tg3_flag_clear(tp, TSO_CAPABLE);
14316                         }
14317                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14318                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14319                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14320                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14321                                 tg3_flag_set(tp, CLKREQ_BUG);
14322                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14323                         tg3_flag_set(tp, L1PLLPD_EN);
14324                 }
14325         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14326                 /* BCM5785 devices are effectively PCIe devices, and should
14327                  * follow PCIe codepaths, but do not have a PCIe capabilities
14328                  * section.
14329                  */
14330                 tg3_flag_set(tp, PCI_EXPRESS);
14331         } else if (!tg3_flag(tp, 5705_PLUS) ||
14332                    tg3_flag(tp, 5780_CLASS)) {
14333                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14334                 if (!tp->pcix_cap) {
14335                         dev_err(&tp->pdev->dev,
14336                                 "Cannot find PCI-X capability, aborting\n");
14337                         return -EIO;
14338                 }
14339
14340                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14341                         tg3_flag_set(tp, PCIX_MODE);
14342         }
14343
14344         /* If we have an AMD 762 or VIA K8T800 chipset, reordering
14345          * of mailbox register writes by the host controller can
14346          * cause major trouble.  We read back from every mailbox
14347          * register write to force the writes to be posted to the
14348          * chip in order.
14349          */
14350         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14351             !tg3_flag(tp, PCI_EXPRESS))
14352                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14353
14354         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14355                              &tp->pci_cacheline_sz);
14356         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14357                              &tp->pci_lat_timer);
14358         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14359             tp->pci_lat_timer < 64) {
14360                 tp->pci_lat_timer = 64;
14361                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14362                                       tp->pci_lat_timer);
14363         }
14364
14365         /* Important! -- It is critical that the PCI-X hw workaround
14366          * situation is decided before the first MMIO register access.
14367          */
14368         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14369                 /* 5700 BX chips need to have their TX producer index
14370                  * mailboxes written twice to workaround a bug.
14371                  */
14372                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14373
14374                 /* If we are in PCI-X mode, enable register write workaround.
14375                  *
14376                  * The workaround is to use indirect register accesses
14377                  * for all chip writes not to mailbox registers.
14378                  */
14379                 if (tg3_flag(tp, PCIX_MODE)) {
14380                         u32 pm_reg;
14381
14382                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14383
14384                         /* The chip can have its power management PCI config
14385                          * space registers clobbered due to this bug.
14386                          * So explicitly force the chip into D0 here.
14387                          */
14388                         pci_read_config_dword(tp->pdev,
14389                                               tp->pm_cap + PCI_PM_CTRL,
14390                                               &pm_reg);
14391                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14392                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14393                         pci_write_config_dword(tp->pdev,
14394                                                tp->pm_cap + PCI_PM_CTRL,
14395                                                pm_reg);
14396
14397                         /* Also, force SERR#/PERR# in PCI command. */
14398                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14399                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14400                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14401                 }
14402         }
14403
14404         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14405                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14406         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14407                 tg3_flag_set(tp, PCI_32BIT);
14408
14409         /* Chip-specific fixup from Broadcom driver */
14410         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14411             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14412                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14413                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14414         }
14415
14416         /* Default fast path register access methods */
14417         tp->read32 = tg3_read32;
14418         tp->write32 = tg3_write32;
14419         tp->read32_mbox = tg3_read32;
14420         tp->write32_mbox = tg3_write32;
14421         tp->write32_tx_mbox = tg3_write32;
14422         tp->write32_rx_mbox = tg3_write32;
14423
14424         /* Various workaround register access methods */
14425         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14426                 tp->write32 = tg3_write_indirect_reg32;
14427         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14428                  (tg3_flag(tp, PCI_EXPRESS) &&
14429                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14430                 /*
14431                  * Back to back register writes can cause problems on these
14432                  * chips, the workaround is to read back all reg writes
14433                  * except those to mailbox regs.
14434                  *
14435                  * See tg3_write_indirect_reg32().
14436                  */
14437                 tp->write32 = tg3_write_flush_reg32;
14438         }
14439
14440         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14441                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14442                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14443                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14444         }
14445
14446         if (tg3_flag(tp, ICH_WORKAROUND)) {
14447                 tp->read32 = tg3_read_indirect_reg32;
14448                 tp->write32 = tg3_write_indirect_reg32;
14449                 tp->read32_mbox = tg3_read_indirect_mbox;
14450                 tp->write32_mbox = tg3_write_indirect_mbox;
14451                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14452                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14453
14454                 iounmap(tp->regs);
14455                 tp->regs = NULL;
14456
14457                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14458                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14459                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14460         }
14461         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14462                 tp->read32_mbox = tg3_read32_mbox_5906;
14463                 tp->write32_mbox = tg3_write32_mbox_5906;
14464                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14465                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14466         }
14467
14468         if (tp->write32 == tg3_write_indirect_reg32 ||
14469             (tg3_flag(tp, PCIX_MODE) &&
14470              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14471               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14472                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14473
14474         /* The memory arbiter has to be enabled in order for SRAM accesses
14475          * to succeed.  Normally on powerup the tg3 chip firmware will make
14476          * sure it is enabled, but other entities such as system netboot
14477          * code might disable it.
14478          */
14479         val = tr32(MEMARB_MODE);
14480         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14481
14482         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14483         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14484             tg3_flag(tp, 5780_CLASS)) {
14485                 if (tg3_flag(tp, PCIX_MODE)) {
14486                         pci_read_config_dword(tp->pdev,
14487                                               tp->pcix_cap + PCI_X_STATUS,
14488                                               &val);
14489                         tp->pci_fn = val & 0x7;
14490                 }
14491         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14492                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14493                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14494                     NIC_SRAM_CPMUSTAT_SIG) {
14495                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14496                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14497                 }
14498         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14499                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14500                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14501                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14502                     NIC_SRAM_CPMUSTAT_SIG) {
14503                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14504                                      TG3_CPMU_STATUS_FSHFT_5719;
14505                 }
14506         }
14507
14508         /* Get eeprom hw config before calling tg3_set_power_state().
14509          * In particular, the TG3_FLAG_IS_NIC flag must be
14510          * determined before calling tg3_set_power_state() so that
14511          * we know whether or not to switch out of Vaux power.
14512          * When the flag is set, it means that GPIO1 is used for eeprom
14513          * write protect and also implies that it is a LOM where GPIOs
14514          * are not used to switch power.
14515          */
14516         tg3_get_eeprom_hw_cfg(tp);
14517
14518         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14519                 tg3_flag_clear(tp, TSO_CAPABLE);
14520                 tg3_flag_clear(tp, TSO_BUG);
14521                 tp->fw_needed = NULL;
14522         }
14523
14524         if (tg3_flag(tp, ENABLE_APE)) {
14525                 /* Allow reads and writes to the
14526                  * APE register and memory space.
14527                  */
14528                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14529                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14530                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14531                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14532                                        pci_state_reg);
14533
14534                 tg3_ape_lock_init(tp);
14535         }
14536
14537         /* Set up tp->grc_local_ctrl before calling
14538          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14539          * will bring 5700's external PHY out of reset.
14540          * It is also used as eeprom write protect on LOMs.
14541          */
14542         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14543         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14544             tg3_flag(tp, EEPROM_WRITE_PROT))
14545                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14546                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14547         /* Unused GPIO3 must be driven as output on 5752 because there
14548          * are no pull-up resistors on unused GPIO pins.
14549          */
14550         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14551                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14552
14553         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14554             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14555             tg3_flag(tp, 57765_CLASS))
14556                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14557
14558         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14559             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14560                 /* Turn off the debug UART. */
14561                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14562                 if (tg3_flag(tp, IS_NIC))
14563                         /* Keep VMain power. */
14564                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14565                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14566         }
14567
14568         /* Switch out of Vaux if it is a NIC */
14569         tg3_pwrsrc_switch_to_vmain(tp);
14570
14571         /* Derive initial jumbo mode from MTU assigned in
14572          * ether_setup() via the alloc_etherdev() call
14573          */
14574         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14575                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14576
14577         /* Determine WakeOnLan speed to use. */
14578         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14579             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14580             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14581             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14582                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14583         } else {
14584                 tg3_flag_set(tp, WOL_SPEED_100MB);
14585         }
14586
14587         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14588                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14589
14590         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
14591         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14592             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14593              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14594              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14595             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14596             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14597                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14598
14599         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14600             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14601                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14602         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14603                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14604
14605         if (tg3_flag(tp, 5705_PLUS) &&
14606             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14607             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14608             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14609             !tg3_flag(tp, 57765_PLUS)) {
14610                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14611                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14612                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14613                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14614                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14615                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14616                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14617                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14618                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14619                 } else
14620                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14621         }
14622
14623         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14624             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14625                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14626                 if (tp->phy_otp == 0)
14627                         tp->phy_otp = TG3_OTP_DEFAULT;
14628         }
14629
14630         if (tg3_flag(tp, CPMU_PRESENT))
14631                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14632         else
14633                 tp->mi_mode = MAC_MI_MODE_BASE;
14634
14635         tp->coalesce_mode = 0;
14636         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14637             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14638                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14639
14640         /* Set these bits to enable statistics workaround. */
14641         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14642             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14643             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14644                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14645                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14646         }
14647
14648         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14649             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14650                 tg3_flag_set(tp, USE_PHYLIB);
14651
14652         err = tg3_mdio_init(tp);
14653         if (err)
14654                 return err;
14655
14656         /* Initialize data/descriptor byte/word swapping. */
14657         val = tr32(GRC_MODE);
14658         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14659                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14660                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14661                         GRC_MODE_B2HRX_ENABLE |
14662                         GRC_MODE_HTX2B_ENABLE |
14663                         GRC_MODE_HOST_STACKUP);
14664         else
14665                 val &= GRC_MODE_HOST_STACKUP;
14666
14667         tw32(GRC_MODE, val | tp->grc_mode);
14668
14669         tg3_switch_clocks(tp);
14670
14671         /* Clear this out for sanity. */
14672         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14673
14674         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14675                               &pci_state_reg);
14676         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14677             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14678                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14679
14680                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14681                     chiprevid == CHIPREV_ID_5701_B0 ||
14682                     chiprevid == CHIPREV_ID_5701_B2 ||
14683                     chiprevid == CHIPREV_ID_5701_B5) {
14684                         void __iomem *sram_base;
14685
14686                         /* Write some dummy words into the SRAM status block
14687                          * area, see if it reads back correctly.  If the return
14688                          * value is bad, force enable the PCIX workaround.
14689                          */
14690                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14691
14692                         writel(0x00000000, sram_base);
14693                         writel(0x00000000, sram_base + 4);
14694                         writel(0xffffffff, sram_base + 4);
14695                         if (readl(sram_base) != 0x00000000)
14696                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14697                 }
14698         }
14699
14700         udelay(50);
14701         tg3_nvram_init(tp);
14702
14703         grc_misc_cfg = tr32(GRC_MISC_CFG);
14704         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14705
14706         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14707             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14708              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14709                 tg3_flag_set(tp, IS_5788);
14710
14711         if (!tg3_flag(tp, IS_5788) &&
14712             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14713                 tg3_flag_set(tp, TAGGED_STATUS);
14714         if (tg3_flag(tp, TAGGED_STATUS)) {
14715                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14716                                       HOSTCC_MODE_CLRTICK_TXBD);
14717
14718                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14719                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14720                                        tp->misc_host_ctrl);
14721         }
14722
14723         /* Preserve the APE MAC_MODE bits */
14724         if (tg3_flag(tp, ENABLE_APE))
14725                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14726         else
14727                 tp->mac_mode = 0;
14728
14729         /* these are limited to 10/100 only */
14730         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14731              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14732             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14733              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14734              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14735               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14736               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14737             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14738              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14739               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14740               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14741             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14742             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14743             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14744             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14745                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14746
14747         err = tg3_phy_probe(tp);
14748         if (err) {
14749                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14750                 /* ... but do not return immediately ... */
14751                 tg3_mdio_fini(tp);
14752         }
14753
14754         tg3_read_vpd(tp);
14755         tg3_read_fw_ver(tp);
14756
14757         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14758                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14759         } else {
14760                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14761                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14762                 else
14763                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14764         }
14765
14766         /* 5700 {AX,BX} chips have a broken status block link
14767          * change bit implementation, so we must use the
14768          * status register in those cases.
14769          */
14770         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14771                 tg3_flag_set(tp, USE_LINKCHG_REG);
14772         else
14773                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14774
14775         /* The led_ctrl is set during tg3_phy_probe; here we might
14776          * have to force the link status polling mechanism based
14777          * upon subsystem IDs.
14778          */
14779         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14780             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14781             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14782                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14783                 tg3_flag_set(tp, USE_LINKCHG_REG);
14784         }
14785
14786         /* For all SERDES we poll the MAC status register. */
14787         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14788                 tg3_flag_set(tp, POLL_SERDES);
14789         else
14790                 tg3_flag_clear(tp, POLL_SERDES);
14791
14792         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14793         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14794         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14795             tg3_flag(tp, PCIX_MODE)) {
14796                 tp->rx_offset = NET_SKB_PAD;
14797 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14798                 tp->rx_copy_thresh = ~(u16)0;
14799 #endif
14800         }
14801
14802         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14803         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14804         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14805
14806         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14807
14808         /* Increment the rx prod index on the rx std ring by at most
14809          * 8 for these chips to work around hw errata.
14810          */
14811         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14812             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14813             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14814                 tp->rx_std_max_post = 8;
14815
14816         if (tg3_flag(tp, ASPM_WORKAROUND))
14817                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14818                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14819
14820         return err;
14821 }
14822
14823 #ifdef CONFIG_SPARC
14824 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14825 {
14826         struct net_device *dev = tp->dev;
14827         struct pci_dev *pdev = tp->pdev;
14828         struct device_node *dp = pci_device_to_OF_node(pdev);
14829         const unsigned char *addr;
14830         int len;
14831
14832         addr = of_get_property(dp, "local-mac-address", &len);
14833         if (addr && len == 6) {
14834                 memcpy(dev->dev_addr, addr, 6);
14835                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14836                 return 0;
14837         }
14838         return -ENODEV;
14839 }
14840
14841 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14842 {
14843         struct net_device *dev = tp->dev;
14844
14845         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14846         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14847         return 0;
14848 }
14849 #endif
14850
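/* Orientation comment (added): determine the MAC address, trying
 * sources in decreasing order of trust: firmware OF property (SPARC),
 * the SRAM mailbox left by bootcode, NVRAM at a chip-specific offset,
 * then the live MAC_ADDR registers, with the SPARC idprom as a last
 * resort.
 */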
14851 static int __devinit tg3_get_device_address(struct tg3 *tp)
14852 {
14853         struct net_device *dev = tp->dev;
14854         u32 hi, lo, mac_offset;
14855         int addr_ok = 0;
14856
14857 #ifdef CONFIG_SPARC
14858         if (!tg3_get_macaddr_sparc(tp))
14859                 return 0;
14860 #endif
14861
14862         mac_offset = 0x7c;
14863         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14864             tg3_flag(tp, 5780_CLASS)) {
14865                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14866                         mac_offset = 0xcc;
14867                 if (tg3_nvram_lock(tp))
14868                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14869                 else
14870                         tg3_nvram_unlock(tp);
14871         } else if (tg3_flag(tp, 5717_PLUS)) {
14872                 if (tp->pci_fn & 1)
14873                         mac_offset = 0xcc;
14874                 if (tp->pci_fn > 1)
14875                         mac_offset += 0x18c;
14876         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14877                 mac_offset = 0x10;
14878
14879         /* First try to get it from MAC address mailbox. */
14880         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
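        /* 0x484b is ASCII "HK"; bootcode appears to use it as a
         * signature marking a valid MAC address in the mailbox.
         */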
14881         if ((hi >> 16) == 0x484b) {
14882                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14883                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14884
14885                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14886                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14887                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14888                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14889                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14890
14891                 /* Some old bootcode may report a 0 MAC address in SRAM */
14892                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14893         }
14894         if (!addr_ok) {
14895                 /* Next, try NVRAM. */
14896                 if (!tg3_flag(tp, NO_NVRAM) &&
14897                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14898                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14899                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14900                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14901                 }
14902                 /* Finally just fetch it out of the MAC control regs. */
14903                 else {
14904                         hi = tr32(MAC_ADDR_0_HIGH);
14905                         lo = tr32(MAC_ADDR_0_LOW);
14906
14907                         dev->dev_addr[5] = lo & 0xff;
14908                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14909                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14910                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14911                         dev->dev_addr[1] = hi & 0xff;
14912                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14913                 }
14914         }
14915
14916         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14917 #ifdef CONFIG_SPARC
14918                 if (!tg3_get_default_macaddr_sparc(tp))
14919                         return 0;
14920 #endif
14921                 return -EINVAL;
14922         }
14923         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14924         return 0;
14925 }
14926
14927 #define BOUNDARY_SINGLE_CACHELINE       1
14928 #define BOUNDARY_MULTI_CACHELINE        2
14929
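/* Orientation comment (added): choose the DMA read/write boundary
 * bits for DMA_RWCTRL.  The goal depends on the architecture: hosts
 * whose PCI controllers disconnect on bursts that cross a cacheline
 * want transfers broken at (or within) a cacheline; others leave the
 * boundary wide open.  The incoming val is modified and returned.
 */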
14930 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14931 {
14932         int cacheline_size;
14933         u8 byte;
14934         int goal;
14935
14936         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14937         if (byte == 0)
14938                 cacheline_size = 1024;
14939         else
14940                 cacheline_size = (int) byte * 4;
14941
14942         /* On 5703 and later chips, the boundary bits have no
14943          * effect.
14944          */
14945         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14946             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14947             !tg3_flag(tp, PCI_EXPRESS))
14948                 goto out;
14949
14950 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14951         goal = BOUNDARY_MULTI_CACHELINE;
14952 #else
14953 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14954         goal = BOUNDARY_SINGLE_CACHELINE;
14955 #else
14956         goal = 0;
14957 #endif
14958 #endif
14959
14960         if (tg3_flag(tp, 57765_PLUS)) {
14961                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14962                 goto out;
14963         }
14964
14965         if (!goal)
14966                 goto out;
14967
14968         /* PCI controllers on most RISC systems tend to disconnect
14969          * when a device tries to burst across a cache-line boundary.
14970          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14971          *
14972          * Unfortunately, for PCI-E there are only limited
14973          * write-side controls for this, and thus for reads
14974          * we will still get the disconnects.  We'll also waste
14975          * these PCI cycles for both read and write for chips
14976          * other than the 5700 and 5701, which do not implement
14977          * the boundary bits.
14978          */
14979         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14980                 switch (cacheline_size) {
14981                 case 16:
14982                 case 32:
14983                 case 64:
14984                 case 128:
14985                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14986                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14987                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14988                         } else {
14989                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14990                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14991                         }
14992                         break;
14993
14994                 case 256:
14995                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14996                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14997                         break;
14998
14999                 default:
15000                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15001                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15002                         break;
15003                 }
15004         } else if (tg3_flag(tp, PCI_EXPRESS)) {
15005                 switch (cacheline_size) {
15006                 case 16:
15007                 case 32:
15008                 case 64:
15009                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15010                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15011                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15012                                 break;
15013                         }
15014                         /* fallthrough */
15015                 case 128:
15016                 default:
15017                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15018                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15019                         break;
15020                 }
15021         } else {
15022                 switch (cacheline_size) {
15023                 case 16:
15024                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15025                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15026                                         DMA_RWCTRL_WRITE_BNDRY_16);
15027                                 break;
15028                         }
15029                         /* fallthrough */
15030                 case 32:
15031                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15032                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15033                                         DMA_RWCTRL_WRITE_BNDRY_32);
15034                                 break;
15035                         }
15036                         /* fallthrough */
15037                 case 64:
15038                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15039                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15040                                         DMA_RWCTRL_WRITE_BNDRY_64);
15041                                 break;
15042                         }
15043                         /* fallthrough */
15044                 case 128:
15045                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15046                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15047                                         DMA_RWCTRL_WRITE_BNDRY_128);
15048                                 break;
15049                         }
15050                         /* fallthrough */
15051                 case 256:
15052                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
15053                                 DMA_RWCTRL_WRITE_BNDRY_256);
15054                         break;
15055                 case 512:
15056                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
15057                                 DMA_RWCTRL_WRITE_BNDRY_512);
15058                         break;
15059                 case 1024:
15060                 default:
15061                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15062                                 DMA_RWCTRL_WRITE_BNDRY_1024);
15063                         break;
15064                 }
15065         }
15066
15067 out:
15068         return val;
15069 }
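
/*
 * For context: the cacheline_size used above is derived, earlier in this
 * function, from the PCI_CACHE_LINE_SIZE config register, which counts
 * 32-bit words.  A minimal sketch of that derivation follows; the exact
 * fallback for a zero register value is an assumption here, and the block
 * is disabled and purely illustrative.
 */
#if 0
        {
                u8 byte;
                int cacheline_size;

                pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
                if (byte == 0)
                        cacheline_size = 1024;  /* assumed "unset" fallback */
                else
                        cacheline_size = (int)byte * 4; /* register counts dwords */
        }
#endif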

static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf,
                                     dma_addr_t buf_dma, int size,
                                     int to_device)
{
        struct tg3_internal_buffer_desc test_desc;
        u32 sram_dma_descs;
        int i, ret;

        sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

        tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
        tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
        tw32(RDMAC_STATUS, 0);
        tw32(WDMAC_STATUS, 0);

        tw32(BUFMGR_MODE, 0);
        tw32(FTQ_RESET, 0);

        test_desc.addr_hi = ((u64) buf_dma) >> 32;
        test_desc.addr_lo = buf_dma & 0xffffffff;
        test_desc.nic_mbuf = 0x00002100;
        test_desc.len = size;

        /*
         * HP ZX1 systems were seeing test failures on 5701 cards running
         * at 33MHz the *second* time the tg3 driver was loaded after an
         * initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
        if (to_device) {
                test_desc.cqid_sqid = (13 << 8) | 2;

                tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
                udelay(40);
        } else {
                test_desc.cqid_sqid = (16 << 8) | 7;

                tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
                udelay(40);
        }
        test_desc.flags = 0x00000005;

        for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
                u32 val;

                val = *(((u32 *)&test_desc) + i);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs + (i * sizeof(u32)));
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        }
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
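
        /*
         * Illustrative sketch (disabled): reads through the same indirect
         * memory window use the mirror sequence -- program the window base,
         * read TG3PCI_MEM_WIN_DATA, then park the base back at zero.  The
         * driver's tg3_read_mem() helper (used in the disabled verification
         * block in tg3_test_dma() below) wraps this pattern.
         */
#if 0
        {
                u32 readback;

                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA,
                                      &readback);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
#endif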

        if (to_device)
                tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
        else
                tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

        ret = -ENODEV;
        for (i = 0; i < 40; i++) {
                u32 val;

                if (to_device)
                        val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
                else
                        val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
                if ((val & 0xffff) == sram_dma_descs) {
                        ret = 0;
                        break;
                }

                udelay(100);
        }

        return ret;
}
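
/*
 * A condensed sketch of how this helper is driven (the real logic,
 * including the write-boundary retry loop, lives in tg3_test_dma()
 * below): allocate a coherent buffer, DMA it to the chip, DMA it back,
 * and compare.  Uses TEST_BUFFER_SIZE as defined just below; disabled
 * and purely illustrative.
 */
#if 0
static int __devinit tg3_dma_roundtrip_sketch(struct tg3 *tp)
{
        dma_addr_t buf_dma;
        u32 *buf;
        int i, ret;

        buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
                                 &buf_dma, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
                buf[i] = i;

        ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
        if (!ret)
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);

        for (i = 0; !ret && i < TEST_BUFFER_SIZE / sizeof(u32); i++)
                if (buf[i] != i)
                        ret = -ENODEV;

        dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
        return ret;
}
#endif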

#define TEST_BUFFER_SIZE        0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
        { },
};

static int __devinit tg3_test_dma(struct tg3 *tp)
{
        dma_addr_t buf_dma;
        u32 *buf, saved_dma_rwctrl;
        int ret = 0;

        buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
                                 &buf_dma, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto out_nofree;
        }

        tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                          (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

        if (tg3_flag(tp, 57765_PLUS))
                goto out;

        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* DMA read watermark not used on PCIE */
                tp->dma_rwctrl |= 0x00180000;
        } else if (!tg3_flag(tp, PCIX_MODE)) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
                        tp->dma_rwctrl |= 0x003f0000;
                else
                        tp->dma_rwctrl |= 0x003f000f;
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
                        u32 read_water = 0x7;

                        /* If the 5704 is behind the EPB bridge, we can
                         * do the less restrictive ONE_DMA workaround for
                         * better performance.
                         */
                        if (tg3_flag(tp, 40BIT_DMA_BUG) &&
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                                tp->dma_rwctrl |= 0x8000;
                        else if (ccval == 0x6 || ccval == 0x7)
                                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
                                read_water = 4;
                        /* Set bit 23 to enable PCIX hw bug fix */
                        tp->dma_rwctrl |=
                                (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
                                (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
                                (1 << 23);
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
                        /* 5780 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00144000;
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
                        /* 5714 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00148000;
                } else {
                        tp->dma_rwctrl |= 0x001b000f;
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                tp->dma_rwctrl &= 0xfffffff0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                /* Remove this if it causes problems for some boards. */
                tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

                /* On 5700/5701 chips, we need to set this bit.
                 * Otherwise the chip will issue cacheline transactions
                 * to streamable DMA memory without all of the byte
                 * enables turned on.  This is an error on several
                 * RISC PCI controllers, in particular sparc64.
                 *
                 * On 5703/5704 chips, this bit has been reassigned
                 * a different meaning.  In particular, it is used
                 * on those chips to enable a PCI-X workaround.
                 */
                tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
        }

        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
        /* Unneeded, already done by tg3_get_invariants.  */
        tg3_switch_clocks(tp);
#endif

        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                goto out;

        /* It is best to perform the DMA test with the maximum write burst
         * size to expose the 5700/5701 write DMA bug.
         */
        saved_dma_rwctrl = tp->dma_rwctrl;
        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

        while (1) {
                u32 *p = buf, i;

                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
                        p[i] = i;

                /* Send the buffer to the chip. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
                if (ret) {
                        dev_err(&tp->pdev->dev,
                                "%s: Buffer write failed. err = %d\n",
                                __func__, ret);
                        break;
                }

#if 0
                /* validate data reached card RAM correctly. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        u32 val;
                        tg3_read_mem(tp, 0x2100 + (i*4), &val);
                        if (le32_to_cpu(val) != p[i]) {
                                dev_err(&tp->pdev->dev,
                                        "%s: Buffer corrupted on device! "
                                        "(%d != %d)\n", __func__,
                                        le32_to_cpu(val), p[i]);
                                /* ret = -ENODEV here? */
                        }
                        p[i] = 0;
                }
#endif
                /* Now read it back. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
                if (ret) {
                        dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
                                "err = %d\n", __func__, ret);
                        break;
                }

                /* Verify it. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        if (p[i] == i)
                                continue;

                        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
                            DMA_RWCTRL_WRITE_BNDRY_16) {
                                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                                break;
                        } else {
                                dev_err(&tp->pdev->dev,
                                        "%s: Buffer corrupted on read back! "
                                        "(%d != %d)\n", __func__, p[i], i);
                                ret = -ENODEV;
                                goto out;
                        }
                }

                if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
                        /* Success. */
                        ret = 0;
                        break;
                }
        }
        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
            DMA_RWCTRL_WRITE_BNDRY_16) {
                /* DMA test passed without adjusting DMA boundary,
                 * now look for chipsets that are known to expose the
                 * DMA bug without failing the test.
                 */
                if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
                        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                        tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                } else {
                        /* Safe to use the calculated DMA boundary. */
                        tp->dma_rwctrl = saved_dma_rwctrl;
                }

                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
        }

out:
        dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
        return ret;
}

static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
        if (tg3_flag(tp, 57765_PLUS)) {
                tp->bufmgr_config.mbuf_read_dma_low_water =
                        DEFAULT_MB_RDMA_LOW_WATER_5705;
                tp->bufmgr_config.mbuf_mac_rx_low_water =
                        DEFAULT_MB_MACRX_LOW_WATER_57765;
                tp->bufmgr_config.mbuf_high_water =
                        DEFAULT_MB_HIGH_WATER_57765;

                tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
                        DEFAULT_MB_RDMA_LOW_WATER_5705;
                tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
                        DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
                tp->bufmgr_config.mbuf_high_water_jumbo =
                        DEFAULT_MB_HIGH_WATER_JUMBO_57765;
        } else if (tg3_flag(tp, 5705_PLUS)) {
                tp->bufmgr_config.mbuf_read_dma_low_water =
                        DEFAULT_MB_RDMA_LOW_WATER_5705;
                tp->bufmgr_config.mbuf_mac_rx_low_water =
                        DEFAULT_MB_MACRX_LOW_WATER_5705;
                tp->bufmgr_config.mbuf_high_water =
                        DEFAULT_MB_HIGH_WATER_5705;
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                        tp->bufmgr_config.mbuf_mac_rx_low_water =
                                DEFAULT_MB_MACRX_LOW_WATER_5906;
                        tp->bufmgr_config.mbuf_high_water =
                                DEFAULT_MB_HIGH_WATER_5906;
                }

                tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
                        DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
                tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
                        DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
                tp->bufmgr_config.mbuf_high_water_jumbo =
                        DEFAULT_MB_HIGH_WATER_JUMBO_5780;
        } else {
                tp->bufmgr_config.mbuf_read_dma_low_water =
                        DEFAULT_MB_RDMA_LOW_WATER;
                tp->bufmgr_config.mbuf_mac_rx_low_water =
                        DEFAULT_MB_MACRX_LOW_WATER;
                tp->bufmgr_config.mbuf_high_water =
                        DEFAULT_MB_HIGH_WATER;

                tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
                        DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
                tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
                        DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
                tp->bufmgr_config.mbuf_high_water_jumbo =
                        DEFAULT_MB_HIGH_WATER_JUMBO;
        }

        tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
        tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
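
/*
 * For reference, these watermarks are later programmed into the buffer
 * manager during hardware init.  A minimal sketch of that step -- the
 * BUFMGR_* register names below are assumptions based on the usual
 * naming in tg3.h, and the real code (in tg3_reset_hw() elsewhere in
 * this file) also handles the jumbo variants.  Disabled, illustrative
 * fragment of function-body code only.
 */
#if 0
        tw32(BUFMGR_MB_RDMA_LOW_WATER,
             tp->bufmgr_config.mbuf_read_dma_low_water);
        tw32(BUFMGR_MB_MACRX_LOW_WATER,
             tp->bufmgr_config.mbuf_mac_rx_low_water);
        tw32(BUFMGR_MB_HIGH_WATER,
             tp->bufmgr_config.mbuf_high_water);
#endif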

static char * __devinit tg3_phy_string(struct tg3 *tp)
{
        switch (tp->phy_id & TG3_PHY_ID_MASK) {
        case TG3_PHY_ID_BCM5400:        return "5400";
        case TG3_PHY_ID_BCM5401:        return "5401";
        case TG3_PHY_ID_BCM5411:        return "5411";
        case TG3_PHY_ID_BCM5701:        return "5701";
        case TG3_PHY_ID_BCM5703:        return "5703";
        case TG3_PHY_ID_BCM5704:        return "5704";
        case TG3_PHY_ID_BCM5705:        return "5705";
        case TG3_PHY_ID_BCM5750:        return "5750";
        case TG3_PHY_ID_BCM5752:        return "5752";
        case TG3_PHY_ID_BCM5714:        return "5714";
        case TG3_PHY_ID_BCM5780:        return "5780";
        case TG3_PHY_ID_BCM5755:        return "5755";
        case TG3_PHY_ID_BCM5787:        return "5787";
        case TG3_PHY_ID_BCM5784:        return "5784";
        case TG3_PHY_ID_BCM5756:        return "5722/5756";
        case TG3_PHY_ID_BCM5906:        return "5906";
        case TG3_PHY_ID_BCM5761:        return "5761";
        case TG3_PHY_ID_BCM5718C:       return "5718C";
        case TG3_PHY_ID_BCM5718S:       return "5718S";
        case TG3_PHY_ID_BCM57765:       return "57765";
        case TG3_PHY_ID_BCM5719C:       return "5719C";
        case TG3_PHY_ID_BCM5720C:       return "5720C";
        case TG3_PHY_ID_BCM8002:        return "8002/serdes";
        case 0:                 return "serdes";
        default:                return "unknown";
        }
}

static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
        if (tg3_flag(tp, PCI_EXPRESS)) {
                strcpy(str, "PCI Express");
                return str;
        } else if (tg3_flag(tp, PCIX_MODE)) {
                u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

                strcpy(str, "PCIX:");

                if ((clock_ctrl == 7) ||
                    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
                     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
                        strcat(str, "133MHz");
                else if (clock_ctrl == 0)
                        strcat(str, "33MHz");
                else if (clock_ctrl == 2)
                        strcat(str, "50MHz");
                else if (clock_ctrl == 4)
                        strcat(str, "66MHz");
                else if (clock_ctrl == 6)
                        strcat(str, "100MHz");
        } else {
                strcpy(str, "PCI:");
                if (tg3_flag(tp, PCI_HIGH_SPEED))
                        strcat(str, "66MHz");
                else
                        strcat(str, "33MHz");
        }
        if (tg3_flag(tp, PCI_32BIT))
                strcat(str, ":32-bit");
        else
                strcat(str, ":64-bit");
        return str;
}

static void __devinit tg3_init_coal(struct tg3 *tp)
{
        struct ethtool_coalesce *ec = &tp->coal;

        memset(ec, 0, sizeof(*ec));
        ec->cmd = ETHTOOL_GCOALESCE;
        ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
        ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
        ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
        ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
        ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
        ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
        ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
        ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
        ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

        if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
                                 HOSTCC_MODE_CLRTICK_TXBD)) {
                ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
                ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
                ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
                ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
        }

        if (tg3_flag(tp, 5705_PLUS)) {
                ec->rx_coalesce_usecs_irq = 0;
                ec->tx_coalesce_usecs_irq = 0;
                ec->stats_block_coalesce_usecs = 0;
        }
}
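
/*
 * These defaults are what "ethtool -c <iface>" reports: the driver's
 * ethtool get_coalesce hook copies tp->coal out to userspace.  A minimal
 * sketch of such a hook follows; the actual handler is defined earlier
 * in this file, so treat the body below as illustrative rather than
 * verbatim.
 */
#if 0
static int tg3_get_coalesce_sketch(struct net_device *dev,
                                   struct ethtool_coalesce *ec)
{
        struct tg3 *tp = netdev_priv(dev);

        memcpy(ec, &tp->coal, sizeof(*ec));
        return 0;
}
#endif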

static int __devinit tg3_init_one(struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct tg3 *tp;
        int i, err, pm_cap;
        u32 sndmbx, rcvmbx, intmbx;
        char str[40];
        u64 dma_mask, persist_dma_mask;
        netdev_features_t features = 0;

        printk_once(KERN_INFO "%s\n", version);

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
                return err;
        }

        err = pci_request_regions(pdev, DRV_MODULE_NAME);
        if (err) {
                dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
                goto err_out_disable_pdev;
        }

        pci_set_master(pdev);

        /* Find power-management capability. */
        pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (pm_cap == 0) {
                dev_err(&pdev->dev,
                        "Cannot find Power Management capability, aborting\n");
                err = -EIO;
                goto err_out_free_res;
        }

        err = pci_set_power_state(pdev, PCI_D0);
        if (err) {
                dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
                goto err_out_free_res;
        }

        dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
        if (!dev) {
                err = -ENOMEM;
                goto err_out_power_down;
        }

        SET_NETDEV_DEV(dev, &pdev->dev);

        tp = netdev_priv(dev);
        tp->pdev = pdev;
        tp->dev = dev;
        tp->pm_cap = pm_cap;
        tp->rx_mode = TG3_DEF_RX_MODE;
        tp->tx_mode = TG3_DEF_TX_MODE;

        if (tg3_debug > 0)
                tp->msg_enable = tg3_debug;
        else
                tp->msg_enable = TG3_DEF_MSG_ENABLE;

        /* The word/byte swap controls here govern byte swapping for
         * register accesses only.  DMA data byte swapping is controlled
         * in the GRC_MODE setting below.
         */
        tp->misc_host_ctrl =
                MISC_HOST_CTRL_MASK_PCI_INT |
                MISC_HOST_CTRL_WORD_SWAP |
                MISC_HOST_CTRL_INDIR_ACCESS |
                MISC_HOST_CTRL_PCISTATE_RW;

        /* The NONFRM (non-frame) byte/word swap controls take effect
         * on descriptor entries, i.e. anything that isn't packet data.
         *
         * The StrongARM chips on the board (one for tx, one for rx)
         * are running in big-endian mode.
         */
        tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
                        GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
        tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
        spin_lock_init(&tp->lock);
        spin_lock_init(&tp->indirect_lock);
        INIT_WORK(&tp->reset_task, tg3_reset_task);

        tp->regs = pci_ioremap_bar(pdev, BAR_0);
        if (!tp->regs) {
                dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
                err = -ENOMEM;
                goto err_out_free_dev;
        }

        if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
            tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
                tg3_flag_set(tp, ENABLE_APE);
                tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
                if (!tp->aperegs) {
                        dev_err(&pdev->dev,
                                "Cannot map APE registers, aborting\n");
                        err = -ENOMEM;
                        goto err_out_iounmap;
                }
        }

        tp->rx_pending = TG3_DEF_RX_RING_PENDING;
        tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

        dev->ethtool_ops = &tg3_ethtool_ops;
        dev->watchdog_timeo = TG3_TX_TIMEOUT;
        dev->netdev_ops = &tg3_netdev_ops;
        dev->irq = pdev->irq;

        err = tg3_get_invariants(tp);
        if (err) {
                dev_err(&pdev->dev,
                        "Problem fetching invariants of chip, aborting\n");
                goto err_out_apeunmap;
        }

        /* The EPB bridge inside 5714, 5715, and 5780 and any
         * device behind the EPB cannot support DMA addresses > 40-bit.
         * On 64-bit systems with IOMMU, use 40-bit dma_mask.
         * On 64-bit systems without IOMMU, use 64-bit dma_mask and
         * do DMA address check in tg3_start_xmit().
         */
        if (tg3_flag(tp, IS_5788))
                persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
        else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
                persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
                dma_mask = DMA_BIT_MASK(64);
#endif
        } else
                persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

        /* Configure DMA attributes. */
        if (dma_mask > DMA_BIT_MASK(32)) {
                err = pci_set_dma_mask(pdev, dma_mask);
                if (!err) {
                        features |= NETIF_F_HIGHDMA;
                        err = pci_set_consistent_dma_mask(pdev,
                                                          persist_dma_mask);
                        if (err < 0) {
                                dev_err(&pdev->dev, "Unable to obtain 64 bit "
                                        "DMA for consistent allocations\n");
                                goto err_out_apeunmap;
                        }
                }
        }
        if (err || dma_mask == DMA_BIT_MASK(32)) {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev,
                                "No usable DMA configuration, aborting\n");
                        goto err_out_apeunmap;
                }
        }
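
        /*
         * Note: the pci_set_dma_mask()/pci_set_consistent_dma_mask() calls
         * above are thin wrappers around the generic DMA API.  A minimal
         * sketch of the equivalent direct calls (same masks, same fallback
         * idea) -- disabled and illustrative only:
         */
#if 0
        if (dma_set_mask(&pdev->dev, dma_mask) ||
            dma_set_coherent_mask(&pdev->dev, persist_dma_mask)) {
                /* Fall back to the universally supported 32-bit mask. */
                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (!err)
                        err = dma_set_coherent_mask(&pdev->dev,
                                                    DMA_BIT_MASK(32));
        }
#endif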

        tg3_init_bufmgr_config(tp);

        features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

        /* 5700 B0 chips do not support checksumming correctly due
         * to hardware bugs.
         */
        if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
                features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

                if (tg3_flag(tp, 5755_PLUS))
                        features |= NETIF_F_IPV6_CSUM;
        }

        /* TSO is on by default on chips that support hardware TSO.
         * Firmware TSO on older chips gives lower performance, so it
         * is off by default, but can be enabled using ethtool.
         */
        if ((tg3_flag(tp, HW_TSO_1) ||
             tg3_flag(tp, HW_TSO_2) ||
             tg3_flag(tp, HW_TSO_3)) &&
            (features & NETIF_F_IP_CSUM))
                features |= NETIF_F_TSO;
        if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
                if (features & NETIF_F_IPV6_CSUM)
                        features |= NETIF_F_TSO6;
                if (tg3_flag(tp, HW_TSO_3) ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
                     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
                        features |= NETIF_F_TSO_ECN;
        }

        dev->features |= features;
        dev->vlan_features |= features;

        /*
         * Add loopback capability only for a subset of devices that support
         * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
         * INT-PHY loopback for the remaining devices.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
            !tg3_flag(tp, CPMU_PRESENT))
                /* Add the loopback capability */
                features |= NETIF_F_LOOPBACK;

        dev->hw_features |= features;

        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
            !tg3_flag(tp, TSO_CAPABLE) &&
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
                tg3_flag_set(tp, MAX_RXPEND_64);
                tp->rx_pending = 63;
        }

        err = tg3_get_device_address(tp);
        if (err) {
                dev_err(&pdev->dev,
                        "Could not obtain valid ethernet address, aborting\n");
                goto err_out_apeunmap;
        }

        /*
         * Reset the chip in case the UNDI or EFI driver did not shut it
         * down cleanly.  Otherwise the DMA self test below will enable
         * the WDMAC and we'll see (spurious) pending DMA on the PCI bus
         * at that point.
         */
        if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
            (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        }

        err = tg3_test_dma(tp);
        if (err) {
                dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
                goto err_out_apeunmap;
        }

        intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
        rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
        sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
        for (i = 0; i < tp->irq_max; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tnapi->tp = tp;
                tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

                tnapi->int_mbox = intmbx;
                if (i <= 4)
                        intmbx += 0x8;
                else
                        intmbx += 0x4;

                tnapi->consmbox = rcvmbx;
                tnapi->prodmbox = sndmbx;

                if (i)
                        tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
                else
                        tnapi->coal_now = HOSTCC_MODE_NOW;

                if (!tg3_flag(tp, SUPPORT_MSIX))
                        break;

                /*
                 * If we support MSIX, we'll be using RSS.  If we're using
                 * RSS, the first vector only handles link interrupts and the
                 * remaining vectors handle rx and tx interrupts.  Reuse the
                 * mailbox values for the next iteration.  The values we setup
                 * above are still useful for the single vectored mode.
                 */
                if (!i)
                        continue;

                rcvmbx += 0x8;

                if (sndmbx & 0x4)
                        sndmbx -= 0x4;
                else
                        sndmbx += 0xc;
        }
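
        /*
         * Debug aid (disabled): dump the per-vector mailbox assignments
         * computed by the loop above.  Purely illustrative of the layout
         * described in the comment inside the loop.
         */
#if 0
        for (i = 0; i < tp->irq_max; i++)
                netdev_dbg(dev, "vec %d: intmbx %#x consmbox %#x prodmbox %#x\n",
                           i, tp->napi[i].int_mbox,
                           tp->napi[i].consmbox, tp->napi[i].prodmbox);
#endif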

        tg3_init_coal(tp);

        pci_set_drvdata(pdev, dev);

        if (tg3_flag(tp, 5717_PLUS)) {
                /* Resume a low-power mode */
                tg3_frob_aux_power(tp, false);
        }

        tg3_timer_init(tp);

        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting\n");
                goto err_out_apeunmap;
        }

        netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
                    tp->board_part_number,
                    tp->pci_chip_rev_id,
                    tg3_bus_string(tp, str),
                    dev->dev_addr);

        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
                struct phy_device *phydev;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                netdev_info(dev,
                            "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
                            phydev->drv->name, dev_name(&phydev->dev));
        } else {
                char *ethtype;

                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        ethtype = "10/100Base-TX";
                else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        ethtype = "1000Base-SX";
                else
                        ethtype = "10/100/1000Base-T";

                netdev_info(dev, "attached PHY is %s (%s Ethernet) "
                            "(WireSpeed[%d], EEE[%d])\n",
                            tg3_phy_string(tp), ethtype,
                            (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
                            (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
        }

        netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
                    (dev->features & NETIF_F_RXCSUM) != 0,
                    tg3_flag(tp, USE_LINKCHG_REG) != 0,
                    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
                    tg3_flag(tp, ENABLE_ASF) != 0,
                    tg3_flag(tp, TSO_CAPABLE) != 0);
        netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
                    tp->dma_rwctrl,
                    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
                    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

        pci_save_state(pdev);

        return 0;

err_out_apeunmap:
        if (tp->aperegs) {
                iounmap(tp->aperegs);
                tp->aperegs = NULL;
        }

err_out_iounmap:
        if (tp->regs) {
                iounmap(tp->regs);
                tp->regs = NULL;
        }

err_out_free_dev:
        free_netdev(dev);

err_out_power_down:
        pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                struct tg3 *tp = netdev_priv(dev);

                release_firmware(tp->fw);

                tg3_reset_task_cancel(tp);

                if (tg3_flag(tp, USE_PHYLIB)) {
                        tg3_phy_fini(tp);
                        tg3_mdio_fini(tp);
                }

                unregister_netdev(dev);
                if (tp->aperegs) {
                        iounmap(tp->aperegs);
                        tp->aperegs = NULL;
                }
                if (tp->regs) {
                        iounmap(tp->regs);
                        tp->regs = NULL;
                }
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (!netif_running(dev))
                return 0;

        tg3_reset_task_cancel(tp);
        tg3_phy_stop(tp);
        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_flag_clear(tp, INIT_COMPLETE);
        tg3_full_unlock(tp);

        err = tg3_power_down_prepare(tp);
        if (err) {
                int err2;

                tg3_full_lock(tp, 0);

                tg3_flag_set(tp, INIT_COMPLETE);
                err2 = tg3_restart_hw(tp, 1);
                if (err2)
                        goto out;

                tg3_timer_start(tp);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);

                if (!err2)
                        tg3_phy_start(tp);
        }

        return err;
}

static int tg3_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (!netif_running(dev))
                return 0;

        netif_device_attach(dev);

        tg3_full_lock(tp, 0);

        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, 1);
        if (err)
                goto out;

        tg3_timer_start(tp);

        tg3_netif_start(tp);

out:
        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

        return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

        netdev_info(netdev, "PCI I/O error detected\n");

        rtnl_lock();

        if (!netif_running(netdev))
                goto done;

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        /* Want to make sure that the reset task doesn't run */
        tg3_reset_task_cancel(tp);

        netif_device_detach(netdev);

        /* Clean up software state, even if MMIO is blocked */
        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        tg3_full_unlock(tp);

done:
        if (state == pci_channel_io_perm_failure)
                err = PCI_ERS_RESULT_DISCONNECT;
        else
                pci_disable_device(pdev);

        rtnl_unlock();

        return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
        int err;

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
                goto done;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        if (!netif_running(netdev)) {
                rc = PCI_ERS_RESULT_RECOVERED;
                goto done;
        }

        err = tg3_power_up(tp);
        if (err)
                goto done;

        rc = PCI_ERS_RESULT_RECOVERED;

done:
        rtnl_unlock();

        return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        int err;

        rtnl_lock();

        if (!netif_running(netdev))
                goto done;

        tg3_full_lock(tp, 0);
        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, 1);
        tg3_full_unlock(tp);
        if (err) {
                netdev_err(netdev, "Cannot restart hardware after reset.\n");
                goto done;
        }

        netif_device_attach(netdev);

        tg3_timer_start(tp);

        tg3_netif_start(tp);

        tg3_phy_start(tp);

done:
        rtnl_unlock();
}

static struct pci_error_handlers tg3_err_handler = {
        .error_detected = tg3_io_error_detected,
        .slot_reset     = tg3_io_slot_reset,
        .resume         = tg3_io_resume
};

static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = __devexit_p(tg3_remove_one),
        .err_handler    = &tg3_err_handler,
        .driver.pm      = TG3_PM_OPS,
};

static int __init tg3_init(void)
{
        return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
        pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);