/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
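
/*
 * For example, tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. a test_bit()
 * lookup on the device's flag bitmap.
 */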

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     123
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "March 21, 2012"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
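
/*
 * Because TG3_TX_RING_SIZE is a power of two, the mask above is the
 * '& (foo - 1)' form of '% foo' described in the comment before it:
 * e.g. NEXT_TX(511) == (512 & 511) == 0, wrapping to the ring start.
 */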

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

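/* Write a register through the PCI configuration-space window
 * (TG3PCI_REG_BASE_ADDR/TG3PCI_REG_DATA) rather than the memory-mapped
 * BAR; indirect_lock serializes the base-address/data register pair.
 */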
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
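
/*
 * Illustrative use of the flush variants (a sketch, not a call site
 * copied from this file): a GPIO power toggle through GRC_LOCAL_CTRL
 * needs the post-write delay, e.g.
 *
 *   tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *               TG3_GRC_LCLCTL_PWRSW_DELAY);
 *
 * while ordinary registers use tw32() or, when the write must be
 * flushed immediately, tw32_f().
 */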

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver isn't holding any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }

}

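/* Acquire one of the APE hardware mutexes: post the request bit, then
 * poll the grant register for up to 1 ms.  Returns 0 on success or
 * -EBUSY if the grant never arrives (the request is revoked first).
 */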
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
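                /* else: fall through */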
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
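                /* else: fall through */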
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* NCSI does not support APE events */
        if (tg3_flag(tp, APE_HAS_NCSI))
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

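/* Read a PHY register by driving the MII management interface directly:
 * autopolling is paused, a read frame is issued through MAC_MI_COM, and
 * the busy bit is polled for up to PHY_BUSY_LOOPS iterations.  Returns
 * 0 on success or -EBUSY on timeout.
 */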
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

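/* Clause 45 access tunneled through the clause 22 MMD registers:
 * select the MMD device in MII_TG3_MMD_CTRL, latch the register
 * address, then switch to data/no-post-increment mode to move the
 * actual value.
 */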
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

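/* The DSP registers sit behind a two-step window: latch the register
 * number in MII_TG3_DSP_ADDRESS, then transfer data through
 * MII_TG3_DSP_RW_PORT.
 */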
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}

/* tp->lock is held. */
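/* Pack four MII register pairs (BMCR/BMSR, ADVERTISE/LPA,
 * CTRL1000/STAT1000, and PHYADDR) into data[0..3] for the firmware
 * link-update mailbox, high halfword first.
 */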
1462 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1463 {
1464         u32 reg, val;
1465
1466         val = 0;
1467         if (!tg3_readphy(tp, MII_BMCR, &reg))
1468                 val = reg << 16;
1469         if (!tg3_readphy(tp, MII_BMSR, &reg))
1470                 val |= (reg & 0xffff);
1471         *data++ = val;
1472
1473         val = 0;
1474         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1475                 val = reg << 16;
1476         if (!tg3_readphy(tp, MII_LPA, &reg))
1477                 val |= (reg & 0xffff);
1478         *data++ = val;
1479
1480         val = 0;
1481         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1482                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1483                         val = reg << 16;
1484                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1485                         val |= (reg & 0xffff);
1486         }
1487         *data++ = val;
1488
1489         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1490                 val = reg << 16;
1491         else
1492                 val = 0;
1493         *data++ = val;
1494 }
1495
1496 /* tp->lock is held. */
1497 static void tg3_ump_link_report(struct tg3 *tp)
1498 {
1499         u32 data[4];
1500
1501         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1502                 return;
1503
1504         tg3_phy_gather_ump_data(tp, data);
1505
1506         tg3_wait_for_event_ack(tp);
1507
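        /* Post a link-update command to the firmware mailbox: the
         * command word, the data length, then the four MII snapshot
         * words gathered above.
         */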
1508         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1509         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1510         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1511         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1512         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1513         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1514
1515         tg3_generate_fw_event(tp);
1516 }
1517
1518 /* tp->lock is held. */
1519 static void tg3_stop_fw(struct tg3 *tp)
1520 {
1521         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1522                 /* Wait for RX cpu to ACK the previous event. */
1523                 tg3_wait_for_event_ack(tp);
1524
1525                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1526
1527                 tg3_generate_fw_event(tp);
1528
1529                 /* Wait for RX cpu to ACK this event. */
1530                 tg3_wait_for_event_ack(tp);
1531         }
1532 }
1533
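/* Advertise the driver's reset intent to the management firmware
 * through the driver-state mailbox (and to the APE where present),
 * so ASF/IPMI firmware can react before the chip is reset.
 */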
1534 /* tp->lock is held. */
1535 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1536 {
1537         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1538                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1539
1540         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1541                 switch (kind) {
1542                 case RESET_KIND_INIT:
1543                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1544                                       DRV_STATE_START);
1545                         break;
1546
1547                 case RESET_KIND_SHUTDOWN:
1548                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1549                                       DRV_STATE_UNLOAD);
1550                         break;
1551
1552                 case RESET_KIND_SUSPEND:
1553                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1554                                       DRV_STATE_SUSPEND);
1555                         break;
1556
1557                 default:
1558                         break;
1559                 }
1560         }
1561
1562         if (kind == RESET_KIND_INIT ||
1563             kind == RESET_KIND_SUSPEND)
1564                 tg3_ape_driver_state_change(tp, kind);
1565 }
1566
1567 /* tp->lock is held. */
1568 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1569 {
1570         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1571                 switch (kind) {
1572                 case RESET_KIND_INIT:
1573                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1574                                       DRV_STATE_START_DONE);
1575                         break;
1576
1577                 case RESET_KIND_SHUTDOWN:
1578                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1579                                       DRV_STATE_UNLOAD_DONE);
1580                         break;
1581
1582                 default:
1583                         break;
1584                 }
1585         }
1586
1587         if (kind == RESET_KIND_SHUTDOWN)
1588                 tg3_ape_driver_state_change(tp, kind);
1589 }
1590
1591 /* tp->lock is held. */
1592 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1593 {
1594         if (tg3_flag(tp, ENABLE_ASF)) {
1595                 switch (kind) {
1596                 case RESET_KIND_INIT:
1597                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1598                                       DRV_STATE_START);
1599                         break;
1600
1601                 case RESET_KIND_SHUTDOWN:
1602                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1603                                       DRV_STATE_UNLOAD);
1604                         break;
1605
1606                 case RESET_KIND_SUSPEND:
1607                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1608                                       DRV_STATE_SUSPEND);
1609                         break;
1610
1611                 default:
1612                         break;
1613                 }
1614         }
1615 }
1616
1617 static int tg3_poll_fw(struct tg3 *tp)
1618 {
1619         int i;
1620         u32 val;
1621
1622         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1623                 /* Wait up to 20ms for init done. */
1624                 for (i = 0; i < 200; i++) {
1625                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1626                                 return 0;
1627                         udelay(100);
1628                 }
1629                 return -ENODEV;
1630         }
1631
1632         /* Wait for firmware initialization to complete. */
1633         for (i = 0; i < 100000; i++) {
1634                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1635                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1636                         break;
1637                 udelay(10);
1638         }
1639
1640         /* Chip might not be fitted with firmware.  Some Sun onboard
1641          * parts are configured like that.  So don't signal the timeout
1642          * of the above loop as an error, but do report the lack of
1643          * running firmware once.
1644          */
1645         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1646                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1647
1648                 netdev_info(tp->dev, "No firmware running\n");
1649         }
1650
1651         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1652                 /* The 57765 A0 needs a little more time to
1653                  * complete its internal initialization.
1654                  */
1655                 mdelay(10);
1656         }
1657
1658         return 0;
1659 }
1660
1661 static void tg3_link_report(struct tg3 *tp)
1662 {
1663         if (!netif_carrier_ok(tp->dev)) {
1664                 netif_info(tp, link, tp->dev, "Link is down\n");
1665                 tg3_ump_link_report(tp);
1666         } else if (netif_msg_link(tp)) {
1667                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1668                             (tp->link_config.active_speed == SPEED_1000 ?
1669                              1000 :
1670                              (tp->link_config.active_speed == SPEED_100 ?
1671                               100 : 10)),
1672                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1673                              "full" : "half"));
1674
1675                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1676                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1677                             "on" : "off",
1678                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1679                             "on" : "off");
1680
1681                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1682                         netdev_info(tp->dev, "EEE is %s\n",
1683                                     tp->setlpicnt ? "enabled" : "disabled");
1684
1685                 tg3_ump_link_report(tp);
1686         }
1687 }
1688
1689 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1690 {
1691         u16 miireg;
1692
1693         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1694                 miireg = ADVERTISE_1000XPAUSE;
1695         else if (flow_ctrl & FLOW_CTRL_TX)
1696                 miireg = ADVERTISE_1000XPSE_ASYM;
1697         else if (flow_ctrl & FLOW_CTRL_RX)
1698                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1699         else
1700                 miireg = 0;
1701
1702         return miireg;
1703 }
1704
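/* Resolve TX/RX pause from the local and link-partner 1000BASE-X
 * advertisements per the usual IEEE 802.3 Annex 28B rules:
 * symmetric pause if both ends advertise PAUSE; otherwise, if both
 * advertise ASYM_PAUSE, the end that also advertises PAUSE decides
 * the direction (local => RX only, remote => TX only).
 */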
1705 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1706 {
1707         u8 cap = 0;
1708
1709         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1710                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1711         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1712                 if (lcladv & ADVERTISE_1000XPAUSE)
1713                         cap = FLOW_CTRL_RX;
1714                 if (rmtadv & ADVERTISE_1000XPAUSE)
1715                         cap = FLOW_CTRL_TX;
1716         }
1717
1718         return cap;
1719 }
1720
1721 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1722 {
1723         u8 autoneg;
1724         u8 flowctrl = 0;
1725         u32 old_rx_mode = tp->rx_mode;
1726         u32 old_tx_mode = tp->tx_mode;
1727
1728         if (tg3_flag(tp, USE_PHYLIB))
1729                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1730         else
1731                 autoneg = tp->link_config.autoneg;
1732
1733         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1734                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1735                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1736                 else
1737                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1738         } else
1739                 flowctrl = tp->link_config.flowctrl;
1740
1741         tp->link_config.active_flowctrl = flowctrl;
1742
1743         if (flowctrl & FLOW_CTRL_RX)
1744                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1745         else
1746                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1747
1748         if (old_rx_mode != tp->rx_mode)
1749                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1750
1751         if (flowctrl & FLOW_CTRL_TX)
1752                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1753         else
1754                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1755
1756         if (old_tx_mode != tp->tx_mode)
1757                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1758 }
1759
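/* phylib link-change callback.  Mirror the PHY's negotiated speed,
 * duplex and pause settings into the MAC mode registers, then emit
 * a link report if anything changed.
 */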
1760 static void tg3_adjust_link(struct net_device *dev)
1761 {
1762         u8 oldflowctrl, linkmesg = 0;
1763         u32 mac_mode, lcl_adv, rmt_adv;
1764         struct tg3 *tp = netdev_priv(dev);
1765         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1766
1767         spin_lock_bh(&tp->lock);
1768
1769         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1770                                     MAC_MODE_HALF_DUPLEX);
1771
1772         oldflowctrl = tp->link_config.active_flowctrl;
1773
1774         if (phydev->link) {
1775                 lcl_adv = 0;
1776                 rmt_adv = 0;
1777
1778                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1779                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1780                 else if (phydev->speed == SPEED_1000 ||
1781                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1782                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1783                 else
1784                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1785
1786                 if (phydev->duplex == DUPLEX_HALF)
1787                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1788                 else {
1789                         lcl_adv = mii_advertise_flowctrl(
1790                                   tp->link_config.flowctrl);
1791
1792                         if (phydev->pause)
1793                                 rmt_adv = LPA_PAUSE_CAP;
1794                         if (phydev->asym_pause)
1795                                 rmt_adv |= LPA_PAUSE_ASYM;
1796                 }
1797
1798                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1799         } else
1800                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1801
1802         if (mac_mode != tp->mac_mode) {
1803                 tp->mac_mode = mac_mode;
1804                 tw32_f(MAC_MODE, tp->mac_mode);
1805                 udelay(40);
1806         }
1807
1808         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1809                 if (phydev->speed == SPEED_10)
1810                         tw32(MAC_MI_STAT,
1811                              MAC_MI_STAT_10MBPS_MODE |
1812                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1813                 else
1814                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1815         }
1816
1817         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1818                 tw32(MAC_TX_LENGTHS,
1819                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1820                       (6 << TX_LENGTHS_IPG_SHIFT) |
1821                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1822         else
1823                 tw32(MAC_TX_LENGTHS,
1824                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1825                       (6 << TX_LENGTHS_IPG_SHIFT) |
1826                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1827
1828         if (phydev->link != tp->old_link ||
1829             phydev->speed != tp->link_config.active_speed ||
1830             phydev->duplex != tp->link_config.active_duplex ||
1831             oldflowctrl != tp->link_config.active_flowctrl)
1832                 linkmesg = 1;
1833
1834         tp->old_link = phydev->link;
1835         tp->link_config.active_speed = phydev->speed;
1836         tp->link_config.active_duplex = phydev->duplex;
1837
1838         spin_unlock_bh(&tp->lock);
1839
1840         if (linkmesg)
1841                 tg3_link_report(tp);
1842 }
1843
1844 static int tg3_phy_init(struct tg3 *tp)
1845 {
1846         struct phy_device *phydev;
1847
1848         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1849                 return 0;
1850
1851         /* Bring the PHY back to a known state. */
1852         tg3_bmcr_reset(tp);
1853
1854         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1855
1856         /* Attach the MAC to the PHY. */
1857         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1858                              phydev->dev_flags, phydev->interface);
1859         if (IS_ERR(phydev)) {
1860                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1861                 return PTR_ERR(phydev);
1862         }
1863
1864         /* Restrict the PHY's supported features to what the MAC can do. */
1865         switch (phydev->interface) {
1866         case PHY_INTERFACE_MODE_GMII:
1867         case PHY_INTERFACE_MODE_RGMII:
1868                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1869                         phydev->supported &= (PHY_GBIT_FEATURES |
1870                                               SUPPORTED_Pause |
1871                                               SUPPORTED_Asym_Pause);
1872                         break;
1873                 }
1874                 /* fallthru - 10/100-only PHYs use the basic feature set */
1875         case PHY_INTERFACE_MODE_MII:
1876                 phydev->supported &= (PHY_BASIC_FEATURES |
1877                                       SUPPORTED_Pause |
1878                                       SUPPORTED_Asym_Pause);
1879                 break;
1880         default:
1881                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1882                 return -EINVAL;
1883         }
1884
1885         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1886
1887         phydev->advertising = phydev->supported;
1888
1889         return 0;
1890 }
1891
1892 static void tg3_phy_start(struct tg3 *tp)
1893 {
1894         struct phy_device *phydev;
1895
1896         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1897                 return;
1898
1899         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1900
1901         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1902                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1903                 phydev->speed = tp->link_config.speed;
1904                 phydev->duplex = tp->link_config.duplex;
1905                 phydev->autoneg = tp->link_config.autoneg;
1906                 phydev->advertising = tp->link_config.advertising;
1907         }
1908
1909         phy_start(phydev);
1910
1911         phy_start_aneg(phydev);
1912 }
1913
1914 static void tg3_phy_stop(struct tg3 *tp)
1915 {
1916         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1917                 return;
1918
1919         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1920 }
1921
1922 static void tg3_phy_fini(struct tg3 *tp)
1923 {
1924         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1925                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1926                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1927         }
1928 }
1929
1930 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1931 {
1932         int err;
1933         u32 val;
1934
1935         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1936                 return 0;
1937
1938         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1939                 /* Cannot do read-modify-write on 5401 */
1940                 err = tg3_phy_auxctl_write(tp,
1941                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1942                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1943                                            0x4c20);
1944                 goto done;
1945         }
1946
1947         err = tg3_phy_auxctl_read(tp,
1948                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1949         if (err)
1950                 return err;
1951
1952         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1953         err = tg3_phy_auxctl_write(tp,
1954                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1955
1956 done:
1957         return err;
1958 }
1959
1960 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1961 {
1962         u32 phytest;
1963
1964         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1965                 u32 phy;
1966
1967                 tg3_writephy(tp, MII_TG3_FET_TEST,
1968                              phytest | MII_TG3_FET_SHADOW_EN);
1969                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1970                         if (enable)
1971                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1972                         else
1973                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1974                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1975                 }
1976                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1977         }
1978 }
1979
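/* Enable or disable PHY auto power-down (APD).  The SCR5 shadow
 * register is set up for low-power link-energy detection, and the
 * APD shadow register programs an 84 ms wake timer along with the
 * enable bit itself.
 */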
1980 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1981 {
1982         u32 reg;
1983
1984         if (!tg3_flag(tp, 5705_PLUS) ||
1985             (tg3_flag(tp, 5717_PLUS) &&
1986              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1987                 return;
1988
1989         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1990                 tg3_phy_fet_toggle_apd(tp, enable);
1991                 return;
1992         }
1993
1994         reg = MII_TG3_MISC_SHDW_WREN |
1995               MII_TG3_MISC_SHDW_SCR5_SEL |
1996               MII_TG3_MISC_SHDW_SCR5_LPED |
1997               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1998               MII_TG3_MISC_SHDW_SCR5_SDTL |
1999               MII_TG3_MISC_SHDW_SCR5_C125OE;
2000         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2001                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2002
2003         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2004
2005
2006         reg = MII_TG3_MISC_SHDW_WREN |
2007               MII_TG3_MISC_SHDW_APD_SEL |
2008               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2009         if (enable)
2010                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2011
2012         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2013 }
2014
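/* Toggle automatic MDI/MDI-X crossover detection.  FET-style PHYs
 * expose this through the shadowed MISCCTRL register; other PHYs
 * use the FORCE_AMDIX bit in the AUXCTL MISC shadow.
 */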
2015 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2016 {
2017         u32 phy;
2018
2019         if (!tg3_flag(tp, 5705_PLUS) ||
2020             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2021                 return;
2022
2023         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2024                 u32 ephy;
2025
2026                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2027                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2028
2029                         tg3_writephy(tp, MII_TG3_FET_TEST,
2030                                      ephy | MII_TG3_FET_SHADOW_EN);
2031                         if (!tg3_readphy(tp, reg, &phy)) {
2032                                 if (enable)
2033                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2034                                 else
2035                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2036                                 tg3_writephy(tp, reg, phy);
2037                         }
2038                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2039                 }
2040         } else {
2041                 int ret;
2042
2043                 ret = tg3_phy_auxctl_read(tp,
2044                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2045                 if (!ret) {
2046                         if (enable)
2047                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2048                         else
2049                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2050                         tg3_phy_auxctl_write(tp,
2051                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2052                 }
2053         }
2054 }
2055
2056 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2057 {
2058         int ret;
2059         u32 val;
2060
2061         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2062                 return;
2063
2064         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2065         if (!ret)
2066                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2067                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2068 }
2069
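/* Program PHY DSP coefficients (AGC target, HPF/LPF trims, VDAC,
 * 10BT amplitude and offset values) from the one-time-programmable
 * fuse word cached in tp->phy_otp.
 */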
2070 static void tg3_phy_apply_otp(struct tg3 *tp)
2071 {
2072         u32 otp, phy;
2073
2074         if (!tp->phy_otp)
2075                 return;
2076
2077         otp = tp->phy_otp;
2078
2079         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2080                 return;
2081
2082         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2083         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2084         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2085
2086         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2087               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2088         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2089
2090         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2091         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2092         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2093
2094         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2095         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2096
2097         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2098         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2099
2100         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2101               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2102         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2103
2104         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2105 }
2106
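/* Re-evaluate Energy Efficient Ethernet after a link change:
 * program the LPI exit timer for the negotiated speed, and leave
 * LPI asserted only if the link partner resolved EEE at 100TX or
 * 1000T full duplex.
 */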
2107 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2108 {
2109         u32 val;
2110
2111         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2112                 return;
2113
2114         tp->setlpicnt = 0;
2115
2116         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2117             current_link_up == 1 &&
2118             tp->link_config.active_duplex == DUPLEX_FULL &&
2119             (tp->link_config.active_speed == SPEED_100 ||
2120              tp->link_config.active_speed == SPEED_1000)) {
2121                 u32 eeectl;
2122
2123                 if (tp->link_config.active_speed == SPEED_1000)
2124                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2125                 else
2126                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2127
2128                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2129
2130                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2131                                   TG3_CL45_D7_EEERES_STAT, &val);
2132
2133                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2134                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2135                         tp->setlpicnt = 2;
2136         }
2137
2138         if (!tp->setlpicnt) {
2139                 if (current_link_up == 1 &&
2140                     !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2141                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2142                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2143                 }
2144
2145                 val = tr32(TG3_CPMU_EEE_MODE);
2146                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2147         }
2148 }
2149
2150 static void tg3_phy_eee_enable(struct tg3 *tp)
2151 {
2152         u32 val;
2153
2154         if (tp->link_config.active_speed == SPEED_1000 &&
2155             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2156              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2157              tg3_flag(tp, 57765_CLASS)) &&
2158             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2159                 val = MII_TG3_DSP_TAP26_ALNOKO |
2160                       MII_TG3_DSP_TAP26_RMRXSTO;
2161                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2162                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2163         }
2164
2165         val = tr32(TG3_CPMU_EEE_MODE);
2166         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2167 }
2168
2169 static int tg3_wait_macro_done(struct tg3 *tp)
2170 {
2171         int limit = 100;
2172
2173         while (limit--) {
2174                 u32 tmp32;
2175
2176                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2177                         if ((tmp32 & 0x1000) == 0)
2178                                 break;
2179                 }
2180         }
2181         if (limit < 0)
2182                 return -EBUSY;
2183
2184         return 0;
2185 }
2186
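/* Write a known test pattern into each of the four DSP channel TAP
 * blocks, read it back, and request a PHY reset (via *resetp) if
 * any channel fails to return the pattern.
 */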
2187 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2188 {
2189         static const u32 test_pat[4][6] = {
2190         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2191         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2192         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2193         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2194         };
2195         int chan;
2196
2197         for (chan = 0; chan < 4; chan++) {
2198                 int i;
2199
2200                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2201                              (chan * 0x2000) | 0x0200);
2202                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2203
2204                 for (i = 0; i < 6; i++)
2205                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2206                                      test_pat[chan][i]);
2207
2208                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2209                 if (tg3_wait_macro_done(tp)) {
2210                         *resetp = 1;
2211                         return -EBUSY;
2212                 }
2213
2214                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2215                              (chan * 0x2000) | 0x0200);
2216                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2217                 if (tg3_wait_macro_done(tp)) {
2218                         *resetp = 1;
2219                         return -EBUSY;
2220                 }
2221
2222                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2223                 if (tg3_wait_macro_done(tp)) {
2224                         *resetp = 1;
2225                         return -EBUSY;
2226                 }
2227
2228                 for (i = 0; i < 6; i += 2) {
2229                         u32 low, high;
2230
2231                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2232                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2233                             tg3_wait_macro_done(tp)) {
2234                                 *resetp = 1;
2235                                 return -EBUSY;
2236                         }
2237                         low &= 0x7fff;
2238                         high &= 0x000f;
2239                         if (low != test_pat[chan][i] ||
2240                             high != test_pat[chan][i+1]) {
2241                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2242                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2243                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2244
2245                                 return -EBUSY;
2246                         }
2247                 }
2248         }
2249
2250         return 0;
2251 }
2252
2253 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2254 {
2255         int chan;
2256
2257         for (chan = 0; chan < 4; chan++) {
2258                 int i;
2259
2260                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2261                              (chan * 0x2000) | 0x0200);
2262                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2263                 for (i = 0; i < 6; i++)
2264                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2265                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2266                 if (tg3_wait_macro_done(tp))
2267                         return -EBUSY;
2268         }
2269
2270         return 0;
2271 }
2272
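/* DSP workaround for 5703/5704/5705 PHYs: force 1000 Mbps
 * full-duplex master mode, rewrite the channel TAP blocks until
 * the test pattern verifies, then restore the original CTRL1000
 * and extended-control settings.
 */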
2273 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2274 {
2275         u32 reg32, phy9_orig;
2276         int retries, do_phy_reset, err;
2277
2278         retries = 10;
2279         do_phy_reset = 1;
2280         do {
2281                 if (do_phy_reset) {
2282                         err = tg3_bmcr_reset(tp);
2283                         if (err)
2284                                 return err;
2285                         do_phy_reset = 0;
2286                 }
2287
2288                 /* Disable transmitter and interrupt.  */
2289                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2290                         continue;
2291
2292                 reg32 |= 0x3000;
2293                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2294
2295                 /* Set full duplex and 1000 Mbps.  */
2296                 tg3_writephy(tp, MII_BMCR,
2297                              BMCR_FULLDPLX | BMCR_SPEED1000);
2298
2299                 /* Set to master mode.  */
2300                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2301                         continue;
2302
2303                 tg3_writephy(tp, MII_CTRL1000,
2304                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2305
2306                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2307                 if (err)
2308                         return err;
2309
2310                 /* Block the PHY control access.  */
2311                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2312
2313                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2314                 if (!err)
2315                         break;
2316         } while (--retries);
2317
2318         err = tg3_phy_reset_chanpat(tp);
2319         if (err)
2320                 return err;
2321
2322         tg3_phydsp_write(tp, 0x8005, 0x0000);
2323
2324         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2325         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2326
2327         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2328
2329         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2330
2331         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2332                 reg32 &= ~0x3000;
2333                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2334         } else if (!err)
2335                 err = -EBUSY;
2336
2337         return err;
2338 }
2339
2340 /* Fully reset the tigon3 PHY, applying any required chip-specific
2341  * workarounds, and leave it in a known-good state.
2342  */
2343 static int tg3_phy_reset(struct tg3 *tp)
2344 {
2345         u32 val, cpmuctrl;
2346         int err;
2347
2348         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2349                 val = tr32(GRC_MISC_CFG);
2350                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2351                 udelay(40);
2352         }
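        /* MII_BMSR latches link-down events, so read it twice to
         * obtain the current link status.
         */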
2353         err  = tg3_readphy(tp, MII_BMSR, &val);
2354         err |= tg3_readphy(tp, MII_BMSR, &val);
2355         if (err != 0)
2356                 return -EBUSY;
2357
2358         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2359                 netif_carrier_off(tp->dev);
2360                 tg3_link_report(tp);
2361         }
2362
2363         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2364             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2365             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2366                 err = tg3_phy_reset_5703_4_5(tp);
2367                 if (err)
2368                         return err;
2369                 goto out;
2370         }
2371
2372         cpmuctrl = 0;
2373         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2374             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2375                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2376                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2377                         tw32(TG3_CPMU_CTRL,
2378                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2379         }
2380
2381         err = tg3_bmcr_reset(tp);
2382         if (err)
2383                 return err;
2384
2385         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2386                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2387                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2388
2389                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2390         }
2391
2392         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2393             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2394                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2395                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2396                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2397                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2398                         udelay(40);
2399                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2400                 }
2401         }
2402
2403         if (tg3_flag(tp, 5717_PLUS) &&
2404             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2405                 return 0;
2406
2407         tg3_phy_apply_otp(tp);
2408
2409         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2410                 tg3_phy_toggle_apd(tp, true);
2411         else
2412                 tg3_phy_toggle_apd(tp, false);
2413
2414 out:
2415         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2416             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2417                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2418                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2419                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2420         }
2421
2422         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2423                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2424                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2425         }
2426
2427         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2428                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2429                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2430                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2431                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2432                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2433                 }
2434         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2435                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2436                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2437                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2438                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2439                                 tg3_writephy(tp, MII_TG3_TEST1,
2440                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2441                         } else
2442                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2443
2444                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2445                 }
2446         }
2447
2448         /* Set Extended packet length bit (bit 14) on all chips that
2449          * support jumbo frames.  */
2450         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2451                 /* Cannot do read-modify-write on 5401 */
2452                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2453         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2454                 /* Set bit 14 with read-modify-write to preserve other bits */
2455                 err = tg3_phy_auxctl_read(tp,
2456                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2457                 if (!err)
2458                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2459                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2460         }
2461
2462         /* Set phy register 0x10 bit 0 to enable high FIFO elasticity
2463          * for jumbo frame transmission.
2464          */
2465         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2466                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2467                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2468                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2469         }
2470
2471         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2472                 /* adjust output voltage */
2473                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2474         }
2475
2476         tg3_phy_toggle_automdix(tp, 1);
2477         tg3_phy_set_wirespeed(tp);
2478         return 0;
2479 }
2480
2481 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2482 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2483 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2484                                           TG3_GPIO_MSG_NEED_VAUX)
2485 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2486         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2487          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2488          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2489          (TG3_GPIO_MSG_DRVR_PRES << 12))
2490
2491 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2492         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2493          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2494          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2495          (TG3_GPIO_MSG_NEED_VAUX << 12))
2496
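/* Each PCI function owns a 4-bit nibble of the shared GPIO message
 * register (TG3_APE_GPIO_MSG on 5717/5719, TG3_CPMU_DRV_STATUS
 * elsewhere).  Replace this function's nibble with newstat and
 * return the updated status of all functions.
 */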
2497 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2498 {
2499         u32 status, shift;
2500
2501         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2502             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2503                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2504         else
2505                 status = tr32(TG3_CPMU_DRV_STATUS);
2506
2507         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2508         status &= ~(TG3_GPIO_MSG_MASK << shift);
2509         status |= (newstat << shift);
2510
2511         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2512             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2513                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2514         else
2515                 tw32(TG3_CPMU_DRV_STATUS, status);
2516
2517         return status >> TG3_APE_GPIO_MSG_SHIFT;
2518 }
2519
2520 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2521 {
2522         if (!tg3_flag(tp, IS_NIC))
2523                 return 0;
2524
2525         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2526             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2527             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2528                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2529                         return -EIO;
2530
2531                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2532
2533                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2534                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2535
2536                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2537         } else {
2538                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2539                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2540         }
2541
2542         return 0;
2543 }
2544
2545 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2546 {
2547         u32 grc_local_ctrl;
2548
2549         if (!tg3_flag(tp, IS_NIC) ||
2550             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2551             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2552                 return;
2553
2554         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2555
2556         tw32_wait_f(GRC_LOCAL_CTRL,
2557                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2558                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2559
2560         tw32_wait_f(GRC_LOCAL_CTRL,
2561                     grc_local_ctrl,
2562                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2563
2564         tw32_wait_f(GRC_LOCAL_CTRL,
2565                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2566                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2567 }
2568
2569 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2570 {
2571         if (!tg3_flag(tp, IS_NIC))
2572                 return;
2573
2574         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2575             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2576                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2577                             (GRC_LCLCTRL_GPIO_OE0 |
2578                              GRC_LCLCTRL_GPIO_OE1 |
2579                              GRC_LCLCTRL_GPIO_OE2 |
2580                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2581                              GRC_LCLCTRL_GPIO_OUTPUT1),
2582                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2583         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2584                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2585                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2586                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2587                                      GRC_LCLCTRL_GPIO_OE1 |
2588                                      GRC_LCLCTRL_GPIO_OE2 |
2589                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2590                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2591                                      tp->grc_local_ctrl;
2592                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2593                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2594
2595                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2596                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2597                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2598
2599                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2600                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2601                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2602         } else {
2603                 u32 no_gpio2;
2604                 u32 grc_local_ctrl = 0;
2605
2606                 /* Workaround to prevent excessive current draw. */
2607                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2608                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2609                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2610                                     grc_local_ctrl,
2611                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2612                 }
2613
2614                 /* On 5753 and variants, GPIO2 cannot be used. */
2615                 no_gpio2 = tp->nic_sram_data_cfg &
2616                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2617
2618                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2619                                   GRC_LCLCTRL_GPIO_OE1 |
2620                                   GRC_LCLCTRL_GPIO_OE2 |
2621                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2622                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2623                 if (no_gpio2) {
2624                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2625                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2626                 }
2627                 tw32_wait_f(GRC_LOCAL_CTRL,
2628                             tp->grc_local_ctrl | grc_local_ctrl,
2629                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2630
2631                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2632
2633                 tw32_wait_f(GRC_LOCAL_CTRL,
2634                             tp->grc_local_ctrl | grc_local_ctrl,
2635                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2636
2637                 if (!no_gpio2) {
2638                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2639                         tw32_wait_f(GRC_LOCAL_CTRL,
2640                                     tp->grc_local_ctrl | grc_local_ctrl,
2641                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2642                 }
2643         }
2644 }
2645
2646 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2647 {
2648         u32 msg = 0;
2649
2650         /* Serialize power state transitions */
2651         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2652                 return;
2653
2654         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2655                 msg = TG3_GPIO_MSG_NEED_VAUX;
2656
2657         msg = tg3_set_function_status(tp, msg);
2658
2659         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2660                 goto done;
2661
2662         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2663                 tg3_pwrsrc_switch_to_vaux(tp);
2664         else
2665                 tg3_pwrsrc_die_with_vmain(tp);
2666
2667 done:
2668         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2669 }
2670
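/* Decide whether this device (and, on dual-port boards, its peer
 * function) still needs auxiliary power for WoL or ASF, and switch
 * the power source to vaux or vmain accordingly.
 */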
2671 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2672 {
2673         bool need_vaux = false;
2674
2675         /* The GPIOs serve a different purpose on 57765-class devices. */
2676         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2677                 return;
2678
2679         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2680             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2681             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2682                 tg3_frob_aux_power_5717(tp, include_wol ?
2683                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2684                 return;
2685         }
2686
2687         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2688                 struct net_device *dev_peer;
2689
2690                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2691
2692                 /* remove_one() may have been run on the peer. */
2693                 if (dev_peer) {
2694                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2695
2696                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2697                                 return;
2698
2699                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2700                             tg3_flag(tp_peer, ENABLE_ASF))
2701                                 need_vaux = true;
2702                 }
2703         }
2704
2705         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2706             tg3_flag(tp, ENABLE_ASF))
2707                 need_vaux = true;
2708
2709         if (need_vaux)
2710                 tg3_pwrsrc_switch_to_vaux(tp);
2711         else
2712                 tg3_pwrsrc_die_with_vmain(tp);
2713 }
2714
2715 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2716 {
2717         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2718                 return 1;
2719         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2720                 if (speed != SPEED_10)
2721                         return 1;
2722         } else if (speed == SPEED_10)
2723                 return 1;
2724
2725         return 0;
2726 }
2727
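/* Quiesce the PHY ahead of a low-power transition.  Serdes devices
 * park the SG-DIG block, 5906 parts gate the internal ephy via
 * GRC_MISC_CFG, FET parts set the shadow SBPD bit, and remaining
 * devices are powered down with BMCR_PDOWN unless a chip bug
 * forbids it.
 */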
2728 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2729 {
2730         u32 val;
2731
2732         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2733                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2734                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2735                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2736
2737                         sg_dig_ctrl |=
2738                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2739                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2740                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2741                 }
2742                 return;
2743         }
2744
2745         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2746                 tg3_bmcr_reset(tp);
2747                 val = tr32(GRC_MISC_CFG);
2748                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2749                 udelay(40);
2750                 return;
2751         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2752                 u32 phytest;
2753                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2754                         u32 phy;
2755
2756                         tg3_writephy(tp, MII_ADVERTISE, 0);
2757                         tg3_writephy(tp, MII_BMCR,
2758                                      BMCR_ANENABLE | BMCR_ANRESTART);
2759
2760                         tg3_writephy(tp, MII_TG3_FET_TEST,
2761                                      phytest | MII_TG3_FET_SHADOW_EN);
2762                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2763                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2764                                 tg3_writephy(tp,
2765                                              MII_TG3_FET_SHDW_AUXMODE4,
2766                                              phy);
2767                         }
2768                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2769                 }
2770                 return;
2771         } else if (do_low_power) {
2772                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2773                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2774
2775                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2776                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2777                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2778                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2779         }
2780
2781         /* Due to hardware errata, the PHY must not be powered down
2782          * on some chips.
2783          */
2784         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2785             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2786             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2787              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2788             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2789              !tp->pci_fn))
2790                 return;
2791
2792         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2793             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2794                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2795                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2796                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2797                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2798         }
2799
2800         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2801 }
2802
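/* Acquire the NVRAM software arbitration semaphore (SWARB),
 * polling for the grant for up to 160 ms (8000 x 20 usec).  The
 * lock nests via nvram_lock_cnt.
 */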
2803 /* tp->lock is held. */
2804 static int tg3_nvram_lock(struct tg3 *tp)
2805 {
2806         if (tg3_flag(tp, NVRAM)) {
2807                 int i;
2808
2809                 if (tp->nvram_lock_cnt == 0) {
2810                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2811                         for (i = 0; i < 8000; i++) {
2812                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2813                                         break;
2814                                 udelay(20);
2815                         }
2816                         if (i == 8000) {
2817                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2818                                 return -ENODEV;
2819                         }
2820                 }
2821                 tp->nvram_lock_cnt++;
2822         }
2823         return 0;
2824 }
2825
2826 /* tp->lock is held. */
2827 static void tg3_nvram_unlock(struct tg3 *tp)
2828 {
2829         if (tg3_flag(tp, NVRAM)) {
2830                 if (tp->nvram_lock_cnt > 0)
2831                         tp->nvram_lock_cnt--;
2832                 if (tp->nvram_lock_cnt == 0)
2833                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2834         }
2835 }
2836
2837 /* tp->lock is held. */
2838 static void tg3_enable_nvram_access(struct tg3 *tp)
2839 {
2840         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2841                 u32 nvaccess = tr32(NVRAM_ACCESS);
2842
2843                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2844         }
2845 }
2846
2847 /* tp->lock is held. */
2848 static void tg3_disable_nvram_access(struct tg3 *tp)
2849 {
2850         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2851                 u32 nvaccess = tr32(NVRAM_ACCESS);
2852
2853                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2854         }
2855 }
2856
2857 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2858                                         u32 offset, u32 *val)
2859 {
2860         u32 tmp;
2861         int i;
2862
2863         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2864                 return -EINVAL;
2865
2866         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2867                                         EEPROM_ADDR_DEVID_MASK |
2868                                         EEPROM_ADDR_READ);
2869         tw32(GRC_EEPROM_ADDR,
2870              tmp |
2871              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2872              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2873               EEPROM_ADDR_ADDR_MASK) |
2874              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2875
2876         for (i = 0; i < 1000; i++) {
2877                 tmp = tr32(GRC_EEPROM_ADDR);
2878
2879                 if (tmp & EEPROM_ADDR_COMPLETE)
2880                         break;
2881                 msleep(1);
2882         }
2883         if (!(tmp & EEPROM_ADDR_COMPLETE))
2884                 return -EBUSY;
2885
2886         tmp = tr32(GRC_EEPROM_DATA);
2887
2888         /*
2889          * The data will always be in the opposite of the native
2890          * endian format.  Perform a blind byteswap to compensate.
2891          */
2892         *val = swab32(tmp);
2893
2894         return 0;
2895 }
2896
2897 #define NVRAM_CMD_TIMEOUT 10000
2898
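/* Kick off an NVRAM command and poll for NVRAM_CMD_DONE; at
 * 10 usec per poll, NVRAM_CMD_TIMEOUT bounds the wait at roughly
 * 100 ms.
 */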
2899 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2900 {
2901         int i;
2902
2903         tw32(NVRAM_CMD, nvram_cmd);
2904         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2905                 udelay(10);
2906                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2907                         udelay(10);
2908                         break;
2909                 }
2910         }
2911
2912         if (i == NVRAM_CMD_TIMEOUT)
2913                 return -EBUSY;
2914
2915         return 0;
2916 }
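
/* tg3_nvram_exec_cmd() polls NVRAM_CMD_DONE every 10us for up to
 * NVRAM_CMD_TIMEOUT iterations, a worst-case budget of roughly 100ms
 * per command, plus one extra 10us settle once DONE is seen.
 */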
2917
2918 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2919 {
2920         if (tg3_flag(tp, NVRAM) &&
2921             tg3_flag(tp, NVRAM_BUFFERED) &&
2922             tg3_flag(tp, FLASH) &&
2923             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2924             (tp->nvram_jedecnum == JEDEC_ATMEL))
2925
2926                 addr = ((addr / tp->nvram_pagesize) <<
2927                         ATMEL_AT45DB0X1B_PAGE_POS) +
2928                        (addr % tp->nvram_pagesize);
2929
2930         return addr;
2931 }
2932
2933 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2934 {
2935         if (tg3_flag(tp, NVRAM) &&
2936             tg3_flag(tp, NVRAM_BUFFERED) &&
2937             tg3_flag(tp, FLASH) &&
2938             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2939             (tp->nvram_jedecnum == JEDEC_ATMEL))
2940
2941                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2942                         tp->nvram_pagesize) +
2943                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2944
2945         return addr;
2946 }
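
/* Worked example for the translation pair above, assuming an Atmel
 * AT45DB0x1B part (264-byte pages, ATMEL_AT45DB0X1B_PAGE_POS == 9):
 * linear address 1000 is page 3, byte 208, so the physical address
 * becomes (3 << 9) + 208 = 0x6d0.  tg3_nvram_logical_addr() is the
 * exact inverse mapping.
 */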
2947
2948 /* NOTE: Data read in from NVRAM is byteswapped according to
2949  * the byteswapping settings for all other register accesses.
2950  * tg3 devices are BE devices, so on a BE machine, the data
2951  * returned will be exactly as it is seen in NVRAM.  On a LE
2952  * machine, the 32-bit value will be byteswapped.
2953  */
2954 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2955 {
2956         int ret;
2957
2958         if (!tg3_flag(tp, NVRAM))
2959                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2960
2961         offset = tg3_nvram_phys_addr(tp, offset);
2962
2963         if (offset > NVRAM_ADDR_MSK)
2964                 return -EINVAL;
2965
2966         ret = tg3_nvram_lock(tp);
2967         if (ret)
2968                 return ret;
2969
2970         tg3_enable_nvram_access(tp);
2971
2972         tw32(NVRAM_ADDR, offset);
2973         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2974                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2975
2976         if (ret == 0)
2977                 *val = tr32(NVRAM_RDDATA);
2978
2979         tg3_disable_nvram_access(tp);
2980
2981         tg3_nvram_unlock(tp);
2982
2983         return ret;
2984 }
2985
2986 /* Ensures NVRAM data is in bytestream format. */
2987 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2988 {
2989         u32 v;
2990         int res = tg3_nvram_read(tp, offset, &v);
2991         if (!res)
2992                 *val = cpu_to_be32(v);
2993         return res;
2994 }
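
/* A minimal usage sketch (hypothetical, not a caller in this file):
 * reading a dword-aligned block into a byte buffer in bytestream
 * order, much like the ethtool EEPROM paths elsewhere in this driver:
 *
 *      u32 i;
 *      for (i = 0; i < len; i += 4) {
 *              __be32 v;
 *              if (tg3_nvram_read_be32(tp, offset + i, &v))
 *                      break;
 *              memcpy(buf + i, &v, sizeof(v));
 *      }
 */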
2995
2996 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
2997                                     u32 offset, u32 len, u8 *buf)
2998 {
2999         int i, j, rc = 0;
3000         u32 val;
3001
3002         for (i = 0; i < len; i += 4) {
3003                 u32 addr;
3004                 __be32 data;
3005
3006                 addr = offset + i;
3007
3008                 memcpy(&data, buf + i, 4);
3009
3010                 /*
3011                  * The SEEPROM interface expects the data to always be opposite
3012                  * the native endian format.  We accomplish this by reversing
3013                  * all the operations that would have been performed on the
3014                  * data from a call to tg3_nvram_read_be32().
3015                  */
3016                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3017
3018                 val = tr32(GRC_EEPROM_ADDR);
3019                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3020
3021                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3022                         EEPROM_ADDR_READ);
3023                 tw32(GRC_EEPROM_ADDR, val |
3024                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3025                         (addr & EEPROM_ADDR_ADDR_MASK) |
3026                         EEPROM_ADDR_START |
3027                         EEPROM_ADDR_WRITE);
3028
3029                 for (j = 0; j < 1000; j++) {
3030                         val = tr32(GRC_EEPROM_ADDR);
3031
3032                         if (val & EEPROM_ADDR_COMPLETE)
3033                                 break;
3034                         msleep(1);
3035                 }
3036                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3037                         rc = -EBUSY;
3038                         break;
3039                 }
3040         }
3041
3042         return rc;
3043 }
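
/* The swab32(be32_to_cpu()) above is the exact inverse of the
 * cpu_to_be32(swab32()) composition performed on reads, so data
 * written here reads back byte-identical via tg3_nvram_read_be32().
 * Completion is polled at 1ms intervals, giving each 32-bit word a
 * budget on the order of a second.
 */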
3044
3045 /* offset and length are dword aligned */
3046 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3047                 u8 *buf)
3048 {
3049         int ret = 0;
3050         u32 pagesize = tp->nvram_pagesize;
3051         u32 pagemask = pagesize - 1;
3052         u32 nvram_cmd;
3053         u8 *tmp;
3054
3055         tmp = kmalloc(pagesize, GFP_KERNEL);
3056         if (tmp == NULL)
3057                 return -ENOMEM;
3058
3059         while (len) {
3060                 int j;
3061                 u32 phy_addr, page_off, size;
3062
3063                 phy_addr = offset & ~pagemask;
3064
3065                 for (j = 0; j < pagesize; j += 4) {
3066                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3067                                                   (__be32 *) (tmp + j));
3068                         if (ret)
3069                                 break;
3070                 }
3071                 if (ret)
3072                         break;
3073
3074                 page_off = offset & pagemask;
3075                 size = pagesize;
3076                 if (len < size)
3077                         size = len;
3078
3079                 len -= size;
3080
3081                 memcpy(tmp + page_off, buf, size);
3082
3083                 offset = offset + (pagesize - page_off);
3084
3085                 tg3_enable_nvram_access(tp);
3086
3087                 /*
3088                  * Before we can erase the flash page, we need
3089                  * to issue a special "write enable" command.
3090                  */
3091                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3092
3093                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3094                         break;
3095
3096                 /* Erase the target page */
3097                 tw32(NVRAM_ADDR, phy_addr);
3098
3099                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3100                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3101
3102                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3103                         break;
3104
3105                 /* Issue another write enable to start the write. */
3106                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3107
3108                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3109                         break;
3110
3111                 for (j = 0; j < pagesize; j += 4) {
3112                         __be32 data;
3113
3114                         data = *((__be32 *) (tmp + j));
3115
3116                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3117
3118                         tw32(NVRAM_ADDR, phy_addr + j);
3119
3120                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3121                                 NVRAM_CMD_WR;
3122
3123                         if (j == 0)
3124                                 nvram_cmd |= NVRAM_CMD_FIRST;
3125                         else if (j == (pagesize - 4))
3126                                 nvram_cmd |= NVRAM_CMD_LAST;
3127
3128                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3129                         if (ret)
3130                                 break;
3131                 }
3132                 if (ret)
3133                         break;
3134         }
3135
3136         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3137         tg3_nvram_exec_cmd(tp, nvram_cmd);
3138
3139         kfree(tmp);
3140
3141         return ret;
3142 }
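
/* Summary of the unbuffered path: flash without an on-chip write
 * buffer is programmed a whole page at a time, so each pass reads the
 * target page into tmp, merges the caller's bytes, then performs
 * WREN -> page ERASE -> WREN -> word-by-word programming (FIRST on
 * the first word, LAST on the final one).  The trailing WRDI drops
 * write-enable once the loop finishes.
 */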
3143
3144 /* offset and length are dword aligned */
3145 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3146                 u8 *buf)
3147 {
3148         int i, ret = 0;
3149
3150         for (i = 0; i < len; i += 4, offset += 4) {
3151                 u32 page_off, phy_addr, nvram_cmd;
3152                 __be32 data;
3153
3154                 memcpy(&data, buf + i, 4);
3155                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3156
3157                 page_off = offset % tp->nvram_pagesize;
3158
3159                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3160
3161                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3162
3163                 if (page_off == 0 || i == 0)
3164                         nvram_cmd |= NVRAM_CMD_FIRST;
3165                 if (page_off == (tp->nvram_pagesize - 4))
3166                         nvram_cmd |= NVRAM_CMD_LAST;
3167
3168                 if (i == (len - 4))
3169                         nvram_cmd |= NVRAM_CMD_LAST;
3170
3171                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3172                     !tg3_flag(tp, FLASH) ||
3173                     !tg3_flag(tp, 57765_PLUS))
3174                         tw32(NVRAM_ADDR, phy_addr);
3175
3176                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3177                     !tg3_flag(tp, 5755_PLUS) &&
3178                     (tp->nvram_jedecnum == JEDEC_ST) &&
3179                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3180                         u32 cmd;
3181
3182                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3183                         ret = tg3_nvram_exec_cmd(tp, cmd);
3184                         if (ret)
3185                                 break;
3186                 }
3187                 if (!tg3_flag(tp, FLASH)) {
3188                         /* We always do complete word writes to eeprom. */
3189                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3190                 }
3191
3192                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3193                 if (ret)
3194                         break;
3195         }
3196         return ret;
3197 }
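
/* In the buffered path, NVRAM_CMD_FIRST/_LAST frame each flash page
 * as well as the transfer as a whole: FIRST is raised on a page
 * boundary or the first word, LAST on the final word of a page or of
 * the buffer.  ST parts on non-5752, pre-5755 chips additionally need
 * an explicit WREN ahead of each page's first word, and plain EEPROMs
 * are always written one complete word at a time.
 */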
3198
3199 /* offset and length are dword aligned */
3200 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3201 {
3202         int ret;
3203
3204         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3205                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3206                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3207                 udelay(40);
3208         }
3209
3210         if (!tg3_flag(tp, NVRAM)) {
3211                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3212         } else {
3213                 u32 grc_mode;
3214
3215                 ret = tg3_nvram_lock(tp);
3216                 if (ret)
3217                         return ret;
3218
3219                 tg3_enable_nvram_access(tp);
3220                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3221                         tw32(NVRAM_WRITE1, 0x406);
3222
3223                 grc_mode = tr32(GRC_MODE);
3224                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3225
3226                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3227                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3228                                 buf);
3229                 } else {
3230                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3231                                 buf);
3232                 }
3233
3234                 grc_mode = tr32(GRC_MODE);
3235                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3236
3237                 tg3_disable_nvram_access(tp);
3238                 tg3_nvram_unlock(tp);
3239         }
3240
3241         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3242                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3243                 udelay(40);
3244         }
3245
3246         return ret;
3247 }
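
/* tg3_nvram_write_block() also handles two interlocks around the
 * actual write: when EEPROM_WRITE_PROT is set, GPIO_OUTPUT1 is
 * dropped for the duration (this appears to gate an external
 * write-protect line), and for real NVRAM the access is bracketed by
 * setting and clearing GRC_MODE_NVRAM_WR_ENABLE.
 */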
3248
3249 #define RX_CPU_SCRATCH_BASE     0x30000
3250 #define RX_CPU_SCRATCH_SIZE     0x04000
3251 #define TX_CPU_SCRATCH_BASE     0x34000
3252 #define TX_CPU_SCRATCH_SIZE     0x04000
3253
3254 /* tp->lock is held. */
3255 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3256 {
3257         int i;
3258
3259         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3260
3261         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3262                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3263
3264                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3265                 return 0;
3266         }
3267         if (offset == RX_CPU_BASE) {
3268                 for (i = 0; i < 10000; i++) {
3269                         tw32(offset + CPU_STATE, 0xffffffff);
3270                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3271                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3272                                 break;
3273                 }
3274
3275                 tw32(offset + CPU_STATE, 0xffffffff);
3276                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3277                 udelay(10);
3278         } else {
3279                 for (i = 0; i < 10000; i++) {
3280                         tw32(offset + CPU_STATE, 0xffffffff);
3281                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3282                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3283                                 break;
3284                 }
3285         }
3286
3287         if (i >= 10000) {
3288                 netdev_err(tp->dev, "%s timed out halting %s CPU\n",
3289                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3290                 return -ENODEV;
3291         }
3292
3293         /* Clear firmware's nvram arbitration. */
3294         if (tg3_flag(tp, NVRAM))
3295                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3296         return 0;
3297 }
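
/* The 5906 has no conventional RX/TX RISC CPUs, so it is halted
 * through its virtual CPU via GRC_VCPU_EXT_CTRL instead.  For the
 * other chips, the SWARB_REQ_CLR0 write at the end releases the nvram
 * arbitration request that bootcode may still be holding.
 */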
3298
3299 struct fw_info {
3300         unsigned int fw_base;
3301         unsigned int fw_len;
3302         const __be32 *fw_data;
3303 };
3304
3305 /* tp->lock is held. */
3306 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3307                                  u32 cpu_scratch_base, int cpu_scratch_size,
3308                                  struct fw_info *info)
3309 {
3310         int err, lock_err, i;
3311         void (*write_op)(struct tg3 *, u32, u32);
3312
3313         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3314                 netdev_err(tp->dev,
3315                            "%s: trying to load TX cpu firmware on a 5705 or later chip\n",
3316                            __func__);
3317                 return -EINVAL;
3318         }
3319
3320         if (tg3_flag(tp, 5705_PLUS))
3321                 write_op = tg3_write_mem;
3322         else
3323                 write_op = tg3_write_indirect_reg32;
3324
3325         /* It is possible that bootcode is still loading at this point.
3326          * Get the nvram lock before halting the cpu.
3327          */
3328         lock_err = tg3_nvram_lock(tp);
3329         err = tg3_halt_cpu(tp, cpu_base);
3330         if (!lock_err)
3331                 tg3_nvram_unlock(tp);
3332         if (err)
3333                 goto out;
3334
3335         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3336                 write_op(tp, cpu_scratch_base + i, 0);
3337         tw32(cpu_base + CPU_STATE, 0xffffffff);
3338         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3339         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3340                 write_op(tp, (cpu_scratch_base +
3341                               (info->fw_base & 0xffff) +
3342                               (i * sizeof(u32))),
3343                               be32_to_cpu(info->fw_data[i]));
3344
3345         err = 0;
3346
3347 out:
3348         return err;
3349 }
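
/* Note on write_op above: 5705 and newer parts take the firmware
 * image through tg3_write_mem(), older ones through indirect register
 * writes.  The low 16 bits of fw_base place the image within the
 * CPU's scratch memory, which is first zeroed in full.
 */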
3350
3351 /* tp->lock is held. */
3352 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3353 {
3354         struct fw_info info;
3355         const __be32 *fw_data;
3356         int err, i;
3357
3358         fw_data = (void *)tp->fw->data;
3359
3360         /* The firmware blob starts with version numbers, followed by
3361          * start address and length.  We set the complete length:
3362          * length = end_address_of_bss - start_address_of_text.
3363          * The remainder is the blob to be loaded contiguously
3364          * from the start address. */
3365
3366         info.fw_base = be32_to_cpu(fw_data[1]);
3367         info.fw_len = tp->fw->size - 12;
3368         info.fw_data = &fw_data[3];
3369
3370         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3371                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3372                                     &info);
3373         if (err)
3374                 return err;
3375
3376         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3377                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3378                                     &info);
3379         if (err)
3380                 return err;
3381
3382         /* Now start up only the RX cpu. */
3383         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3384         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3385
3386         for (i = 0; i < 5; i++) {
3387                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3388                         break;
3389                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3390                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3391                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3392                 udelay(1000);
3393         }
3394         if (i >= 5) {
3395                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3396                            "should be %08x\n", __func__,
3397                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3398                 return -ENODEV;
3399         }
3400         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3401         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3402
3403         return 0;
3404 }
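
/* Layout of tp->fw->data as consumed above, inferred from the offsets
 * used (a sketch, not firmware documentation):
 *
 *      fw_data[0]    version
 *      fw_data[1]    start/load address (fw_base)
 *      fw_data[2]    length word (unused here; fw_len is computed
 *                    from tp->fw->size - 12 instead)
 *      fw_data[3..]  image, loaded contiguously at fw_base
 */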
3405
3406 /* tp->lock is held. */
3407 static int tg3_load_tso_firmware(struct tg3 *tp)
3408 {
3409         struct fw_info info;
3410         const __be32 *fw_data;
3411         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3412         int err, i;
3413
3414         if (tg3_flag(tp, HW_TSO_1) ||
3415             tg3_flag(tp, HW_TSO_2) ||
3416             tg3_flag(tp, HW_TSO_3))
3417                 return 0;
3418
3419         fw_data = (void *)tp->fw->data;
3420
3421         /* The firmware blob starts with version numbers, followed by
3422          * start address and length.  We set the complete length:
3423          * length = end_address_of_bss - start_address_of_text.
3424          * The remainder is the blob to be loaded contiguously
3425          * from the start address. */
3426
3427         info.fw_base = be32_to_cpu(fw_data[1]);
3428         cpu_scratch_size = tp->fw_len;
3429         info.fw_len = tp->fw->size - 12;
3430         info.fw_data = &fw_data[3];
3431
3432         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3433                 cpu_base = RX_CPU_BASE;
3434                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3435         } else {
3436                 cpu_base = TX_CPU_BASE;
3437                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3438                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3439         }
3440
3441         err = tg3_load_firmware_cpu(tp, cpu_base,
3442                                     cpu_scratch_base, cpu_scratch_size,
3443                                     &info);
3444         if (err)
3445                 return err;
3446
3447         /* Now start up the cpu. */
3448         tw32(cpu_base + CPU_STATE, 0xffffffff);
3449         tw32_f(cpu_base + CPU_PC, info.fw_base);
3450
3451         for (i = 0; i < 5; i++) {
3452                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3453                         break;
3454                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3455                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3456                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3457                 udelay(1000);
3458         }
3459         if (i >= 5) {
3460                 netdev_err(tp->dev,
3461                            "%s failed to set CPU PC: is %08x, should be %08x\n",
3462                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3463                 return -ENODEV;
3464         }
3465         tw32(cpu_base + CPU_STATE, 0xffffffff);
3466         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3467         return 0;
3468 }
3469
3470
3471 /* tp->lock is held. */
3472 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3473 {
3474         u32 addr_high, addr_low;
3475         int i;
3476
3477         addr_high = ((tp->dev->dev_addr[0] << 8) |
3478                      tp->dev->dev_addr[1]);
3479         addr_low = ((tp->dev->dev_addr[2] << 24) |
3480                     (tp->dev->dev_addr[3] << 16) |
3481                     (tp->dev->dev_addr[4] <<  8) |
3482                     (tp->dev->dev_addr[5] <<  0));
3483         for (i = 0; i < 4; i++) {
3484                 if (i == 1 && skip_mac_1)
3485                         continue;
3486                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3487                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3488         }
3489
3490         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3491             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3492                 for (i = 0; i < 12; i++) {
3493                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3494                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3495                 }
3496         }
3497
3498         addr_high = (tp->dev->dev_addr[0] +
3499                      tp->dev->dev_addr[1] +
3500                      tp->dev->dev_addr[2] +
3501                      tp->dev->dev_addr[3] +
3502                      tp->dev->dev_addr[4] +
3503                      tp->dev->dev_addr[5]) &
3504                 TX_BACKOFF_SEED_MASK;
3505         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3506 }
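
/* Example of the register packing above: for dev_addr
 * 00:10:18:aa:bb:cc the MAC is written as addr_high = 0x0010 and
 * addr_low = 0x18aabbcc, replicated into all four MAC_ADDR slots (and
 * twelve extended slots on the 5703/5704).  The TX backoff seed is
 * simply the byte sum, 0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc =
 * 0x259, masked with TX_BACKOFF_SEED_MASK.
 */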
3507
3508 static void tg3_enable_register_access(struct tg3 *tp)
3509 {
3510         /*
3511          * Make sure register accesses (indirect or otherwise) will function
3512          * correctly.
3513          */
3514         pci_write_config_dword(tp->pdev,
3515                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3516 }
3517
3518 static int tg3_power_up(struct tg3 *tp)
3519 {
3520         int err;
3521
3522         tg3_enable_register_access(tp);
3523
3524         err = pci_set_power_state(tp->pdev, PCI_D0);
3525         if (!err) {
3526                 /* Switch out of Vaux if it is a NIC */
3527                 tg3_pwrsrc_switch_to_vmain(tp);
3528         } else {
3529                 netdev_err(tp->dev, "Transition to D0 failed\n");
3530         }
3531
3532         return err;
3533 }
3534
3535 static int tg3_setup_phy(struct tg3 *, int);
3536
3537 static int tg3_power_down_prepare(struct tg3 *tp)
3538 {
3539         u32 misc_host_ctrl;
3540         bool device_should_wake, do_low_power;
3541
3542         tg3_enable_register_access(tp);
3543
3544         /* Restore the CLKREQ setting. */
3545         if (tg3_flag(tp, CLKREQ_BUG)) {
3546                 u16 lnkctl;
3547
3548                 pci_read_config_word(tp->pdev,
3549                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3550                                      &lnkctl);
3551                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3552                 pci_write_config_word(tp->pdev,
3553                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3554                                       lnkctl);
3555         }
3556
3557         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3558         tw32(TG3PCI_MISC_HOST_CTRL,
3559              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3560
3561         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3562                              tg3_flag(tp, WOL_ENABLE);
3563
3564         if (tg3_flag(tp, USE_PHYLIB)) {
3565                 do_low_power = false;
3566                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3567                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3568                         struct phy_device *phydev;
3569                         u32 phyid, advertising;
3570
3571                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3572
3573                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3574
3575                         tp->link_config.speed = phydev->speed;
3576                         tp->link_config.duplex = phydev->duplex;
3577                         tp->link_config.autoneg = phydev->autoneg;
3578                         tp->link_config.advertising = phydev->advertising;
3579
3580                         advertising = ADVERTISED_TP |
3581                                       ADVERTISED_Pause |
3582                                       ADVERTISED_Autoneg |
3583                                       ADVERTISED_10baseT_Half;
3584
3585                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3586                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3587                                         advertising |=
3588                                                 ADVERTISED_100baseT_Half |
3589                                                 ADVERTISED_100baseT_Full |
3590                                                 ADVERTISED_10baseT_Full;
3591                                 else
3592                                         advertising |= ADVERTISED_10baseT_Full;
3593                         }
3594
3595                         phydev->advertising = advertising;
3596
3597                         phy_start_aneg(phydev);
3598
3599                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3600                         if (phyid != PHY_ID_BCMAC131) {
3601                                 phyid &= PHY_BCM_OUI_MASK;
3602                                 if (phyid == PHY_BCM_OUI_1 ||
3603                                     phyid == PHY_BCM_OUI_2 ||
3604                                     phyid == PHY_BCM_OUI_3)
3605                                         do_low_power = true;
3606                         }
3607                 }
3608         } else {
3609                 do_low_power = true;
3610
3611                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3612                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3613
3614                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3615                         tg3_setup_phy(tp, 0);
3616         }
3617
3618         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3619                 u32 val;
3620
3621                 val = tr32(GRC_VCPU_EXT_CTRL);
3622                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3623         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3624                 int i;
3625                 u32 val;
3626
3627                 for (i = 0; i < 200; i++) {
3628                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3629                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3630                                 break;
3631                         msleep(1);
3632                 }
3633         }
3634         if (tg3_flag(tp, WOL_CAP))
3635                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3636                                                      WOL_DRV_STATE_SHUTDOWN |
3637                                                      WOL_DRV_WOL |
3638                                                      WOL_SET_MAGIC_PKT);
3639
3640         if (device_should_wake) {
3641                 u32 mac_mode;
3642
3643                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3644                         if (do_low_power &&
3645                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3646                                 tg3_phy_auxctl_write(tp,
3647                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3648                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3649                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3650                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3651                                 udelay(40);
3652                         }
3653
3654                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3655                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3656                         else
3657                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3658
3659                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3660                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3661                             ASIC_REV_5700) {
3662                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3663                                              SPEED_100 : SPEED_10;
3664                                 if (tg3_5700_link_polarity(tp, speed))
3665                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3666                                 else
3667                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3668                         }
3669                 } else {
3670                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3671                 }
3672
3673                 if (!tg3_flag(tp, 5750_PLUS))
3674                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3675
3676                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3677                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3678                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3679                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3680
3681                 if (tg3_flag(tp, ENABLE_APE))
3682                         mac_mode |= MAC_MODE_APE_TX_EN |
3683                                     MAC_MODE_APE_RX_EN |
3684                                     MAC_MODE_TDE_ENABLE;
3685
3686                 tw32_f(MAC_MODE, mac_mode);
3687                 udelay(100);
3688
3689                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3690                 udelay(10);
3691         }
3692
3693         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3694             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3695              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3696                 u32 base_val;
3697
3698                 base_val = tp->pci_clock_ctrl;
3699                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3700                              CLOCK_CTRL_TXCLK_DISABLE);
3701
3702                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3703                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3704         } else if (tg3_flag(tp, 5780_CLASS) ||
3705                    tg3_flag(tp, CPMU_PRESENT) ||
3706                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3707                 /* do nothing */
3708         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3709                 u32 newbits1, newbits2;
3710
3711                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3712                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3713                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3714                                     CLOCK_CTRL_TXCLK_DISABLE |
3715                                     CLOCK_CTRL_ALTCLK);
3716                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3717                 } else if (tg3_flag(tp, 5705_PLUS)) {
3718                         newbits1 = CLOCK_CTRL_625_CORE;
3719                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3720                 } else {
3721                         newbits1 = CLOCK_CTRL_ALTCLK;
3722                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3723                 }
3724
3725                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3726                             40);
3727
3728                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3729                             40);
3730
3731                 if (!tg3_flag(tp, 5705_PLUS)) {
3732                         u32 newbits3;
3733
3734                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3735                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3736                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3737                                             CLOCK_CTRL_TXCLK_DISABLE |
3738                                             CLOCK_CTRL_44MHZ_CORE);
3739                         } else {
3740                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3741                         }
3742
3743                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3744                                     tp->pci_clock_ctrl | newbits3, 40);
3745                 }
3746         }
3747
3748         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3749                 tg3_power_down_phy(tp, do_low_power);
3750
3751         tg3_frob_aux_power(tp, true);
3752
3753         /* Workaround for unstable PLL clock */
3754         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3755             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3756                 u32 val = tr32(0x7d00);
3757
3758                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3759                 tw32(0x7d00, val);
3760                 if (!tg3_flag(tp, ENABLE_ASF)) {
3761                         int err;
3762
3763                         err = tg3_nvram_lock(tp);
3764                         tg3_halt_cpu(tp, RX_CPU_BASE);
3765                         if (!err)
3766                                 tg3_nvram_unlock(tp);
3767                 }
3768         }
3769
3770         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3771
3772         return 0;
3773 }
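
/* tg3_power_down_prepare() runs the shutdown choreography in order:
 * mask PCI interrupts, drop the PHY to a WoL-capable low-power link,
 * post the WOL_SIGNATURE mailbox, configure the MAC for magic-packet
 * reception, gate the core clocks where the chip allows it, power the
 * PHY down entirely if nothing needs to wake, switch over to
 * auxiliary power, and leave a shutdown signature for bootcode.
 */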
3774
3775 static void tg3_power_down(struct tg3 *tp)
3776 {
3777         tg3_power_down_prepare(tp);
3778
3779         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3780         pci_set_power_state(tp->pdev, PCI_D3hot);
3781 }
3782
3783 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3784 {
3785         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3786         case MII_TG3_AUX_STAT_10HALF:
3787                 *speed = SPEED_10;
3788                 *duplex = DUPLEX_HALF;
3789                 break;
3790
3791         case MII_TG3_AUX_STAT_10FULL:
3792                 *speed = SPEED_10;
3793                 *duplex = DUPLEX_FULL;
3794                 break;
3795
3796         case MII_TG3_AUX_STAT_100HALF:
3797                 *speed = SPEED_100;
3798                 *duplex = DUPLEX_HALF;
3799                 break;
3800
3801         case MII_TG3_AUX_STAT_100FULL:
3802                 *speed = SPEED_100;
3803                 *duplex = DUPLEX_FULL;
3804                 break;
3805
3806         case MII_TG3_AUX_STAT_1000HALF:
3807                 *speed = SPEED_1000;
3808                 *duplex = DUPLEX_HALF;
3809                 break;
3810
3811         case MII_TG3_AUX_STAT_1000FULL:
3812                 *speed = SPEED_1000;
3813                 *duplex = DUPLEX_FULL;
3814                 break;
3815
3816         default:
3817                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3818                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3819                                  SPEED_10;
3820                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3821                                   DUPLEX_HALF;
3822                         break;
3823                 }
3824                 *speed = SPEED_UNKNOWN;
3825                 *duplex = DUPLEX_UNKNOWN;
3826                 break;
3827         }
3828 }
3829
3830 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3831 {
3832         int err = 0;
3833         u32 val, new_adv;
3834
3835         new_adv = ADVERTISE_CSMA;
3836         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3837         new_adv |= mii_advertise_flowctrl(flowctrl);
3838
3839         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3840         if (err)
3841                 goto done;
3842
3843         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3844                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3845
3846                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3847                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3848                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3849
3850                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3851                 if (err)
3852                         goto done;
3853         }
3854
3855         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3856                 goto done;
3857
3858         tw32(TG3_CPMU_EEE_MODE,
3859              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3860
3861         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3862         if (!err) {
3863                 u32 err2;
3864
3865                 val = 0;
3866                 /* Advertise 100-BaseTX EEE ability */
3867                 if (advertise & ADVERTISED_100baseT_Full)
3868                         val |= MDIO_AN_EEE_ADV_100TX;
3869                 /* Advertise 1000-BaseT EEE ability */
3870                 if (advertise & ADVERTISED_1000baseT_Full)
3871                         val |= MDIO_AN_EEE_ADV_1000T;
3872                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3873                 if (err)
3874                         val = 0;
3875
3876                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3877                 case ASIC_REV_5717:
3878                 case ASIC_REV_57765:
3879                 case ASIC_REV_57766:
3880                 case ASIC_REV_5719:
3881                         /* If we advertised any EEE abilities above... */
3882                         if (val)
3883                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3884                                       MII_TG3_DSP_TAP26_RMRXSTO |
3885                                       MII_TG3_DSP_TAP26_OPCSINPT;
3886                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3887                         /* Fall through */
3888                 case ASIC_REV_5720:
3889                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3890                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3891                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3892                 }
3893
3894                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3895                 if (!err)
3896                         err = err2;
3897         }
3898
3899 done:
3900         return err;
3901 }
3902
3903 static void tg3_phy_copper_begin(struct tg3 *tp)
3904 {
3905         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
3906             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3907                 u32 adv, fc;
3908
3909                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3910                         adv = ADVERTISED_10baseT_Half |
3911                               ADVERTISED_10baseT_Full;
3912                         if (tg3_flag(tp, WOL_SPEED_100MB))
3913                                 adv |= ADVERTISED_100baseT_Half |
3914                                        ADVERTISED_100baseT_Full;
3915
3916                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
3917                 } else {
3918                         adv = tp->link_config.advertising;
3919                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3920                                 adv &= ~(ADVERTISED_1000baseT_Half |
3921                                          ADVERTISED_1000baseT_Full);
3922
3923                         fc = tp->link_config.flowctrl;
3924                 }
3925
3926                 tg3_phy_autoneg_cfg(tp, adv, fc);
3927
3928                 tg3_writephy(tp, MII_BMCR,
3929                              BMCR_ANENABLE | BMCR_ANRESTART);
3930         } else {
3931                 int i;
3932                 u32 bmcr, orig_bmcr;
3933
3934                 tp->link_config.active_speed = tp->link_config.speed;
3935                 tp->link_config.active_duplex = tp->link_config.duplex;
3936
3937                 bmcr = 0;
3938                 switch (tp->link_config.speed) {
3939                 default:
3940                 case SPEED_10:
3941                         break;
3942
3943                 case SPEED_100:
3944                         bmcr |= BMCR_SPEED100;
3945                         break;
3946
3947                 case SPEED_1000:
3948                         bmcr |= BMCR_SPEED1000;
3949                         break;
3950                 }
3951
3952                 if (tp->link_config.duplex == DUPLEX_FULL)
3953                         bmcr |= BMCR_FULLDPLX;
3954
3955                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3956                     (bmcr != orig_bmcr)) {
3957                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3958                         for (i = 0; i < 1500; i++) {
3959                                 u32 tmp;
3960
3961                                 udelay(10);
3962                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3963                                     tg3_readphy(tp, MII_BMSR, &tmp))
3964                                         continue;
3965                                 if (!(tmp & BMSR_LSTATUS)) {
3966                                         udelay(40);
3967                                         break;
3968                                 }
3969                         }
3970                         tg3_writephy(tp, MII_BMCR, bmcr);
3971                         udelay(40);
3972                 }
3973         }
3974 }
3975
3976 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3977 {
3978         int err;
3979
3980         /* Turn off tap power management. */
3981         /* Set Extended packet length bit */
3982         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3983
3984         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3985         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3986         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3987         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3988         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3989
3990         udelay(40);
3991
3992         return err;
3993 }
3994
3995 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
3996 {
3997         u32 advmsk, tgtadv, advertising;
3998
3999         advertising = tp->link_config.advertising;
4000         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4001
4002         advmsk = ADVERTISE_ALL;
4003         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4004                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4005                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4006         }
4007
4008         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4009                 return false;
4010
4011         if ((*lcladv & advmsk) != tgtadv)
4012                 return false;
4013
4014         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4015                 u32 tg3_ctrl;
4016
4017                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4018
4019                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4020                         return false;
4021
4022                 if (tgtadv &&
4023                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4024                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4025                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4026                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4027                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4028                 } else {
4029                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4030                 }
4031
4032                 if (tg3_ctrl != tgtadv)
4033                         return false;
4034         }
4035
4036         return true;
4037 }
4038
4039 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4040 {
4041         u32 lpeth = 0;
4042
4043         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4044                 u32 val;
4045
4046                 if (tg3_readphy(tp, MII_STAT1000, &val))
4047                         return false;
4048
4049                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4050         }
4051
4052         if (tg3_readphy(tp, MII_LPA, rmtadv))
4053                 return false;
4054
4055         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4056         tp->link_config.rmt_adv = lpeth;
4057
4058         return true;
4059 }
4060
4061 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4062 {
4063         int current_link_up;
4064         u32 bmsr, val;
4065         u32 lcl_adv, rmt_adv;
4066         u16 current_speed;
4067         u8 current_duplex;
4068         int i, err;
4069
4070         tw32(MAC_EVENT, 0);
4071
4072         tw32_f(MAC_STATUS,
4073              (MAC_STATUS_SYNC_CHANGED |
4074               MAC_STATUS_CFG_CHANGED |
4075               MAC_STATUS_MI_COMPLETION |
4076               MAC_STATUS_LNKSTATE_CHANGED));
4077         udelay(40);
4078
4079         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4080                 tw32_f(MAC_MI_MODE,
4081                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4082                 udelay(80);
4083         }
4084
4085         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4086
4087         /* Some third-party PHYs need to be reset on link going
4088          * down.
4089          */
4090         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4091              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4092              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4093             netif_carrier_ok(tp->dev)) {
4094                 tg3_readphy(tp, MII_BMSR, &bmsr);
4095                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4096                     !(bmsr & BMSR_LSTATUS))
4097                         force_reset = 1;
4098         }
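
        /* The paired MII_BMSR reads above and below are deliberate:
         * the link-status bit is latched low per IEEE 802.3, so the
         * first read flushes a stale "link lost" indication and the
         * second reflects the current state.
         */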
4099         if (force_reset)
4100                 tg3_phy_reset(tp);
4101
4102         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4103                 tg3_readphy(tp, MII_BMSR, &bmsr);
4104                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4105                     !tg3_flag(tp, INIT_COMPLETE))
4106                         bmsr = 0;
4107
4108                 if (!(bmsr & BMSR_LSTATUS)) {
4109                         err = tg3_init_5401phy_dsp(tp);
4110                         if (err)
4111                                 return err;
4112
4113                         tg3_readphy(tp, MII_BMSR, &bmsr);
4114                         for (i = 0; i < 1000; i++) {
4115                                 udelay(10);
4116                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4117                                     (bmsr & BMSR_LSTATUS)) {
4118                                         udelay(40);
4119                                         break;
4120                                 }
4121                         }
4122
4123                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4124                             TG3_PHY_REV_BCM5401_B0 &&
4125                             !(bmsr & BMSR_LSTATUS) &&
4126                             tp->link_config.active_speed == SPEED_1000) {
4127                                 err = tg3_phy_reset(tp);
4128                                 if (!err)
4129                                         err = tg3_init_5401phy_dsp(tp);
4130                                 if (err)
4131                                         return err;
4132                         }
4133                 }
4134         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4135                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4136                 /* 5701 {A0,B0} CRC bug workaround */
4137                 tg3_writephy(tp, 0x15, 0x0a75);
4138                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4139                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4140                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4141         }
4142
4143         /* Clear pending interrupts... */
4144         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4145         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4146
4147         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4148                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4149         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4150                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4151
4152         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4153             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4154                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4155                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4156                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4157                 else
4158                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4159         }
4160
4161         current_link_up = 0;
4162         current_speed = SPEED_UNKNOWN;
4163         current_duplex = DUPLEX_UNKNOWN;
4164         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4165         tp->link_config.rmt_adv = 0;
4166
4167         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4168                 err = tg3_phy_auxctl_read(tp,
4169                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4170                                           &val);
4171                 if (!err && !(val & (1 << 10))) {
4172                         tg3_phy_auxctl_write(tp,
4173                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4174                                              val | (1 << 10));
4175                         goto relink;
4176                 }
4177         }
4178
4179         bmsr = 0;
4180         for (i = 0; i < 100; i++) {
4181                 tg3_readphy(tp, MII_BMSR, &bmsr);
4182                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4183                     (bmsr & BMSR_LSTATUS))
4184                         break;
4185                 udelay(40);
4186         }
4187
4188         if (bmsr & BMSR_LSTATUS) {
4189                 u32 aux_stat, bmcr;
4190
4191                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4192                 for (i = 0; i < 2000; i++) {
4193                         udelay(10);
4194                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4195                             aux_stat)
4196                                 break;
4197                 }
4198
4199                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4200                                              &current_speed,
4201                                              &current_duplex);
4202
4203                 bmcr = 0;
4204                 for (i = 0; i < 200; i++) {
4205                         tg3_readphy(tp, MII_BMCR, &bmcr);
4206                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4207                                 continue;
4208                         if (bmcr && bmcr != 0x7fff)
4209                                 break;
4210                         udelay(10);
4211                 }
4212
4213                 lcl_adv = 0;
4214                 rmt_adv = 0;
4215
4216                 tp->link_config.active_speed = current_speed;
4217                 tp->link_config.active_duplex = current_duplex;
4218
4219                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4220                         if ((bmcr & BMCR_ANENABLE) &&
4221                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4222                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4223                                 current_link_up = 1;
4224                 } else {
4225                         if (!(bmcr & BMCR_ANENABLE) &&
4226                             tp->link_config.speed == current_speed &&
4227                             tp->link_config.duplex == current_duplex &&
4228                             tp->link_config.flowctrl ==
4229                             tp->link_config.active_flowctrl) {
4230                                 current_link_up = 1;
4231                         }
4232                 }
4233
4234                 if (current_link_up == 1 &&
4235                     tp->link_config.active_duplex == DUPLEX_FULL) {
4236                         u32 reg, bit;
4237
4238                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4239                                 reg = MII_TG3_FET_GEN_STAT;
4240                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4241                         } else {
4242                                 reg = MII_TG3_EXT_STAT;
4243                                 bit = MII_TG3_EXT_STAT_MDIX;
4244                         }
4245
4246                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4247                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4248
4249                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4250                 }
4251         }
4252
4253 relink:
4254         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4255                 tg3_phy_copper_begin(tp);
4256
4257                 tg3_readphy(tp, MII_BMSR, &bmsr);
4258                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4259                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4260                         current_link_up = 1;
4261         }
4262
4263         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4264         if (current_link_up == 1) {
4265                 if (tp->link_config.active_speed == SPEED_100 ||
4266                     tp->link_config.active_speed == SPEED_10)
4267                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4268                 else
4269                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4270         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4271                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4272         else
4273                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4274
4275         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4276         if (tp->link_config.active_duplex == DUPLEX_HALF)
4277                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4278
4279         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4280                 if (current_link_up == 1 &&
4281                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4282                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4283                 else
4284                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4285         }
4286
4287         /* ??? Without this setting Netgear GA302T PHY does not
4288          * ??? send/receive packets...
4289          */
4290         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4291             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4292                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4293                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4294                 udelay(80);
4295         }
4296
4297         tw32_f(MAC_MODE, tp->mac_mode);
4298         udelay(40);
4299
4300         tg3_phy_eee_adjust(tp, current_link_up);
4301
4302         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4303                 /* Polled via timer. */
4304                 tw32_f(MAC_EVENT, 0);
4305         } else {
4306                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4307         }
4308         udelay(40);
4309
4310         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4311             current_link_up == 1 &&
4312             tp->link_config.active_speed == SPEED_1000 &&
4313             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4314                 udelay(120);
4315                 tw32_f(MAC_STATUS,
4316                      (MAC_STATUS_SYNC_CHANGED |
4317                       MAC_STATUS_CFG_CHANGED));
4318                 udelay(40);
4319                 tg3_write_mem(tp,
4320                               NIC_SRAM_FIRMWARE_MBOX,
4321                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4322         }
4323
4324         /* Prevent send BD corruption. */
4325         if (tg3_flag(tp, CLKREQ_BUG)) {
4326                 u16 oldlnkctl, newlnkctl;
4327
4328                 pci_read_config_word(tp->pdev,
4329                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4330                                      &oldlnkctl);
4331                 if (tp->link_config.active_speed == SPEED_100 ||
4332                     tp->link_config.active_speed == SPEED_10)
4333                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4334                 else
4335                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4336                 if (newlnkctl != oldlnkctl)
4337                         pci_write_config_word(tp->pdev,
4338                                               pci_pcie_cap(tp->pdev) +
4339                                               PCI_EXP_LNKCTL, newlnkctl);
4340         }
4341
4342         if (current_link_up != netif_carrier_ok(tp->dev)) {
4343                 if (current_link_up)
4344                         netif_carrier_on(tp->dev);
4345                 else
4346                         netif_carrier_off(tp->dev);
4347                 tg3_link_report(tp);
4348         }
4349
4350         return 0;
4351 }
4352
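/* Software 1000BASE-X autonegotiation state.
 *
 * The states and MR_* flags below appear to mirror the IEEE 802.3
 * Clause 37 arbitration state diagram and its MR_* management
 * variables; tg3_fiber_aneg_smachine() advances the machine one
 * tick at a time, and fiber_autoneg() below supplies the tick loop.
 */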
4353 struct tg3_fiber_aneginfo {
4354         int state;
4355 #define ANEG_STATE_UNKNOWN              0
4356 #define ANEG_STATE_AN_ENABLE            1
4357 #define ANEG_STATE_RESTART_INIT         2
4358 #define ANEG_STATE_RESTART              3
4359 #define ANEG_STATE_DISABLE_LINK_OK      4
4360 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4361 #define ANEG_STATE_ABILITY_DETECT       6
4362 #define ANEG_STATE_ACK_DETECT_INIT      7
4363 #define ANEG_STATE_ACK_DETECT           8
4364 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4365 #define ANEG_STATE_COMPLETE_ACK         10
4366 #define ANEG_STATE_IDLE_DETECT_INIT     11
4367 #define ANEG_STATE_IDLE_DETECT          12
4368 #define ANEG_STATE_LINK_OK              13
4369 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4370 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4371
4372         u32 flags;
4373 #define MR_AN_ENABLE            0x00000001
4374 #define MR_RESTART_AN           0x00000002
4375 #define MR_AN_COMPLETE          0x00000004
4376 #define MR_PAGE_RX              0x00000008
4377 #define MR_NP_LOADED            0x00000010
4378 #define MR_TOGGLE_TX            0x00000020
4379 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4380 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4381 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4382 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4383 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4384 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4385 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4386 #define MR_TOGGLE_RX            0x00002000
4387 #define MR_NP_RX                0x00004000
4388
4389 #define MR_LINK_OK              0x80000000
4390
4391         unsigned long link_time, cur_time;
4392
4393         u32 ability_match_cfg;
4394         int ability_match_count;
4395
4396         char ability_match, idle_match, ack_match;
4397
4398         u32 txconfig, rxconfig;
4399 #define ANEG_CFG_NP             0x00000080
4400 #define ANEG_CFG_ACK            0x00000040
4401 #define ANEG_CFG_RF2            0x00000020
4402 #define ANEG_CFG_RF1            0x00000010
4403 #define ANEG_CFG_PS2            0x00000001
4404 #define ANEG_CFG_PS1            0x00008000
4405 #define ANEG_CFG_HD             0x00004000
4406 #define ANEG_CFG_FD             0x00002000
4407 #define ANEG_CFG_INVAL          0x00001f06
4408
4409 };
4410 #define ANEG_OK         0
4411 #define ANEG_DONE       1
4412 #define ANEG_TIMER_ENAB 2
4413 #define ANEG_FAILED     -1
4414
4415 #define ANEG_STATE_SETTLE_TIME  10000
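
/* One "tick" is one call to tg3_fiber_aneg_smachine(); the caller
 * delays udelay(1) between ticks, so ANEG_STATE_SETTLE_TIME is
 * roughly 10 ms and the 195000-tick budget in fiber_autoneg() is
 * roughly 195 ms (ignoring register access latency).
 */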
4416
4417 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4418                                    struct tg3_fiber_aneginfo *ap)
4419 {
4420         u16 flowctrl;
4421         unsigned long delta;
4422         u32 rx_cfg_reg;
4423         int ret;
4424
4425         if (ap->state == ANEG_STATE_UNKNOWN) {
4426                 ap->rxconfig = 0;
4427                 ap->link_time = 0;
4428                 ap->cur_time = 0;
4429                 ap->ability_match_cfg = 0;
4430                 ap->ability_match_count = 0;
4431                 ap->ability_match = 0;
4432                 ap->idle_match = 0;
4433                 ap->ack_match = 0;
4434         }
4435         ap->cur_time++;
4436
4437         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4438                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4439
4440                 if (rx_cfg_reg != ap->ability_match_cfg) {
4441                         ap->ability_match_cfg = rx_cfg_reg;
4442                         ap->ability_match = 0;
4443                         ap->ability_match_count = 0;
4444                 } else {
4445                         if (++ap->ability_match_count > 1) {
4446                                 ap->ability_match = 1;
4447                                 ap->ability_match_cfg = rx_cfg_reg;
4448                         }
4449                 }
4450                 if (rx_cfg_reg & ANEG_CFG_ACK)
4451                         ap->ack_match = 1;
4452                 else
4453                         ap->ack_match = 0;
4454
4455                 ap->idle_match = 0;
4456         } else {
4457                 ap->idle_match = 1;
4458                 ap->ability_match_cfg = 0;
4459                 ap->ability_match_count = 0;
4460                 ap->ability_match = 0;
4461                 ap->ack_match = 0;
4462
4463                 rx_cfg_reg = 0;
4464         }
4465
4466         ap->rxconfig = rx_cfg_reg;
4467         ret = ANEG_OK;
4468
4469         switch (ap->state) {
4470         case ANEG_STATE_UNKNOWN:
4471                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4472                         ap->state = ANEG_STATE_AN_ENABLE;
4473
4474                 /* fallthru */
4475         case ANEG_STATE_AN_ENABLE:
4476                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4477                 if (ap->flags & MR_AN_ENABLE) {
4478                         ap->link_time = 0;
4479                         ap->cur_time = 0;
4480                         ap->ability_match_cfg = 0;
4481                         ap->ability_match_count = 0;
4482                         ap->ability_match = 0;
4483                         ap->idle_match = 0;
4484                         ap->ack_match = 0;
4485
4486                         ap->state = ANEG_STATE_RESTART_INIT;
4487                 } else {
4488                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4489                 }
4490                 break;
4491
4492         case ANEG_STATE_RESTART_INIT:
4493                 ap->link_time = ap->cur_time;
4494                 ap->flags &= ~(MR_NP_LOADED);
4495                 ap->txconfig = 0;
4496                 tw32(MAC_TX_AUTO_NEG, 0);
4497                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4498                 tw32_f(MAC_MODE, tp->mac_mode);
4499                 udelay(40);
4500
4501                 ret = ANEG_TIMER_ENAB;
4502                 ap->state = ANEG_STATE_RESTART;
4503
4504                 /* fallthru */
4505         case ANEG_STATE_RESTART:
4506                 delta = ap->cur_time - ap->link_time;
4507                 if (delta > ANEG_STATE_SETTLE_TIME)
4508                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4509                 else
4510                         ret = ANEG_TIMER_ENAB;
4511                 break;
4512
4513         case ANEG_STATE_DISABLE_LINK_OK:
4514                 ret = ANEG_DONE;
4515                 break;
4516
4517         case ANEG_STATE_ABILITY_DETECT_INIT:
4518                 ap->flags &= ~(MR_TOGGLE_TX);
4519                 ap->txconfig = ANEG_CFG_FD;
4520                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4521                 if (flowctrl & ADVERTISE_1000XPAUSE)
4522                         ap->txconfig |= ANEG_CFG_PS1;
4523                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4524                         ap->txconfig |= ANEG_CFG_PS2;
4525                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4526                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4527                 tw32_f(MAC_MODE, tp->mac_mode);
4528                 udelay(40);
4529
4530                 ap->state = ANEG_STATE_ABILITY_DETECT;
4531                 break;
4532
4533         case ANEG_STATE_ABILITY_DETECT:
4534                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4535                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4536                 break;
4537
4538         case ANEG_STATE_ACK_DETECT_INIT:
4539                 ap->txconfig |= ANEG_CFG_ACK;
4540                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4541                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4542                 tw32_f(MAC_MODE, tp->mac_mode);
4543                 udelay(40);
4544
4545                 ap->state = ANEG_STATE_ACK_DETECT;
4546
4547                 /* fallthru */
4548         case ANEG_STATE_ACK_DETECT:
4549                 if (ap->ack_match != 0) {
4550                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4551                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4552                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4553                         } else {
4554                                 ap->state = ANEG_STATE_AN_ENABLE;
4555                         }
4556                 } else if (ap->ability_match != 0 &&
4557                            ap->rxconfig == 0) {
4558                         ap->state = ANEG_STATE_AN_ENABLE;
4559                 }
4560                 break;
4561
4562         case ANEG_STATE_COMPLETE_ACK_INIT:
4563                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4564                         ret = ANEG_FAILED;
4565                         break;
4566                 }
4567                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4568                                MR_LP_ADV_HALF_DUPLEX |
4569                                MR_LP_ADV_SYM_PAUSE |
4570                                MR_LP_ADV_ASYM_PAUSE |
4571                                MR_LP_ADV_REMOTE_FAULT1 |
4572                                MR_LP_ADV_REMOTE_FAULT2 |
4573                                MR_LP_ADV_NEXT_PAGE |
4574                                MR_TOGGLE_RX |
4575                                MR_NP_RX);
4576                 if (ap->rxconfig & ANEG_CFG_FD)
4577                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4578                 if (ap->rxconfig & ANEG_CFG_HD)
4579                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4580                 if (ap->rxconfig & ANEG_CFG_PS1)
4581                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4582                 if (ap->rxconfig & ANEG_CFG_PS2)
4583                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4584                 if (ap->rxconfig & ANEG_CFG_RF1)
4585                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4586                 if (ap->rxconfig & ANEG_CFG_RF2)
4587                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4588                 if (ap->rxconfig & ANEG_CFG_NP)
4589                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4590
4591                 ap->link_time = ap->cur_time;
4592
4593                 ap->flags ^= (MR_TOGGLE_TX);
4594                 if (ap->rxconfig & 0x0008)
4595                         ap->flags |= MR_TOGGLE_RX;
4596                 if (ap->rxconfig & ANEG_CFG_NP)
4597                         ap->flags |= MR_NP_RX;
4598                 ap->flags |= MR_PAGE_RX;
4599
4600                 ap->state = ANEG_STATE_COMPLETE_ACK;
4601                 ret = ANEG_TIMER_ENAB;
4602                 break;
4603
4604         case ANEG_STATE_COMPLETE_ACK:
4605                 if (ap->ability_match != 0 &&
4606                     ap->rxconfig == 0) {
4607                         ap->state = ANEG_STATE_AN_ENABLE;
4608                         break;
4609                 }
4610                 delta = ap->cur_time - ap->link_time;
4611                 if (delta > ANEG_STATE_SETTLE_TIME) {
4612                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4613                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4614                         } else {
4615                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4616                                     !(ap->flags & MR_NP_RX)) {
4617                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4618                                 } else {
4619                                         ret = ANEG_FAILED;
4620                                 }
4621                         }
4622                 }
4623                 break;
4624
4625         case ANEG_STATE_IDLE_DETECT_INIT:
4626                 ap->link_time = ap->cur_time;
4627                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4628                 tw32_f(MAC_MODE, tp->mac_mode);
4629                 udelay(40);
4630
4631                 ap->state = ANEG_STATE_IDLE_DETECT;
4632                 ret = ANEG_TIMER_ENAB;
4633                 break;
4634
4635         case ANEG_STATE_IDLE_DETECT:
4636                 if (ap->ability_match != 0 &&
4637                     ap->rxconfig == 0) {
4638                         ap->state = ANEG_STATE_AN_ENABLE;
4639                         break;
4640                 }
4641                 delta = ap->cur_time - ap->link_time;
4642                 if (delta > ANEG_STATE_SETTLE_TIME) {
4643                         /* XXX another gem from the Broadcom driver :( */
4644                         ap->state = ANEG_STATE_LINK_OK;
4645                 }
4646                 break;
4647
4648         case ANEG_STATE_LINK_OK:
4649                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4650                 ret = ANEG_DONE;
4651                 break;
4652
4653         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4654                 /* ??? unimplemented */
4655                 break;
4656
4657         case ANEG_STATE_NEXT_PAGE_WAIT:
4658                 /* ??? unimplemented */
4659                 break;
4660
4661         default:
4662                 ret = ANEG_FAILED;
4663                 break;
4664         }
4665
4666         return ret;
4667 }
4668
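/* Run the state machine until it settles.  Returns nonzero when the
 * machine finished (ANEG_DONE) and at least one of MR_AN_COMPLETE,
 * MR_LINK_OK or MR_LP_ADV_FULL_DUPLEX is set; *txflags reports the
 * config word we transmitted, *rxflags the resulting MR_* flags.
 */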
4669 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4670 {
4671         int res = 0;
4672         struct tg3_fiber_aneginfo aninfo;
4673         int status = ANEG_FAILED;
4674         unsigned int tick;
4675         u32 tmp;
4676
4677         tw32_f(MAC_TX_AUTO_NEG, 0);
4678
4679         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4680         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4681         udelay(40);
4682
4683         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4684         udelay(40);
4685
4686         memset(&aninfo, 0, sizeof(aninfo));
4687         aninfo.flags |= MR_AN_ENABLE;
4688         aninfo.state = ANEG_STATE_UNKNOWN;
4689         aninfo.cur_time = 0;
4690         tick = 0;
4691         while (++tick < 195000) {
4692                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4693                 if (status == ANEG_DONE || status == ANEG_FAILED)
4694                         break;
4695
4696                 udelay(1);
4697         }
4698
4699         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4700         tw32_f(MAC_MODE, tp->mac_mode);
4701         udelay(40);
4702
4703         *txflags = aninfo.txconfig;
4704         *rxflags = aninfo.flags;
4705
4706         if (status == ANEG_DONE &&
4707             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4708                              MR_LP_ADV_FULL_DUPLEX)))
4709                 res = 1;
4710
4711         return res;
4712 }
4713
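/* Bring up the BCM8002 SerDes PHY.  The registers written below
 * (0x10, 0x11, 0x13, 0x16, 0x18) fall in the vendor-specific MII
 * register range; the magic values are presumably Broadcom's
 * reference initialization sequence for this part.
 */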
4714 static void tg3_init_bcm8002(struct tg3 *tp)
4715 {
4716         u32 mac_status = tr32(MAC_STATUS);
4717         int i;
4718
4719         /* Reset when initializing for the first time, or when we have a link. */
4720         if (tg3_flag(tp, INIT_COMPLETE) &&
4721             !(mac_status & MAC_STATUS_PCS_SYNCED))
4722                 return;
4723
4724         /* Set PLL lock range. */
4725         tg3_writephy(tp, 0x16, 0x8007);
4726
4727         /* SW reset */
4728         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4729
4730         /* Wait for reset to complete. */
4731         /* XXX schedule_timeout() ... */
4732         for (i = 0; i < 500; i++)
4733                 udelay(10);
4734
4735         /* Config mode; select PMA/Ch 1 regs. */
4736         tg3_writephy(tp, 0x10, 0x8411);
4737
4738         /* Enable auto-lock and comdet, select txclk for tx. */
4739         tg3_writephy(tp, 0x11, 0x0a10);
4740
4741         tg3_writephy(tp, 0x18, 0x00a0);
4742         tg3_writephy(tp, 0x16, 0x41ff);
4743
4744         /* Assert and deassert POR. */
4745         tg3_writephy(tp, 0x13, 0x0400);
4746         udelay(40);
4747         tg3_writephy(tp, 0x13, 0x0000);
4748
4749         tg3_writephy(tp, 0x11, 0x0a50);
4750         udelay(40);
4751         tg3_writephy(tp, 0x11, 0x0a10);
4752
4753         /* Wait for signal to stabilize */
4754         /* XXX schedule_timeout() ... */
4755         for (i = 0; i < 15000; i++)
4756                 udelay(10);
4757
4758         /* Deselect the channel register so we can read the PHYID
4759          * later.
4760          */
4761         tg3_writephy(tp, 0x10, 0x8011);
4762 }
4763
4764 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4765 {
4766         u16 flowctrl;
4767         u32 sg_dig_ctrl, sg_dig_status;
4768         u32 serdes_cfg, expected_sg_dig_ctrl;
4769         int workaround, port_a;
4770         int current_link_up;
4771
4772         serdes_cfg = 0;
4773         expected_sg_dig_ctrl = 0;
4774         workaround = 0;
4775         port_a = 1;
4776         current_link_up = 0;
4777
4778         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4779             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4780                 workaround = 1;
4781                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4782                         port_a = 0;
4783
4784                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4785                 /* preserve bits 20-23 for voltage regulator */
4786                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4787         }
4788
4789         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4790
4791         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4792                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4793                         if (workaround) {
4794                                 u32 val = serdes_cfg;
4795
4796                                 if (port_a)
4797                                         val |= 0xc010000;
4798                                 else
4799                                         val |= 0x4010000;
4800                                 tw32_f(MAC_SERDES_CFG, val);
4801                         }
4802
4803                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4804                 }
4805                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4806                         tg3_setup_flow_control(tp, 0, 0);
4807                         current_link_up = 1;
4808                 }
4809                 goto out;
4810         }
4811
4812         /* Want auto-negotiation.  */
4813         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4814
4815         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4816         if (flowctrl & ADVERTISE_1000XPAUSE)
4817                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4818         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4819                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4820
4821         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4822                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4823                     tp->serdes_counter &&
4824                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4825                                     MAC_STATUS_RCVD_CFG)) ==
4826                      MAC_STATUS_PCS_SYNCED)) {
4827                         tp->serdes_counter--;
4828                         current_link_up = 1;
4829                         goto out;
4830                 }
4831 restart_autoneg:
4832                 if (workaround)
4833                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4834                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4835                 udelay(5);
4836                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4837
4838                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4839                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4840         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4841                                  MAC_STATUS_SIGNAL_DET)) {
4842                 sg_dig_status = tr32(SG_DIG_STATUS);
4843                 mac_status = tr32(MAC_STATUS);
4844
4845                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4846                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4847                         u32 local_adv = 0, remote_adv = 0;
4848
4849                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4850                                 local_adv |= ADVERTISE_1000XPAUSE;
4851                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4852                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4853
4854                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4855                                 remote_adv |= LPA_1000XPAUSE;
4856                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4857                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4858
4859                         tp->link_config.rmt_adv =
4860                                            mii_adv_to_ethtool_adv_x(remote_adv);
4861
4862                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4863                         current_link_up = 1;
4864                         tp->serdes_counter = 0;
4865                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4866                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4867                         if (tp->serdes_counter)
4868                                 tp->serdes_counter--;
4869                         else {
4870                                 if (workaround) {
4871                                         u32 val = serdes_cfg;
4872
4873                                         if (port_a)
4874                                                 val |= 0xc010000;
4875                                         else
4876                                                 val |= 0x4010000;
4877
4878                                         tw32_f(MAC_SERDES_CFG, val);
4879                                 }
4880
4881                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4882                                 udelay(40);
4883
4884                                 /* Link parallel detection: link is up
4885                                  * only if we have PCS_SYNC and are not
4886                                  * receiving config code words. */
4887                                 mac_status = tr32(MAC_STATUS);
4888                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4889                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4890                                         tg3_setup_flow_control(tp, 0, 0);
4891                                         current_link_up = 1;
4892                                         tp->phy_flags |=
4893                                                 TG3_PHYFLG_PARALLEL_DETECT;
4894                                         tp->serdes_counter =
4895                                                 SERDES_PARALLEL_DET_TIMEOUT;
4896                                 } else
4897                                         goto restart_autoneg;
4898                         }
4899                 }
4900         } else {
4901                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4902                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4903         }
4904
4905 out:
4906         return current_link_up;
4907 }
4908
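/* Fiber link setup using the software autoneg state machine above;
 * used when the SG_DIG hardware autoneg block is not in play (see
 * the HW_AUTONEG check in tg3_setup_fiber_phy() below).
 */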
4909 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4910 {
4911         int current_link_up = 0;
4912
4913         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4914                 goto out;
4915
4916         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4917                 u32 txflags, rxflags;
4918                 int i;
4919
4920                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4921                         u32 local_adv = 0, remote_adv = 0;
4922
4923                         if (txflags & ANEG_CFG_PS1)
4924                                 local_adv |= ADVERTISE_1000XPAUSE;
4925                         if (txflags & ANEG_CFG_PS2)
4926                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4927
4928                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4929                                 remote_adv |= LPA_1000XPAUSE;
4930                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4931                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4932
4933                         tp->link_config.rmt_adv =
4934                                            mii_adv_to_ethtool_adv_x(remote_adv);
4935
4936                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4937
4938                         current_link_up = 1;
4939                 }
4940                 for (i = 0; i < 30; i++) {
4941                         udelay(20);
4942                         tw32_f(MAC_STATUS,
4943                                (MAC_STATUS_SYNC_CHANGED |
4944                                 MAC_STATUS_CFG_CHANGED));
4945                         udelay(40);
4946                         if ((tr32(MAC_STATUS) &
4947                              (MAC_STATUS_SYNC_CHANGED |
4948                               MAC_STATUS_CFG_CHANGED)) == 0)
4949                                 break;
4950                 }
4951
4952                 mac_status = tr32(MAC_STATUS);
4953                 if (current_link_up == 0 &&
4954                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4955                     !(mac_status & MAC_STATUS_RCVD_CFG))
4956                         current_link_up = 1;
4957         } else {
4958                 tg3_setup_flow_control(tp, 0, 0);
4959
4960                 /* Forcing 1000FD link up. */
4961                 current_link_up = 1;
4962
4963                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4964                 udelay(40);
4965
4966                 tw32_f(MAC_MODE, tp->mac_mode);
4967                 udelay(40);
4968         }
4969
4970 out:
4971         return current_link_up;
4972 }
4973
4974 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4975 {
4976         u32 orig_pause_cfg;
4977         u16 orig_active_speed;
4978         u8 orig_active_duplex;
4979         u32 mac_status;
4980         int current_link_up;
4981         int i;
4982
4983         orig_pause_cfg = tp->link_config.active_flowctrl;
4984         orig_active_speed = tp->link_config.active_speed;
4985         orig_active_duplex = tp->link_config.active_duplex;
4986
4987         if (!tg3_flag(tp, HW_AUTONEG) &&
4988             netif_carrier_ok(tp->dev) &&
4989             tg3_flag(tp, INIT_COMPLETE)) {
4990                 mac_status = tr32(MAC_STATUS);
4991                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4992                                MAC_STATUS_SIGNAL_DET |
4993                                MAC_STATUS_CFG_CHANGED |
4994                                MAC_STATUS_RCVD_CFG);
4995                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4996                                    MAC_STATUS_SIGNAL_DET)) {
4997                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4998                                             MAC_STATUS_CFG_CHANGED));
4999                         return 0;
5000                 }
5001         }
5002
5003         tw32_f(MAC_TX_AUTO_NEG, 0);
5004
5005         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5006         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5007         tw32_f(MAC_MODE, tp->mac_mode);
5008         udelay(40);
5009
5010         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5011                 tg3_init_bcm8002(tp);
5012
5013         /* Enable link change event even when serdes polling.  */
5014         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5015         udelay(40);
5016
5017         current_link_up = 0;
5018         tp->link_config.rmt_adv = 0;
5019         mac_status = tr32(MAC_STATUS);
5020
5021         if (tg3_flag(tp, HW_AUTONEG))
5022                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5023         else
5024                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5025
5026         tp->napi[0].hw_status->status =
5027                 (SD_STATUS_UPDATED |
5028                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5029
5030         for (i = 0; i < 100; i++) {
5031                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5032                                     MAC_STATUS_CFG_CHANGED));
5033                 udelay(5);
5034                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5035                                          MAC_STATUS_CFG_CHANGED |
5036                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5037                         break;
5038         }
5039
5040         mac_status = tr32(MAC_STATUS);
5041         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5042                 current_link_up = 0;
5043                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5044                     tp->serdes_counter == 0) {
5045                         tw32_f(MAC_MODE, (tp->mac_mode |
5046                                           MAC_MODE_SEND_CONFIGS));
5047                         udelay(1);
5048                         tw32_f(MAC_MODE, tp->mac_mode);
5049                 }
5050         }
5051
5052         if (current_link_up == 1) {
5053                 tp->link_config.active_speed = SPEED_1000;
5054                 tp->link_config.active_duplex = DUPLEX_FULL;
5055                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5056                                     LED_CTRL_LNKLED_OVERRIDE |
5057                                     LED_CTRL_1000MBPS_ON));
5058         } else {
5059                 tp->link_config.active_speed = SPEED_UNKNOWN;
5060                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5061                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5062                                     LED_CTRL_LNKLED_OVERRIDE |
5063                                     LED_CTRL_TRAFFIC_OVERRIDE));
5064         }
5065
5066         if (current_link_up != netif_carrier_ok(tp->dev)) {
5067                 if (current_link_up)
5068                         netif_carrier_on(tp->dev);
5069                 else
5070                         netif_carrier_off(tp->dev);
5071                 tg3_link_report(tp);
5072         } else {
5073                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5074                 if (orig_pause_cfg != now_pause_cfg ||
5075                     orig_active_speed != tp->link_config.active_speed ||
5076                     orig_active_duplex != tp->link_config.active_duplex)
5077                         tg3_link_report(tp);
5078         }
5079
5080         return 0;
5081 }
5082
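/* Fiber link setup for SerDes PHYs managed through standard MII
 * registers (note the 5714-specific TX_STATUS_LINK_UP fixups
 * below).  BMSR is read twice here because its link status bit is
 * latched; the second read returns the current state.
 */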
5083 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5084 {
5085         int current_link_up, err = 0;
5086         u32 bmsr, bmcr;
5087         u16 current_speed;
5088         u8 current_duplex;
5089         u32 local_adv, remote_adv;
5090
5091         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5092         tw32_f(MAC_MODE, tp->mac_mode);
5093         udelay(40);
5094
5095         tw32(MAC_EVENT, 0);
5096
5097         tw32_f(MAC_STATUS,
5098              (MAC_STATUS_SYNC_CHANGED |
5099               MAC_STATUS_CFG_CHANGED |
5100               MAC_STATUS_MI_COMPLETION |
5101               MAC_STATUS_LNKSTATE_CHANGED));
5102         udelay(40);
5103
5104         if (force_reset)
5105                 tg3_phy_reset(tp);
5106
5107         current_link_up = 0;
5108         current_speed = SPEED_UNKNOWN;
5109         current_duplex = DUPLEX_UNKNOWN;
5110         tp->link_config.rmt_adv = 0;
5111
5112         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5113         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5114         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5115                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5116                         bmsr |= BMSR_LSTATUS;
5117                 else
5118                         bmsr &= ~BMSR_LSTATUS;
5119         }
5120
5121         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5122
5123         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5124             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5125                 /* do nothing, just check for link up at the end */
5126         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5127                 u32 adv, newadv;
5128
5129                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5130                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5131                                  ADVERTISE_1000XPAUSE |
5132                                  ADVERTISE_1000XPSE_ASYM |
5133                                  ADVERTISE_SLCT);
5134
5135                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5136                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5137
5138                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5139                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5140                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5141                         tg3_writephy(tp, MII_BMCR, bmcr);
5142
5143                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5144                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5145                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5146
5147                         return err;
5148                 }
5149         } else {
5150                 u32 new_bmcr;
5151
5152                 bmcr &= ~BMCR_SPEED1000;
5153                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5154
5155                 if (tp->link_config.duplex == DUPLEX_FULL)
5156                         new_bmcr |= BMCR_FULLDPLX;
5157
5158                 if (new_bmcr != bmcr) {
5159                         /* BMCR_SPEED1000 is a reserved bit that needs
5160                          * to be set on write.
5161                          */
5162                         new_bmcr |= BMCR_SPEED1000;
5163
5164                         /* Force a linkdown */
5165                         if (netif_carrier_ok(tp->dev)) {
5166                                 u32 adv;
5167
5168                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5169                                 adv &= ~(ADVERTISE_1000XFULL |
5170                                          ADVERTISE_1000XHALF |
5171                                          ADVERTISE_SLCT);
5172                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5173                                 tg3_writephy(tp, MII_BMCR, bmcr |
5174                                                            BMCR_ANRESTART |
5175                                                            BMCR_ANENABLE);
5176                                 udelay(10);
5177                                 netif_carrier_off(tp->dev);
5178                         }
5179                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5180                         bmcr = new_bmcr;
5181                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5182                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5183                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5184                             ASIC_REV_5714) {
5185                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5186                                         bmsr |= BMSR_LSTATUS;
5187                                 else
5188                                         bmsr &= ~BMSR_LSTATUS;
5189                         }
5190                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5191                 }
5192         }
5193
5194         if (bmsr & BMSR_LSTATUS) {
5195                 current_speed = SPEED_1000;
5196                 current_link_up = 1;
5197                 if (bmcr & BMCR_FULLDPLX)
5198                         current_duplex = DUPLEX_FULL;
5199                 else
5200                         current_duplex = DUPLEX_HALF;
5201
5202                 local_adv = 0;
5203                 remote_adv = 0;
5204
5205                 if (bmcr & BMCR_ANENABLE) {
5206                         u32 common;
5207
5208                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5209                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5210                         common = local_adv & remote_adv;
5211                         if (common & (ADVERTISE_1000XHALF |
5212                                       ADVERTISE_1000XFULL)) {
5213                                 if (common & ADVERTISE_1000XFULL)
5214                                         current_duplex = DUPLEX_FULL;
5215                                 else
5216                                         current_duplex = DUPLEX_HALF;
5217
5218                                 tp->link_config.rmt_adv =
5219                                            mii_adv_to_ethtool_adv_x(remote_adv);
5220                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5221                                 /* Link is up via parallel detect */
5222                         } else {
5223                                 current_link_up = 0;
5224                         }
5225                 }
5226         }
5227
5228         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5229                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5230
5231         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5232         if (tp->link_config.active_duplex == DUPLEX_HALF)
5233                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5234
5235         tw32_f(MAC_MODE, tp->mac_mode);
5236         udelay(40);
5237
5238         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5239
5240         tp->link_config.active_speed = current_speed;
5241         tp->link_config.active_duplex = current_duplex;
5242
5243         if (current_link_up != netif_carrier_ok(tp->dev)) {
5244                 if (current_link_up)
5245                         netif_carrier_on(tp->dev);
5246                 else {
5247                         netif_carrier_off(tp->dev);
5248                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5249                 }
5250                 tg3_link_report(tp);
5251         }
5252         return err;
5253 }
5254
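/* Called periodically while a SerDes link is coming up.  Once the
 * autoneg timeout expires, force the link up via parallel detection
 * if we see signal detect without config code words; if config code
 * words show up later, hand control back to autoneg.
 */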
5255 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5256 {
5257         if (tp->serdes_counter) {
5258                 /* Give autoneg time to complete. */
5259                 tp->serdes_counter--;
5260                 return;
5261         }
5262
5263         if (!netif_carrier_ok(tp->dev) &&
5264             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5265                 u32 bmcr;
5266
5267                 tg3_readphy(tp, MII_BMCR, &bmcr);
5268                 if (bmcr & BMCR_ANENABLE) {
5269                         u32 phy1, phy2;
5270
5271                         /* Select shadow register 0x1f */
5272                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5273                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5274
5275                         /* Select expansion interrupt status register */
5276                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5277                                          MII_TG3_DSP_EXP1_INT_STAT);
5278                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5279                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5280
5281                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5282                                 /* We have signal detect and not receiving
5283                                  * config code words, link is up by parallel
5284                                  * detection.
5285                                  */
5286
5287                                 bmcr &= ~BMCR_ANENABLE;
5288                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5289                                 tg3_writephy(tp, MII_BMCR, bmcr);
5290                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5291                         }
5292                 }
5293         } else if (netif_carrier_ok(tp->dev) &&
5294                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5295                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5296                 u32 phy2;
5297
5298                 /* Select expansion interrupt status register */
5299                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5300                                  MII_TG3_DSP_EXP1_INT_STAT);
5301                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5302                 if (phy2 & 0x20) {
5303                         u32 bmcr;
5304
5305                         /* Config code words received, turn on autoneg. */
5306                         tg3_readphy(tp, MII_BMCR, &bmcr);
5307                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5308
5309                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5310
5311                 }
5312         }
5313 }
5314
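/* Top-level link setup: dispatch to the SerDes, MII-SerDes or copper
 * handler, then retune the link-dependent MAC state (clock prescaler
 * on 5784_AX, transmit IPG/slot time, statistics coalescing, and the
 * ASPM L1 entry threshold workaround).
 */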
5315 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5316 {
5317         u32 val;
5318         int err;
5319
5320         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5321                 err = tg3_setup_fiber_phy(tp, force_reset);
5322         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5323                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5324         else
5325                 err = tg3_setup_copper_phy(tp, force_reset);
5326
5327         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5328                 u32 scale;
5329
5330                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5331                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5332                         scale = 65;
5333                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5334                         scale = 6;
5335                 else
5336                         scale = 12;
5337
5338                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5339                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5340                 tw32(GRC_MISC_CFG, val);
5341         }
5342
5343         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5344               (6 << TX_LENGTHS_IPG_SHIFT);
5345         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5346                 val |= tr32(MAC_TX_LENGTHS) &
5347                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5348                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5349
5350         if (tp->link_config.active_speed == SPEED_1000 &&
5351             tp->link_config.active_duplex == DUPLEX_HALF)
5352                 tw32(MAC_TX_LENGTHS, val |
5353                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5354         else
5355                 tw32(MAC_TX_LENGTHS, val |
5356                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5357
5358         if (!tg3_flag(tp, 5705_PLUS)) {
5359                 if (netif_carrier_ok(tp->dev)) {
5360                         tw32(HOSTCC_STAT_COAL_TICKS,
5361                              tp->coal.stats_block_coalesce_usecs);
5362                 } else {
5363                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5364                 }
5365         }
5366
5367         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5368                 val = tr32(PCIE_PWR_MGMT_THRESH);
5369                 if (!netif_carrier_ok(tp->dev))
5370                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5371                               tp->pwrmgmt_thresh;
5372                 else
5373                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5374                 tw32(PCIE_PWR_MGMT_THRESH, val);
5375         }
5376
5377         return err;
5378 }
5379
5380 static inline int tg3_irq_sync(struct tg3 *tp)
5381 {
5382         return tp->irq_sync;
5383 }
5384
5385 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5386 {
5387         int i;
5388
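        /* Bias dst by off bytes so the word read from register offset
         * (off + i) lands at dst[(off + i) / 4]; tg3_dump_state()
         * relies on this when printing each word next to offset i * 4.
         */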
5389         dst = (u32 *)((u8 *)dst + off);
5390         for (i = 0; i < len; i += sizeof(u32))
5391                 *dst++ = tr32(off + i);
5392 }
5393
5394 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5395 {
5396         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5397         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5398         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5399         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5400         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5401         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5402         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5403         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5404         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5405         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5406         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5407         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5408         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5409         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5410         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5411         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5412         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5413         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5414         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5415
5416         if (tg3_flag(tp, SUPPORT_MSIX))
5417                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5418
5419         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5420         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5421         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5422         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5423         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5424         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5425         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5426         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5427
5428         if (!tg3_flag(tp, 5705_PLUS)) {
5429                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5430                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5431                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5432         }
5433
5434         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5435         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5436         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5437         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5438         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5439
5440         if (tg3_flag(tp, NVRAM))
5441                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5442 }
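
/* A minimal usage sketch (hypothetical, not part of the driver):
 * reusing tg3_rd32_loop() for a one-off dump of the MAC block.
 * Because the helper biases its destination by the register offset,
 * the buffer must span the offsets being read, which is why the full
 * TG3_REG_BLK_SIZE is allocated, as tg3_dump_state() below does.
 */
#if 0
        u32 *regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);

        if (regs) {
                tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
                netdev_info(tp->dev, "MAC_MODE=0x%08x\n",
                            regs[MAC_MODE / sizeof(u32)]);
                kfree(regs);
        }
#endif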
5443
5444 static void tg3_dump_state(struct tg3 *tp)
5445 {
5446         int i;
5447         u32 *regs;
5448
5449         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5450         if (!regs) {
5451                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5452                 return;
5453         }
5454
5455         if (tg3_flag(tp, PCI_EXPRESS)) {
5456                 /* Read up to but not including private PCI registers */
5457                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5458                         regs[i / sizeof(u32)] = tr32(i);
5459         } else
5460                 tg3_dump_legacy_regs(tp, regs);
5461
5462         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5463                 if (!regs[i + 0] && !regs[i + 1] &&
5464                     !regs[i + 2] && !regs[i + 3])
5465                         continue;
5466
5467                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5468                            i * 4,
5469                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5470         }
5471
5472         kfree(regs);
5473
5474         for (i = 0; i < tp->irq_cnt; i++) {
5475                 struct tg3_napi *tnapi = &tp->napi[i];
5476
5477                 /* SW status block */
5478                 netdev_err(tp->dev,
5479                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5480                            i,
5481                            tnapi->hw_status->status,
5482                            tnapi->hw_status->status_tag,
5483                            tnapi->hw_status->rx_jumbo_consumer,
5484                            tnapi->hw_status->rx_consumer,
5485                            tnapi->hw_status->rx_mini_consumer,
5486                            tnapi->hw_status->idx[0].rx_producer,
5487                            tnapi->hw_status->idx[0].tx_consumer);
5488
5489                 netdev_err(tp->dev,
5490                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5491                            i,
5492                            tnapi->last_tag, tnapi->last_irq_tag,
5493                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5494                            tnapi->rx_rcb_ptr,
5495                            tnapi->prodring.rx_std_prod_idx,
5496                            tnapi->prodring.rx_std_cons_idx,
5497                            tnapi->prodring.rx_jmb_prod_idx,
5498                            tnapi->prodring.rx_jmb_cons_idx);
5499         }
5500 }
5501
5502 /* This is called whenever we suspect that the system chipset is re-
5503  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5504  * is bogus tx completions. We try to recover by setting the
5505  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5506  * in the workqueue.
5507  */
5508 static void tg3_tx_recover(struct tg3 *tp)
5509 {
5510         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5511                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5512
5513         netdev_warn(tp->dev,
5514                     "The system may be re-ordering memory-mapped I/O "
5515                     "cycles to the network device, attempting to recover. "
5516                     "Please report the problem to the driver maintainer "
5517                     "and include system chipset information.\n");
5518
5519         spin_lock(&tp->lock);
5520         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5521         spin_unlock(&tp->lock);
5522 }
5523
5524 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5525 {
5526         /* Tell compiler to fetch tx indices from memory. */
5527         barrier();
5528         return tnapi->tx_pending -
5529                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5530 }
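
/* A worked example (a sketch, assuming the default 512-entry TX ring,
 * mask 511): with tx_cons = 510 and tx_prod = 5 after the producer has
 * wrapped, (5 - 510) & 511 = 7 descriptors are in flight, so with
 * tx_pending = 511 this returns 504 free slots.
 */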
5531
5532 /* Tigon3 never reports partial packet sends.  So we do not
5533  * need special logic to handle SKBs that have not had all
5534  * of their frags sent yet, like SunGEM does.
5535  */
5536 static void tg3_tx(struct tg3_napi *tnapi)
5537 {
5538         struct tg3 *tp = tnapi->tp;
5539         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5540         u32 sw_idx = tnapi->tx_cons;
5541         struct netdev_queue *txq;
5542         int index = tnapi - tp->napi;
5543         unsigned int pkts_compl = 0, bytes_compl = 0;
5544
5545         if (tg3_flag(tp, ENABLE_TSS))
5546                 index--;
5547
5548         txq = netdev_get_tx_queue(tp->dev, index);
5549
5550         while (sw_idx != hw_idx) {
5551                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5552                 struct sk_buff *skb = ri->skb;
5553                 int i, tx_bug = 0;
5554
5555                 if (unlikely(skb == NULL)) {
5556                         tg3_tx_recover(tp);
5557                         return;
5558                 }
5559
5560                 pci_unmap_single(tp->pdev,
5561                                  dma_unmap_addr(ri, mapping),
5562                                  skb_headlen(skb),
5563                                  PCI_DMA_TODEVICE);
5564
5565                 ri->skb = NULL;
5566
5567                 while (ri->fragmented) {
5568                         ri->fragmented = false;
5569                         sw_idx = NEXT_TX(sw_idx);
5570                         ri = &tnapi->tx_buffers[sw_idx];
5571                 }
5572
5573                 sw_idx = NEXT_TX(sw_idx);
5574
5575                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5576                         ri = &tnapi->tx_buffers[sw_idx];
5577                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5578                                 tx_bug = 1;
5579
5580                         pci_unmap_page(tp->pdev,
5581                                        dma_unmap_addr(ri, mapping),
5582                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5583                                        PCI_DMA_TODEVICE);
5584
5585                         while (ri->fragmented) {
5586                                 ri->fragmented = false;
5587                                 sw_idx = NEXT_TX(sw_idx);
5588                                 ri = &tnapi->tx_buffers[sw_idx];
5589                         }
5590
5591                         sw_idx = NEXT_TX(sw_idx);
5592                 }
5593
5594                 pkts_compl++;
5595                 bytes_compl += skb->len;
5596
5597                 dev_kfree_skb(skb);
5598
5599                 if (unlikely(tx_bug)) {
5600                         tg3_tx_recover(tp);
5601                         return;
5602                 }
5603         }
5604
5605         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5606
5607         tnapi->tx_cons = sw_idx;
5608
5609         /* Need to make the tx_cons update visible to tg3_start_xmit()
5610          * before checking for netif_queue_stopped().  Without the
5611          * memory barrier, there is a small possibility that tg3_start_xmit()
5612          * will miss it and cause the queue to be stopped forever.
5613          */
5614         smp_mb();
5615
5616         if (unlikely(netif_tx_queue_stopped(txq) &&
5617                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5618                 __netif_tx_lock(txq, smp_processor_id());
5619                 if (netif_tx_queue_stopped(txq) &&
5620                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5621                         netif_tx_wake_queue(txq);
5622                 __netif_tx_unlock(txq);
5623         }
5624 }
5625
5626 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5627 {
5628         if (!ri->data)
5629                 return;
5630
5631         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5632                          map_sz, PCI_DMA_FROMDEVICE);
5633         kfree(ri->data);
5634         ri->data = NULL;
5635 }
5636
5637 /* Returns size of the data buffer allocated, or < 0 on error.
5638  *
5639  * We only need to fill in the address because the other members
5640  * of the RX descriptor are invariant, see tg3_init_rings.
5641  *
5642  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5643  * posting buffers we only dirty the first cache line of the RX
5644  * descriptor (containing the address).  Whereas for the RX status
5645  * buffers the cpu only reads the last cacheline of the RX descriptor
5646  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5647  */
5648 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5649                             u32 opaque_key, u32 dest_idx_unmasked)
5650 {
5651         struct tg3_rx_buffer_desc *desc;
5652         struct ring_info *map;
5653         u8 *data;
5654         dma_addr_t mapping;
5655         int skb_size, data_size, dest_idx;
5656
5657         switch (opaque_key) {
5658         case RXD_OPAQUE_RING_STD:
5659                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5660                 desc = &tpr->rx_std[dest_idx];
5661                 map = &tpr->rx_std_buffers[dest_idx];
5662                 data_size = tp->rx_pkt_map_sz;
5663                 break;
5664
5665         case RXD_OPAQUE_RING_JUMBO:
5666                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5667                 desc = &tpr->rx_jmb[dest_idx].std;
5668                 map = &tpr->rx_jmb_buffers[dest_idx];
5669                 data_size = TG3_RX_JMB_MAP_SZ;
5670                 break;
5671
5672         default:
5673                 return -EINVAL;
5674         }
5675
5676         /* Do not overwrite any of the map or rp information
5677          * until we are sure we can commit to a new buffer.
5678          *
5679          * Callers depend upon this behavior and assume that
5680          * we leave everything unchanged if we fail.
5681          */
5682         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5683                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5684         data = kmalloc(skb_size, GFP_ATOMIC);
5685         if (!data)
5686                 return -ENOMEM;
5687
5688         mapping = pci_map_single(tp->pdev,
5689                                  data + TG3_RX_OFFSET(tp),
5690                                  data_size,
5691                                  PCI_DMA_FROMDEVICE);
5692         if (pci_dma_mapping_error(tp->pdev, mapping)) {
5693                 kfree(data);
5694                 return -EIO;
5695         }
5696
5697         map->data = data;
5698         dma_unmap_addr_set(map, mapping, mapping);
5699
5700         desc->addr_hi = ((u64)mapping >> 32);
5701         desc->addr_lo = ((u64)mapping & 0xffffffff);
5702
5703         return data_size;
5704 }
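
/* Worked example (hypothetical numbers): with a 1536-byte map size and
 * a 2-byte TG3_RX_OFFSET(), the allocation above reserves
 *
 *      skb_size = SKB_DATA_ALIGN(1536 + 2) +
 *                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *
 * so that a later build_skb() call can place its skb_shared_info at the
 * end of the buffer.  Only data_size bytes starting at
 * data + TG3_RX_OFFSET(tp) are DMA-mapped; the aligned tail stays
 * cpu-private.
 */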
5705
5706 /* We only need to copy the address over because the other
5707  * members of the RX descriptor are invariant.  See notes above
5708  * tg3_alloc_rx_data for full details.
5709  */
5710 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5711                            struct tg3_rx_prodring_set *dpr,
5712                            u32 opaque_key, int src_idx,
5713                            u32 dest_idx_unmasked)
5714 {
5715         struct tg3 *tp = tnapi->tp;
5716         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5717         struct ring_info *src_map, *dest_map;
5718         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5719         int dest_idx;
5720
5721         switch (opaque_key) {
5722         case RXD_OPAQUE_RING_STD:
5723                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5724                 dest_desc = &dpr->rx_std[dest_idx];
5725                 dest_map = &dpr->rx_std_buffers[dest_idx];
5726                 src_desc = &spr->rx_std[src_idx];
5727                 src_map = &spr->rx_std_buffers[src_idx];
5728                 break;
5729
5730         case RXD_OPAQUE_RING_JUMBO:
5731                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5732                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5733                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5734                 src_desc = &spr->rx_jmb[src_idx].std;
5735                 src_map = &spr->rx_jmb_buffers[src_idx];
5736                 break;
5737
5738         default:
5739                 return;
5740         }
5741
5742         dest_map->data = src_map->data;
5743         dma_unmap_addr_set(dest_map, mapping,
5744                            dma_unmap_addr(src_map, mapping));
5745         dest_desc->addr_hi = src_desc->addr_hi;
5746         dest_desc->addr_lo = src_desc->addr_lo;
5747
5748         /* Ensure that the update to the data pointer happens after the
5749          * physical addresses have been transferred to the new BD location.
5750          */
5751         smp_wmb();
5752
5753         src_map->data = NULL;
5754 }
5755
5756 /* The RX ring scheme is composed of multiple rings which post fresh
5757  * buffers to the chip, and one special ring the chip uses to report
5758  * status back to the host.
5759  *
5760  * The special ring reports the status of received packets to the
5761  * host.  The chip does not write into the original descriptor the
5762  * RX buffer was obtained from.  The chip simply takes the original
5763  * descriptor as provided by the host, updates the status and length
5764  * field, then writes this into the next status ring entry.
5765  *
5766  * Each ring the host uses to post buffers to the chip is described
5767  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5768  * it is first placed into the on-chip ram.  When the packet's length
5769  * is known, it walks down the TG3_BDINFO entries to select the ring.
5770  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5771  * which is within the range of the new packet's length is chosen.
5772  *
5773  * The "separate ring for rx status" scheme may sound queer, but it makes
5774  * sense from a cache coherency perspective.  If only the host writes
5775  * to the buffer post rings, and only the chip writes to the rx status
5776  * rings, then cache lines never move beyond shared-modified state.
5777  * If both the host and chip were to write into the same ring, cache line
5778  * eviction could occur since both entities want it in an exclusive state.
5779  */
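
/* Example (hypothetical cookie value): a status ring entry whose opaque
 * field is (RXD_OPAQUE_RING_STD | 0x25) refers to standard producer
 * ring buffer 0x25; tg3_rx() below splits the cookie back apart with
 * RXD_OPAQUE_RING_MASK and RXD_OPAQUE_INDEX_MASK.
 */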
5780 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5781 {
5782         struct tg3 *tp = tnapi->tp;
5783         u32 work_mask, rx_std_posted = 0;
5784         u32 std_prod_idx, jmb_prod_idx;
5785         u32 sw_idx = tnapi->rx_rcb_ptr;
5786         u16 hw_idx;
5787         int received;
5788         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5789
5790         hw_idx = *(tnapi->rx_rcb_prod_idx);
5791         /*
5792          * We need to order the read of hw_idx and the read of
5793          * the opaque cookie.
5794          */
5795         rmb();
5796         work_mask = 0;
5797         received = 0;
5798         std_prod_idx = tpr->rx_std_prod_idx;
5799         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5800         while (sw_idx != hw_idx && budget > 0) {
5801                 struct ring_info *ri;
5802                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5803                 unsigned int len;
5804                 struct sk_buff *skb;
5805                 dma_addr_t dma_addr;
5806                 u32 opaque_key, desc_idx, *post_ptr;
5807                 u8 *data;
5808
5809                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5810                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5811                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5812                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5813                         dma_addr = dma_unmap_addr(ri, mapping);
5814                         data = ri->data;
5815                         post_ptr = &std_prod_idx;
5816                         rx_std_posted++;
5817                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5818                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5819                         dma_addr = dma_unmap_addr(ri, mapping);
5820                         data = ri->data;
5821                         post_ptr = &jmb_prod_idx;
5822                 } else
5823                         goto next_pkt_nopost;
5824
5825                 work_mask |= opaque_key;
5826
5827                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5828                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5829                 drop_it:
5830                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5831                                        desc_idx, *post_ptr);
5832                 drop_it_no_recycle:
5833                         /* Other statistics are tracked by the card. */
5834                         tp->rx_dropped++;
5835                         goto next_pkt;
5836                 }
5837
5838                 prefetch(data + TG3_RX_OFFSET(tp));
5839                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5840                       ETH_FCS_LEN;
5841
5842                 if (len > TG3_RX_COPY_THRESH(tp)) {
5843                         int skb_size;
5844
5845                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5846                                                     *post_ptr);
5847                         if (skb_size < 0)
5848                                 goto drop_it;
5849
5850                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5851                                          PCI_DMA_FROMDEVICE);
5852
5853                         skb = build_skb(data);
5854                         if (!skb) {
5855                                 kfree(data);
5856                                 goto drop_it_no_recycle;
5857                         }
5858                         skb_reserve(skb, TG3_RX_OFFSET(tp));
5859                         /* Ensure that the update to the data happens
5860                          * after the usage of the old DMA mapping.
5861                          */
5862                         smp_wmb();
5863
5864                         ri->data = NULL;
5865
5866                 } else {
5867                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5868                                        desc_idx, *post_ptr);
5869
5870                         skb = netdev_alloc_skb(tp->dev,
5871                                                len + TG3_RAW_IP_ALIGN);
5872                         if (skb == NULL)
5873                                 goto drop_it_no_recycle;
5874
5875                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
5876                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5877                         memcpy(skb->data,
5878                                data + TG3_RX_OFFSET(tp),
5879                                len);
5880                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5881                 }
5882
5883                 skb_put(skb, len);
5884                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5885                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5886                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5887                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5888                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5889                 else
5890                         skb_checksum_none_assert(skb);
5891
5892                 skb->protocol = eth_type_trans(skb, tp->dev);
5893
5894                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5895                     skb->protocol != htons(ETH_P_8021Q)) {
5896                         dev_kfree_skb(skb);
5897                         goto drop_it_no_recycle;
5898                 }
5899
5900                 if (desc->type_flags & RXD_FLAG_VLAN &&
5901                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5902                         __vlan_hwaccel_put_tag(skb,
5903                                                desc->err_vlan & RXD_VLAN_MASK);
5904
5905                 napi_gro_receive(&tnapi->napi, skb);
5906
5907                 received++;
5908                 budget--;
5909
5910 next_pkt:
5911                 (*post_ptr)++;
5912
5913                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5914                         tpr->rx_std_prod_idx = std_prod_idx &
5915                                                tp->rx_std_ring_mask;
5916                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5917                                      tpr->rx_std_prod_idx);
5918                         work_mask &= ~RXD_OPAQUE_RING_STD;
5919                         rx_std_posted = 0;
5920                 }
5921 next_pkt_nopost:
5922                 sw_idx++;
5923                 sw_idx &= tp->rx_ret_ring_mask;
5924
5925                 /* Refresh hw_idx to see if there is new work */
5926                 if (sw_idx == hw_idx) {
5927                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5928                         rmb();
5929                 }
5930         }
5931
5932         /* ACK the status ring. */
5933         tnapi->rx_rcb_ptr = sw_idx;
5934         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5935
5936         /* Refill RX ring(s). */
5937         if (!tg3_flag(tp, ENABLE_RSS)) {
5938                 /* Sync BD data before updating mailbox */
5939                 wmb();
5940
5941                 if (work_mask & RXD_OPAQUE_RING_STD) {
5942                         tpr->rx_std_prod_idx = std_prod_idx &
5943                                                tp->rx_std_ring_mask;
5944                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5945                                      tpr->rx_std_prod_idx);
5946                 }
5947                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5948                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5949                                                tp->rx_jmb_ring_mask;
5950                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5951                                      tpr->rx_jmb_prod_idx);
5952                 }
5953                 mmiowb();
5954         } else if (work_mask) {
5955                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5956                  * updated before the producer indices can be updated.
5957                  */
5958                 smp_wmb();
5959
5960                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5961                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5962
5963                 if (tnapi != &tp->napi[1]) {
5964                         tp->rx_refill = true;
5965                         napi_schedule(&tp->napi[1].napi);
5966                 }
5967         }
5968
5969         return received;
5970 }
5971
5972 static void tg3_poll_link(struct tg3 *tp)
5973 {
5974         /* handle link change and other phy events */
5975         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5976                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5977
5978                 if (sblk->status & SD_STATUS_LINK_CHG) {
5979                         sblk->status = SD_STATUS_UPDATED |
5980                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5981                         spin_lock(&tp->lock);
5982                         if (tg3_flag(tp, USE_PHYLIB)) {
5983                                 tw32_f(MAC_STATUS,
5984                                      (MAC_STATUS_SYNC_CHANGED |
5985                                       MAC_STATUS_CFG_CHANGED |
5986                                       MAC_STATUS_MI_COMPLETION |
5987                                       MAC_STATUS_LNKSTATE_CHANGED));
5988                                 udelay(40);
5989                         } else
5990                                 tg3_setup_phy(tp, 0);
5991                         spin_unlock(&tp->lock);
5992                 }
5993         }
5994 }
5995
5996 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5997                                 struct tg3_rx_prodring_set *dpr,
5998                                 struct tg3_rx_prodring_set *spr)
5999 {
6000         u32 si, di, cpycnt, src_prod_idx;
6001         int i, err = 0;
6002
6003         while (1) {
6004                 src_prod_idx = spr->rx_std_prod_idx;
6005
6006                 /* Make sure updates to the rx_std_buffers[] entries and the
6007                  * standard producer index are seen in the correct order.
6008                  */
6009                 smp_rmb();
6010
6011                 if (spr->rx_std_cons_idx == src_prod_idx)
6012                         break;
6013
6014                 if (spr->rx_std_cons_idx < src_prod_idx)
6015                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6016                 else
6017                         cpycnt = tp->rx_std_ring_mask + 1 -
6018                                  spr->rx_std_cons_idx;
6019
6020                 cpycnt = min(cpycnt,
6021                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6022
6023                 si = spr->rx_std_cons_idx;
6024                 di = dpr->rx_std_prod_idx;
6025
6026                 for (i = di; i < di + cpycnt; i++) {
6027                         if (dpr->rx_std_buffers[i].data) {
6028                                 cpycnt = i - di;
6029                                 err = -ENOSPC;
6030                                 break;
6031                         }
6032                 }
6033
6034                 if (!cpycnt)
6035                         break;
6036
6037                 /* Ensure that updates to the rx_std_buffers ring and the
6038                  * shadowed hardware producer ring from tg3_recycle_rx() are
6039                  * ordered correctly WRT the data check above.
6040                  */
6041                 smp_rmb();
6042
6043                 memcpy(&dpr->rx_std_buffers[di],
6044                        &spr->rx_std_buffers[si],
6045                        cpycnt * sizeof(struct ring_info));
6046
6047                 for (i = 0; i < cpycnt; i++, di++, si++) {
6048                         struct tg3_rx_buffer_desc *sbd, *dbd;
6049                         sbd = &spr->rx_std[si];
6050                         dbd = &dpr->rx_std[di];
6051                         dbd->addr_hi = sbd->addr_hi;
6052                         dbd->addr_lo = sbd->addr_lo;
6053                 }
6054
6055                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6056                                        tp->rx_std_ring_mask;
6057                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6058                                        tp->rx_std_ring_mask;
6059         }
6060
6061         while (1) {
6062                 src_prod_idx = spr->rx_jmb_prod_idx;
6063
6064                 /* Make sure updates to the rx_jmb_buffers[] entries and
6065                  * the jumbo producer index are seen in the correct order.
6066                  */
6067                 smp_rmb();
6068
6069                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6070                         break;
6071
6072                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6073                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6074                 else
6075                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6076                                  spr->rx_jmb_cons_idx;
6077
6078                 cpycnt = min(cpycnt,
6079                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6080
6081                 si = spr->rx_jmb_cons_idx;
6082                 di = dpr->rx_jmb_prod_idx;
6083
6084                 for (i = di; i < di + cpycnt; i++) {
6085                         if (dpr->rx_jmb_buffers[i].data) {
6086                                 cpycnt = i - di;
6087                                 err = -ENOSPC;
6088                                 break;
6089                         }
6090                 }
6091
6092                 if (!cpycnt)
6093                         break;
6094
6095                 /* Ensure that updates to the rx_jmb_buffers ring and the
6096                  * shadowed hardware producer ring from tg3_recycle_rx() are
6097                  * ordered correctly WRT the data check above.
6098                  */
6099                 smp_rmb();
6100
6101                 memcpy(&dpr->rx_jmb_buffers[di],
6102                        &spr->rx_jmb_buffers[si],
6103                        cpycnt * sizeof(struct ring_info));
6104
6105                 for (i = 0; i < cpycnt; i++, di++, si++) {
6106                         struct tg3_rx_buffer_desc *sbd, *dbd;
6107                         sbd = &spr->rx_jmb[si].std;
6108                         dbd = &dpr->rx_jmb[di].std;
6109                         dbd->addr_hi = sbd->addr_hi;
6110                         dbd->addr_lo = sbd->addr_lo;
6111                 }
6112
6113                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6114                                        tp->rx_jmb_ring_mask;
6115                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6116                                        tp->rx_jmb_ring_mask;
6117         }
6118
6119         return err;
6120 }
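
/* Worked example (hypothetical indices): with a 512-entry standard ring
 * (rx_std_ring_mask == 511), cons_idx == 500 and prod_idx == 10, the
 * producer has wrapped, so the first pass above copies 512 - 500 = 12
 * entries up to the end of the ring; the loop then comes around and
 * copies the remaining 10 entries starting from index 0.
 */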
6121
6122 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6123 {
6124         struct tg3 *tp = tnapi->tp;
6125
6126         /* run TX completion thread */
6127         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6128                 tg3_tx(tnapi);
6129                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6130                         return work_done;
6131         }
6132
6133         if (!tnapi->rx_rcb_prod_idx)
6134                 return work_done;
6135
6136         /* run RX thread, within the bounds set by NAPI.
6137          * All RX "locking" is done by ensuring outside
6138          * code synchronizes with tg3->napi.poll()
6139          */
6140         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6141                 work_done += tg3_rx(tnapi, budget - work_done);
6142
6143         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6144                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6145                 int i, err = 0;
6146                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6147                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6148
6149                 tp->rx_refill = false;
6150                 for (i = 1; i < tp->irq_cnt; i++)
6151                         err |= tg3_rx_prodring_xfer(tp, dpr,
6152                                                     &tp->napi[i].prodring);
6153
6154                 wmb();
6155
6156                 if (std_prod_idx != dpr->rx_std_prod_idx)
6157                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6158                                      dpr->rx_std_prod_idx);
6159
6160                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6161                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6162                                      dpr->rx_jmb_prod_idx);
6163
6164                 mmiowb();
6165
6166                 if (err)
6167                         tw32_f(HOSTCC_MODE, tp->coal_now);
6168         }
6169
6170         return work_done;
6171 }
6172
6173 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6174 {
6175         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6176                 schedule_work(&tp->reset_task);
6177 }
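
/* test_and_set_bit() returns the previous bit value, so if two CPUs
 * race into tg3_reset_task_schedule() only the first sees 0 and calls
 * schedule_work(); the reset task is queued at most once until the
 * flag is cleared again in tg3_reset_task_cancel() below.
 */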
6178
6179 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6180 {
6181         cancel_work_sync(&tp->reset_task);
6182         tg3_flag_clear(tp, RESET_TASK_PENDING);
6183         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6184 }
6185
6186 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6187 {
6188         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6189         struct tg3 *tp = tnapi->tp;
6190         int work_done = 0;
6191         struct tg3_hw_status *sblk = tnapi->hw_status;
6192
6193         while (1) {
6194                 work_done = tg3_poll_work(tnapi, work_done, budget);
6195
6196                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6197                         goto tx_recovery;
6198
6199                 if (unlikely(work_done >= budget))
6200                         break;
6201
6202                 /* tnapi->last_tag is used when re-enabling interrupts below
6203                  * to tell the hw how much work has been processed,
6204                  * so we must read it before checking for more work.
6205                  */
6206                 tnapi->last_tag = sblk->status_tag;
6207                 tnapi->last_irq_tag = tnapi->last_tag;
6208                 rmb();
6209
6210                 /* check for RX/TX work to do */
6211                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6212                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6213
6214                         /* This test is not race-free, but looping again
6215                          * here reduces the number of interrupts taken.
6216                          */
6217                         if (tnapi == &tp->napi[1] && tp->rx_refill)
6218                                 continue;
6219
6220                         napi_complete(napi);
6221                         /* Reenable interrupts. */
6222                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6223
6224                         /* This test here is synchronized by napi_schedule()
6225                          * and napi_complete() to close the race condition.
6226                          */
6227                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6228                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
6229                                                   HOSTCC_MODE_ENABLE |
6230                                                   tnapi->coal_now);
6231                         }
6232                         mmiowb();
6233                         break;
6234                 }
6235         }
6236
6237         return work_done;
6238
6239 tx_recovery:
6240         /* work_done is guaranteed to be less than budget. */
6241         napi_complete(napi);
6242         tg3_reset_task_schedule(tp);
6243         return work_done;
6244 }
6245
6246 static void tg3_process_error(struct tg3 *tp)
6247 {
6248         u32 val;
6249         bool real_error = false;
6250
6251         if (tg3_flag(tp, ERROR_PROCESSED))
6252                 return;
6253
6254         /* Check Flow Attention register */
6255         val = tr32(HOSTCC_FLOW_ATTN);
6256         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6257                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6258                 real_error = true;
6259         }
6260
6261         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6262                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6263                 real_error = true;
6264         }
6265
6266         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6267                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6268                 real_error = true;
6269         }
6270
6271         if (!real_error)
6272                 return;
6273
6274         tg3_dump_state(tp);
6275
6276         tg3_flag_set(tp, ERROR_PROCESSED);
6277         tg3_reset_task_schedule(tp);
6278 }
6279
6280 static int tg3_poll(struct napi_struct *napi, int budget)
6281 {
6282         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6283         struct tg3 *tp = tnapi->tp;
6284         int work_done = 0;
6285         struct tg3_hw_status *sblk = tnapi->hw_status;
6286
6287         while (1) {
6288                 if (sblk->status & SD_STATUS_ERROR)
6289                         tg3_process_error(tp);
6290
6291                 tg3_poll_link(tp);
6292
6293                 work_done = tg3_poll_work(tnapi, work_done, budget);
6294
6295                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6296                         goto tx_recovery;
6297
6298                 if (unlikely(work_done >= budget))
6299                         break;
6300
6301                 if (tg3_flag(tp, TAGGED_STATUS)) {
6302                         /* tnapi->last_tag is used in tg3_int_reenable() below
6303                          * to tell the hw how much work has been processed,
6304                          * so we must read it before checking for more work.
6305                          */
6306                         tnapi->last_tag = sblk->status_tag;
6307                         tnapi->last_irq_tag = tnapi->last_tag;
6308                         rmb();
6309                 } else
6310                         sblk->status &= ~SD_STATUS_UPDATED;
6311
6312                 if (likely(!tg3_has_work(tnapi))) {
6313                         napi_complete(napi);
6314                         tg3_int_reenable(tnapi);
6315                         break;
6316                 }
6317         }
6318
6319         return work_done;
6320
6321 tx_recovery:
6322         /* work_done is guaranteed to be less than budget. */
6323         napi_complete(napi);
6324         tg3_reset_task_schedule(tp);
6325         return work_done;
6326 }
6327
6328 static void tg3_napi_disable(struct tg3 *tp)
6329 {
6330         int i;
6331
6332         for (i = tp->irq_cnt - 1; i >= 0; i--)
6333                 napi_disable(&tp->napi[i].napi);
6334 }
6335
6336 static void tg3_napi_enable(struct tg3 *tp)
6337 {
6338         int i;
6339
6340         for (i = 0; i < tp->irq_cnt; i++)
6341                 napi_enable(&tp->napi[i].napi);
6342 }
6343
6344 static void tg3_napi_init(struct tg3 *tp)
6345 {
6346         int i;
6347
6348         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6349         for (i = 1; i < tp->irq_cnt; i++)
6350                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6351 }
6352
6353 static void tg3_napi_fini(struct tg3 *tp)
6354 {
6355         int i;
6356
6357         for (i = 0; i < tp->irq_cnt; i++)
6358                 netif_napi_del(&tp->napi[i].napi);
6359 }
6360
6361 static inline void tg3_netif_stop(struct tg3 *tp)
6362 {
6363         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6364         tg3_napi_disable(tp);
6365         netif_tx_disable(tp->dev);
6366 }
6367
6368 static inline void tg3_netif_start(struct tg3 *tp)
6369 {
6370         /* NOTE: unconditional netif_tx_wake_all_queues is only
6371          * appropriate so long as all callers are assured to
6372          * have free tx slots (such as after tg3_init_hw)
6373          */
6374         netif_tx_wake_all_queues(tp->dev);
6375
6376         tg3_napi_enable(tp);
6377         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6378         tg3_enable_ints(tp);
6379 }
6380
6381 static void tg3_irq_quiesce(struct tg3 *tp)
6382 {
6383         int i;
6384
6385         BUG_ON(tp->irq_sync);
6386
6387         tp->irq_sync = 1;
6388         smp_mb();
6389
6390         for (i = 0; i < tp->irq_cnt; i++)
6391                 synchronize_irq(tp->napi[i].irq_vec);
6392 }
6393
6394 /* Fully shut down all tg3 driver activity elsewhere in the system.
6395  * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
6396  * Most of the time this is not necessary except when shutting down
6397  * the device.
6398  */
6399 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6400 {
6401         spin_lock_bh(&tp->lock);
6402         if (irq_sync)
6403                 tg3_irq_quiesce(tp);
6404 }
6405
6406 static inline void tg3_full_unlock(struct tg3 *tp)
6407 {
6408         spin_unlock_bh(&tp->lock);
6409 }
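
/* Usage sketch (hypothetical caller): a path that must exclude both
 * BH-context activity and the IRQ handlers would bracket its work as
 *
 *      tg3_full_lock(tp, 1);   // also waits for in-flight handlers
 *      ... reprogram the hardware ...
 *      tg3_full_unlock(tp);
 *
 * while irq_sync == 0 skips the synchronize_irq() step for the common
 * case where BH exclusion alone is enough.
 */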
6410
6411 /* One-shot MSI handler - the chip automatically disables the
6412  * interrupt after sending the MSI, so the driver doesn't have to.
6413  */
6414 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6415 {
6416         struct tg3_napi *tnapi = dev_id;
6417         struct tg3 *tp = tnapi->tp;
6418
6419         prefetch(tnapi->hw_status);
6420         if (tnapi->rx_rcb)
6421                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6422
6423         if (likely(!tg3_irq_sync(tp)))
6424                 napi_schedule(&tnapi->napi);
6425
6426         return IRQ_HANDLED;
6427 }
6428
6429 /* MSI ISR - No need to check for interrupt sharing and no need to
6430  * flush status block and interrupt mailbox. PCI ordering rules
6431  * guarantee that MSI will arrive after the status block.
6432  */
6433 static irqreturn_t tg3_msi(int irq, void *dev_id)
6434 {
6435         struct tg3_napi *tnapi = dev_id;
6436         struct tg3 *tp = tnapi->tp;
6437
6438         prefetch(tnapi->hw_status);
6439         if (tnapi->rx_rcb)
6440                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6441         /*
6442          * Writing any value to intr-mbox-0 clears PCI INTA# and
6443          * chip-internal interrupt pending events.
6444          * Writing non-zero to intr-mbox-0 additionally tells the
6445          * NIC to stop sending us irqs, engaging "in-intr-handler"
6446          * event coalescing.
6447          */
6448         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6449         if (likely(!tg3_irq_sync(tp)))
6450                 napi_schedule(&tnapi->napi);
6451
6452         return IRQ_RETVAL(1);
6453 }
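
/* The matching re-enable is done from NAPI context (see tg3_poll_msix()
 * above), where writing tnapi->last_tag << 24 back to the same mailbox
 * both clears the "in-intr-handler" state and tells the chip how far
 * the driver has processed the status block.
 */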
6454
6455 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6456 {
6457         struct tg3_napi *tnapi = dev_id;
6458         struct tg3 *tp = tnapi->tp;
6459         struct tg3_hw_status *sblk = tnapi->hw_status;
6460         unsigned int handled = 1;
6461
6462         /* In INTx mode, it is possible for the interrupt to arrive at
6463          * the CPU before the previously posted status block is visible.
6464          * Reading the PCI State register will confirm whether the
6465          * interrupt is ours and will flush the status block.
6466          */
6467         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6468                 if (tg3_flag(tp, CHIP_RESETTING) ||
6469                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6470                         handled = 0;
6471                         goto out;
6472                 }
6473         }
6474
6475         /*
6476          * Writing any value to intr-mbox-0 clears PCI INTA# and
6477          * chip-internal interrupt pending events.
6478          * Writing non-zero to intr-mbox-0 additionally tells the
6479          * NIC to stop sending us irqs, engaging "in-intr-handler"
6480          * event coalescing.
6481          *
6482          * Flush the mailbox to de-assert the IRQ immediately to prevent
6483          * spurious interrupts.  The flush impacts performance but
6484          * excessive spurious interrupts can be worse in some cases.
6485          */
6486         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6487         if (tg3_irq_sync(tp))
6488                 goto out;
6489         sblk->status &= ~SD_STATUS_UPDATED;
6490         if (likely(tg3_has_work(tnapi))) {
6491                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6492                 napi_schedule(&tnapi->napi);
6493         } else {
6494                 /* No work, shared interrupt perhaps?  re-enable
6495                  * interrupts, and flush that PCI write
6496                  */
6497                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6498                                0x00000000);
6499         }
6500 out:
6501         return IRQ_RETVAL(handled);
6502 }
6503
6504 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6505 {
6506         struct tg3_napi *tnapi = dev_id;
6507         struct tg3 *tp = tnapi->tp;
6508         struct tg3_hw_status *sblk = tnapi->hw_status;
6509         unsigned int handled = 1;
6510
6511         /* In INTx mode, it is possible for the interrupt to arrive at
6512          * the CPU before the previously posted status block is visible.
6513          * Reading the PCI State register will confirm whether the
6514          * interrupt is ours and will flush the status block.
6515          */
6516         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6517                 if (tg3_flag(tp, CHIP_RESETTING) ||
6518                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6519                         handled = 0;
6520                         goto out;
6521                 }
6522         }
6523
6524         /*
6525          * Writing any value to intr-mbox-0 clears PCI INTA# and
6526          * chip-internal interrupt pending events.
6527          * Writing non-zero to intr-mbox-0 additionally tells the
6528          * NIC to stop sending us irqs, engaging "in-intr-handler"
6529          * event coalescing.
6530          *
6531          * Flush the mailbox to de-assert the IRQ immediately to prevent
6532          * spurious interrupts.  The flush impacts performance but
6533          * excessive spurious interrupts can be worse in some cases.
6534          */
6535         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6536
6537         /*
6538          * In a shared interrupt configuration, sometimes other devices'
6539          * interrupts will scream.  We record the current status tag here
6540          * so that the above check can report that the screaming interrupts
6541          * are unhandled.  Eventually they will be silenced.
6542          */
6543         tnapi->last_irq_tag = sblk->status_tag;
6544
6545         if (tg3_irq_sync(tp))
6546                 goto out;
6547
6548         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6549
6550         napi_schedule(&tnapi->napi);
6551
6552 out:
6553         return IRQ_RETVAL(handled);
6554 }
6555
6556 /* ISR for interrupt test */
6557 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6558 {
6559         struct tg3_napi *tnapi = dev_id;
6560         struct tg3 *tp = tnapi->tp;
6561         struct tg3_hw_status *sblk = tnapi->hw_status;
6562
6563         if ((sblk->status & SD_STATUS_UPDATED) ||
6564             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6565                 tg3_disable_ints(tp);
6566                 return IRQ_RETVAL(1);
6567         }
6568         return IRQ_RETVAL(0);
6569 }
6570
6571 #ifdef CONFIG_NET_POLL_CONTROLLER
6572 static void tg3_poll_controller(struct net_device *dev)
6573 {
6574         int i;
6575         struct tg3 *tp = netdev_priv(dev);
6576
6577         for (i = 0; i < tp->irq_cnt; i++)
6578                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6579 }
6580 #endif
6581
6582 static void tg3_tx_timeout(struct net_device *dev)
6583 {
6584         struct tg3 *tp = netdev_priv(dev);
6585
6586         if (netif_msg_tx_err(tp)) {
6587                 netdev_err(dev, "transmit timed out, resetting\n");
6588                 tg3_dump_state(tp);
6589         }
6590
6591         tg3_reset_task_schedule(tp);
6592 }
6593
6594 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6595 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6596 {
6597         u32 base = (u32) mapping & 0xffffffff;
6598
6599         return (base > 0xffffdcc0) && (base + len + 8 < base);
6600 }
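
/* Worked example: for mapping 0xffffffe0 and len 0x40, base is above
 * 0xffffdcc0 and base + len + 8 wraps around to 0x28 < base, so the
 * buffer straddles a 4GB boundary and the hwbug workaround must copy
 * it to a safe location.
 */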
6601
6602 /* Test for DMA addresses > 40-bit */
6603 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6604                                           int len)
6605 {
6606 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6607         if (tg3_flag(tp, 40BIT_DMA_BUG))
6608                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6609         return 0;
6610 #else
6611         return 0;
6612 #endif
6613 }
6614
6615 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6616                                  dma_addr_t mapping, u32 len, u32 flags,
6617                                  u32 mss, u32 vlan)
6618 {
6619         txbd->addr_hi = ((u64) mapping >> 32);
6620         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6621         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6622         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6623 }
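
/* Usage sketch (hypothetical values): a 1514-byte final fragment with
 * VLAN tag 5 and no TSO would be posted as
 *
 *      tg3_tx_set_bd(txbd, mapping, 1514, TXD_FLAG_END, 0, 5);
 *
 * packing the length and flags into len_flags and the mss/vlan pair
 * into vlan_tag.
 */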
6624
6625 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6626                             dma_addr_t map, u32 len, u32 flags,
6627                             u32 mss, u32 vlan)
6628 {
6629         struct tg3 *tp = tnapi->tp;
6630         bool hwbug = false;
6631
6632         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6633                 hwbug = true;
6634
6635         if (tg3_4g_overflow_test(map, len))
6636                 hwbug = true;
6637
6638         if (tg3_40bit_overflow_test(tp, map, len))
6639                 hwbug = true;
6640
6641         if (tp->dma_limit) {
6642                 u32 prvidx = *entry;
6643                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6644                 while (len > tp->dma_limit && *budget) {
6645                         u32 frag_len = tp->dma_limit;
6646                         len -= tp->dma_limit;
6647
6648                         /* Avoid the 8-byte DMA problem */
6649                         if (len <= 8) {
6650                                 len += tp->dma_limit / 2;
6651                                 frag_len = tp->dma_limit / 2;
6652                         }
6653
6654                         tnapi->tx_buffers[*entry].fragmented = true;
6655
6656                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6657                                       frag_len, tmp_flag, mss, vlan);
6658                         *budget -= 1;
6659                         prvidx = *entry;
6660                         *entry = NEXT_TX(*entry);
6661
6662                         map += frag_len;
6663                 }
6664
6665                 if (len) {
6666                         if (*budget) {
6667                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6668                                               len, flags, mss, vlan);
6669                                 *budget -= 1;
6670                                 *entry = NEXT_TX(*entry);
6671                         } else {
6672                                 hwbug = true;
6673                                 tnapi->tx_buffers[prvidx].fragmented = false;
6674                         }
6675                 }
6676         } else {
6677                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6678                               len, flags, mss, vlan);
6679                 *entry = NEXT_TX(*entry);
6680         }
6681
6682         return hwbug;
6683 }
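
/* Worked example (hypothetical sizes): with dma_limit == 0x1000 and a
 * 0x1006-byte fragment, a straight 0x1000 split would leave a 6-byte
 * tail and trip the short-DMA bug handled above; the loop instead
 * emits an 0x800-byte BD and leaves 0x806 bytes for the final BD.
 */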
6684
6685 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6686 {
6687         int i;
6688         struct sk_buff *skb;
6689         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6690
6691         skb = txb->skb;
6692         txb->skb = NULL;
6693
6694         pci_unmap_single(tnapi->tp->pdev,
6695                          dma_unmap_addr(txb, mapping),
6696                          skb_headlen(skb),
6697                          PCI_DMA_TODEVICE);
6698
6699         while (txb->fragmented) {
6700                 txb->fragmented = false;
6701                 entry = NEXT_TX(entry);
6702                 txb = &tnapi->tx_buffers[entry];
6703         }
6704
6705         for (i = 0; i <= last; i++) {
6706                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6707
6708                 entry = NEXT_TX(entry);
6709                 txb = &tnapi->tx_buffers[entry];
6710
6711                 pci_unmap_page(tnapi->tp->pdev,
6712                                dma_unmap_addr(txb, mapping),
6713                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6714
6715                 while (txb->fragmented) {
6716                         txb->fragmented = false;
6717                         entry = NEXT_TX(entry);
6718                         txb = &tnapi->tx_buffers[entry];
6719                 }
6720         }
6721 }
6722
6723 /* Work around the 4GB and 40-bit hardware DMA bugs. */
6724 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6725                                        struct sk_buff **pskb,
6726                                        u32 *entry, u32 *budget,
6727                                        u32 base_flags, u32 mss, u32 vlan)
6728 {
6729         struct tg3 *tp = tnapi->tp;
6730         struct sk_buff *new_skb, *skb = *pskb;
6731         dma_addr_t new_addr = 0;
6732         int ret = 0;
6733
6734         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6735                 new_skb = skb_copy(skb, GFP_ATOMIC);
6736         else {
6737                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6738
6739                 new_skb = skb_copy_expand(skb,
6740                                           skb_headroom(skb) + more_headroom,
6741                                           skb_tailroom(skb), GFP_ATOMIC);
6742         }
6743
6744         if (!new_skb) {
6745                 ret = -1;
6746         } else {
6747                 /* New SKB is guaranteed to be linear. */
6748                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6749                                           PCI_DMA_TODEVICE);
6750                 /* Make sure the mapping succeeded */
6751                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6752                         dev_kfree_skb(new_skb);
6753                         ret = -1;
6754                 } else {
6755                         u32 save_entry = *entry;
6756
6757                         base_flags |= TXD_FLAG_END;
6758
6759                         tnapi->tx_buffers[*entry].skb = new_skb;
6760                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6761                                            mapping, new_addr);
6762
6763                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6764                                             new_skb->len, base_flags,
6765                                             mss, vlan)) {
6766                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6767                                 dev_kfree_skb(new_skb);
6768                                 ret = -1;
6769                         }
6770                 }
6771         }
6772
6773         dev_kfree_skb(skb);
6774         *pskb = new_skb;
6775         return ret;
6776 }
6777
6778 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6779
6780 /* Use GSO to work around a rare TSO bug that may be triggered when the
6781  * TSO header is greater than 80 bytes.
6782  */
6783 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6784 {
6785         struct sk_buff *segs, *nskb;
6786         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6787
6788         /* Estimate the number of fragments in the worst case */
6789         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6790                 netif_stop_queue(tp->dev);
6791
6792                 /* netif_tx_stop_queue() must be done before checking
6793                  * the tx index in tg3_tx_avail() below, because in
6794                  * tg3_tx(), we update tx index before checking for
6795                  * netif_tx_queue_stopped().
6796                  */
6797                 smp_mb();
6798                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6799                         return NETDEV_TX_BUSY;
6800
6801                 netif_wake_queue(tp->dev);
6802         }
6803
6804         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6805         if (IS_ERR(segs))
6806                 goto tg3_tso_bug_end;
6807
6808         do {
6809                 nskb = segs;
6810                 segs = segs->next;
6811                 nskb->next = NULL;
6812                 tg3_start_xmit(nskb, tp->dev);
6813         } while (segs);
6814
6815 tg3_tso_bug_end:
6816         dev_kfree_skb(skb);
6817
6818         return NETDEV_TX_OK;
6819 }
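
/* Estimate rationale (sketch): the gso_segs * 3 bound above allows
 * roughly three descriptors per emitted segment (a linear header BD
 * plus split payload BDs) in the worst case, bounding the ring space
 * the segment transmit loop can require.
 */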
6820
6821 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6822  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6823  */
6824 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6825 {
6826         struct tg3 *tp = netdev_priv(dev);
6827         u32 len, entry, base_flags, mss, vlan = 0;
6828         u32 budget;
6829         int i = -1, would_hit_hwbug;
6830         dma_addr_t mapping;
6831         struct tg3_napi *tnapi;
6832         struct netdev_queue *txq;
6833         unsigned int last;
6834
6835         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6836         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6837         if (tg3_flag(tp, ENABLE_TSS))
6838                 tnapi++;
6839
6840         budget = tg3_tx_avail(tnapi);
6841
6842         /* We are running in BH disabled context with netif_tx_lock
6843          * and TX reclaim runs via tp->napi.poll inside of a software
6844          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6845          * no IRQ context deadlocks to worry about either.  Rejoice!
6846          */
6847         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6848                 if (!netif_tx_queue_stopped(txq)) {
6849                         netif_tx_stop_queue(txq);
6850
6851                         /* This is a hard error, log it. */
6852                         netdev_err(dev,
6853                                    "BUG! Tx Ring full when queue awake!\n");
6854                 }
6855                 return NETDEV_TX_BUSY;
6856         }
6857
6858         entry = tnapi->tx_prod;
6859         base_flags = 0;
6860         if (skb->ip_summed == CHECKSUM_PARTIAL)
6861                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6862
6863         mss = skb_shinfo(skb)->gso_size;
6864         if (mss) {
6865                 struct iphdr *iph;
6866                 u32 tcp_opt_len, hdr_len;
6867
6868                 if (skb_header_cloned(skb) &&
6869                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6870                         goto drop;
6871
6872                 iph = ip_hdr(skb);
6873                 tcp_opt_len = tcp_optlen(skb);
6874
6875                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6876
6877                 if (!skb_is_gso_v6(skb)) {
6878                         iph->check = 0;
6879                         iph->tot_len = htons(mss + hdr_len);
6880                 }
6881
6882                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6883                     tg3_flag(tp, TSO_BUG))
6884                         return tg3_tso_bug(tp, skb);
6885
6886                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6887                                TXD_FLAG_CPU_POST_DMA);
6888
6889                 if (tg3_flag(tp, HW_TSO_1) ||
6890                     tg3_flag(tp, HW_TSO_2) ||
6891                     tg3_flag(tp, HW_TSO_3)) {
6892                         tcp_hdr(skb)->check = 0;
6893                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6894                 } else
6895                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6896                                                                  iph->daddr, 0,
6897                                                                  IPPROTO_TCP,
6898                                                                  0);
6899
6900                 if (tg3_flag(tp, HW_TSO_3)) {
6901                         mss |= (hdr_len & 0xc) << 12;
6902                         if (hdr_len & 0x10)
6903                                 base_flags |= 0x00000010;
6904                         base_flags |= (hdr_len & 0x3e0) << 5;
6905                 } else if (tg3_flag(tp, HW_TSO_2))
6906                         mss |= hdr_len << 9;
6907                 else if (tg3_flag(tp, HW_TSO_1) ||
6908                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6909                         if (tcp_opt_len || iph->ihl > 5) {
6910                                 int tsflags;
6911
6912                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6913                                 mss |= (tsflags << 11);
6914                         }
6915                 } else {
6916                         if (tcp_opt_len || iph->ihl > 5) {
6917                                 int tsflags;
6918
6919                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6920                                 base_flags |= tsflags << 12;
6921                         }
6922                 }
6923         }
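
        /* Worked example of the HW_TSO_3 packing above (hypothetical
         * hdr_len of 78, i.e. 0x4e): bits 2-3 (0xc) land in mss bits
         * 14-15, bit 4 is clear, and bits 5-9 (0x40) shift into
         * base_flags as 0x800.  The low two bits are always zero since
         * the IP and TCP headers are 4-byte multiples.
         */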
6924
6925         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6926             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6927                 base_flags |= TXD_FLAG_JMB_PKT;
6928
6929         if (vlan_tx_tag_present(skb)) {
6930                 base_flags |= TXD_FLAG_VLAN;
6931                 vlan = vlan_tx_tag_get(skb);
6932         }
6933
6934         len = skb_headlen(skb);
6935
6936         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6937         if (pci_dma_mapping_error(tp->pdev, mapping))
6938                 goto drop;
6939
6940
6941         tnapi->tx_buffers[entry].skb = skb;
6942         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6943
6944         would_hit_hwbug = 0;
6945
6946         if (tg3_flag(tp, 5701_DMA_BUG))
6947                 would_hit_hwbug = 1;
6948
6949         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6950                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6951                             mss, vlan)) {
6952                 would_hit_hwbug = 1;
6953         } else if (skb_shinfo(skb)->nr_frags > 0) {
6954                 u32 tmp_mss = mss;
6955
6956                 if (!tg3_flag(tp, HW_TSO_1) &&
6957                     !tg3_flag(tp, HW_TSO_2) &&
6958                     !tg3_flag(tp, HW_TSO_3))
6959                         tmp_mss = 0;
6960
6961                 /* Now loop through additional data
6962                  * fragments, and queue them.
6963                  */
6964                 last = skb_shinfo(skb)->nr_frags - 1;
6965                 for (i = 0; i <= last; i++) {
6966                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6967
6968                         len = skb_frag_size(frag);
6969                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6970                                                    len, DMA_TO_DEVICE);
6971
6972                         tnapi->tx_buffers[entry].skb = NULL;
6973                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6974                                            mapping);
6975                         if (dma_mapping_error(&tp->pdev->dev, mapping))
6976                                 goto dma_error;
6977
6978                         if (!budget ||
6979                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6980                                             len, base_flags |
6981                                             ((i == last) ? TXD_FLAG_END : 0),
6982                                             tmp_mss, vlan)) {
6983                                 would_hit_hwbug = 1;
6984                                 break;
6985                         }
6986                 }
6987         }
6988
6989         if (would_hit_hwbug) {
6990                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6991
6992                 /* If the workaround fails due to memory/mapping
6993                  * failure, silently drop this packet.
6994                  */
6995                 entry = tnapi->tx_prod;
6996                 budget = tg3_tx_avail(tnapi);
6997                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6998                                                 base_flags, mss, vlan))
6999                         goto drop_nofree;
7000         }
7001
7002         skb_tx_timestamp(skb);
7003         netdev_tx_sent_queue(txq, skb->len);
7004
7005         /* Sync BD data before updating mailbox */
7006         wmb();
7007
7008         /* Packets are ready, update Tx producer idx local and on card. */
7009         tw32_tx_mbox(tnapi->prodmbox, entry);
7010
7011         tnapi->tx_prod = entry;
7012         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7013                 netif_tx_stop_queue(txq);
7014
7015                 /* netif_tx_stop_queue() must be done before checking
7016                  * the tx index in tg3_tx_avail() below, because in
7017                  * tg3_tx(), we update tx index before checking for
7018                  * netif_tx_queue_stopped().
7019                  */
7020                 smp_mb();
7021                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7022                         netif_tx_wake_queue(txq);
7023         }
7024
7025         mmiowb();
7026         return NETDEV_TX_OK;
7027
7028 dma_error:
7029         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7030         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7031 drop:
7032         dev_kfree_skb(skb);
7033 drop_nofree:
7034         tp->tx_dropped++;
7035         return NETDEV_TX_OK;
7036 }
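/* The stop/wake protocol above pairs with the tx completion path in
 * tg3_tx().  A minimal sketch of the two sides, with illustrative names
 * rather than the exact driver code:
 *
 *	xmit path                          completion path
 *	---------                          ---------------
 *	post BDs, bump producer            advance tx consumer index
 *	if (avail <= MAX_SKB_FRAGS + 1) {  smp_mb();
 *		netif_tx_stop_queue(txq);  if (netif_tx_queue_stopped(txq) &&
 *		smp_mb();                      avail > wakeup threshold)
 *		recheck avail, maybe wake;         netif_tx_wake_queue(txq);
 *	}
 *
 * Each side publishes its index before testing the other side's state;
 * dropping either barrier can leave the queue stopped forever.
 */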
7037
7038 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7039 {
7040         if (enable) {
7041                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7042                                   MAC_MODE_PORT_MODE_MASK);
7043
7044                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7045
7046                 if (!tg3_flag(tp, 5705_PLUS))
7047                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7048
7049                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7050                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7051                 else
7052                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7053         } else {
7054                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7055
7056                 if (tg3_flag(tp, 5705_PLUS) ||
7057                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7058                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7059                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7060         }
7061
7062         tw32(MAC_MODE, tp->mac_mode);
7063         udelay(40);
7064 }
7065
7066 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7067 {
7068         u32 val, bmcr, mac_mode, ptest = 0;
7069
7070         tg3_phy_toggle_apd(tp, false);
7071         tg3_phy_toggle_automdix(tp, 0);
7072
7073         if (extlpbk && tg3_phy_set_extloopbk(tp))
7074                 return -EIO;
7075
7076         bmcr = BMCR_FULLDPLX;
7077         switch (speed) {
7078         case SPEED_10:
7079                 break;
7080         case SPEED_100:
7081                 bmcr |= BMCR_SPEED100;
7082                 break;
7083         case SPEED_1000:
7084         default:
7085                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7086                         speed = SPEED_100;
7087                         bmcr |= BMCR_SPEED100;
7088                 } else {
7089                         speed = SPEED_1000;
7090                         bmcr |= BMCR_SPEED1000;
7091                 }
7092         }
7093
7094         if (extlpbk) {
7095                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7096                         tg3_readphy(tp, MII_CTRL1000, &val);
7097                         val |= CTL1000_AS_MASTER |
7098                                CTL1000_ENABLE_MASTER;
7099                         tg3_writephy(tp, MII_CTRL1000, val);
7100                 } else {
7101                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7102                                 MII_TG3_FET_PTEST_TRIM_2;
7103                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7104                 }
7105         } else
7106                 bmcr |= BMCR_LOOPBACK;
7107
7108         tg3_writephy(tp, MII_BMCR, bmcr);
7109
7110         /* The write needs to be flushed for the FETs */
7111         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7112                 tg3_readphy(tp, MII_BMCR, &bmcr);
7113
7114         udelay(40);
7115
7116         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7117             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7118                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7119                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7120                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7121
7122                 /* The write needs to be flushed for the AC131 */
7123                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7124         }
7125
7126         /* Reset to prevent losing 1st rx packet intermittently */
7127         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7128             tg3_flag(tp, 5780_CLASS)) {
7129                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7130                 udelay(10);
7131                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7132         }
7133
7134         mac_mode = tp->mac_mode &
7135                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7136         if (speed == SPEED_1000)
7137                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7138         else
7139                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7140
7141         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7142                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7143
7144                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7145                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7146                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7147                         mac_mode |= MAC_MODE_LINK_POLARITY;
7148
7149                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7150                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7151         }
7152
7153         tw32(MAC_MODE, mac_mode);
7154         udelay(40);
7155
7156         return 0;
7157 }
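/* The BMCR value assembled above is built from standard MII control
 * bits.  For an internal 1000 Mb/s loopback, for example, the register
 * ends up as:
 *
 *	BMCR_FULLDPLX | BMCR_SPEED1000 | BMCR_LOOPBACK
 *
 * External loopback omits BMCR_LOOPBACK and instead forces the PHY to
 * act as master (CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER) or, on FET
 * PHYs, programs the PTEST trim bits.
 */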
7158
7159 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7160 {
7161         struct tg3 *tp = netdev_priv(dev);
7162
7163         if (features & NETIF_F_LOOPBACK) {
7164                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7165                         return;
7166
7167                 spin_lock_bh(&tp->lock);
7168                 tg3_mac_loopback(tp, true);
7169                 netif_carrier_on(tp->dev);
7170                 spin_unlock_bh(&tp->lock);
7171                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7172         } else {
7173                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7174                         return;
7175
7176                 spin_lock_bh(&tp->lock);
7177                 tg3_mac_loopback(tp, false);
7178                 /* Force link status check */
7179                 tg3_setup_phy(tp, 1);
7180                 spin_unlock_bh(&tp->lock);
7181                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7182         }
7183 }
7184
7185 static netdev_features_t tg3_fix_features(struct net_device *dev,
7186         netdev_features_t features)
7187 {
7188         struct tg3 *tp = netdev_priv(dev);
7189
7190         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7191                 features &= ~NETIF_F_ALL_TSO;
7192
7193         return features;
7194 }
7195
7196 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7197 {
7198         netdev_features_t changed = dev->features ^ features;
7199
7200         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7201                 tg3_set_loopback(dev, features);
7202
7203         return 0;
7204 }
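/* Both callbacks above are invoked by the core feature machinery rather
 * than called directly.  As a rough sketch of the flow: a request such
 * as "ethtool -K ethX loopback on" updates dev->wanted_features and
 * calls netdev_update_features(), which filters the request through
 * tg3_fix_features() and commits the result via tg3_set_features().
 * An in-kernel toggle would look roughly like:
 *
 *	dev->wanted_features |= NETIF_F_LOOPBACK;
 *	netdev_update_features(dev);
 */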
7205
7206 static void tg3_rx_prodring_free(struct tg3 *tp,
7207                                  struct tg3_rx_prodring_set *tpr)
7208 {
7209         int i;
7210
7211         if (tpr != &tp->napi[0].prodring) {
7212                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7213                      i = (i + 1) & tp->rx_std_ring_mask)
7214                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7215                                         tp->rx_pkt_map_sz);
7216
7217                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7218                         for (i = tpr->rx_jmb_cons_idx;
7219                              i != tpr->rx_jmb_prod_idx;
7220                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7221                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7222                                                 TG3_RX_JMB_MAP_SZ);
7223                         }
7224                 }
7225
7226                 return;
7227         }
7228
7229         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7230                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7231                                 tp->rx_pkt_map_sz);
7232
7233         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7234                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7235                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7236                                         TG3_RX_JMB_MAP_SZ);
7237         }
7238 }
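/* The cons-to-prod walks above rely on the ring sizes being powers of
 * two, so "& mask" implements the index wraparound.  For example, with
 * rx_std_ring_mask == 511 (a 512-entry ring):
 *
 *	i: 510 -> 511 -> (512 & 511) == 0 -> 1 -> ...
 *
 * visiting every live buffer between the consumer and producer indexes
 * exactly once.
 */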
7239
7240 /* Initialize rx rings for packet processing.
7241  *
7242  * The chip has been shut down and the driver detached from
7243  * the networking stack, so no interrupts or new tx packets will
7244  * end up in the driver.  tp->{tx,}lock are held and thus
7245  * we may not sleep.
7246  */
7247 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7248                                  struct tg3_rx_prodring_set *tpr)
7249 {
7250         u32 i, rx_pkt_dma_sz;
7251
7252         tpr->rx_std_cons_idx = 0;
7253         tpr->rx_std_prod_idx = 0;
7254         tpr->rx_jmb_cons_idx = 0;
7255         tpr->rx_jmb_prod_idx = 0;
7256
7257         if (tpr != &tp->napi[0].prodring) {
7258                 memset(&tpr->rx_std_buffers[0], 0,
7259                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7260                 if (tpr->rx_jmb_buffers)
7261                         memset(&tpr->rx_jmb_buffers[0], 0,
7262                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7263                 goto done;
7264         }
7265
7266         /* Zero out all descriptors. */
7267         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7268
7269         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7270         if (tg3_flag(tp, 5780_CLASS) &&
7271             tp->dev->mtu > ETH_DATA_LEN)
7272                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7273         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7274
7275         /* Initialize invariants of the rings; we only set this
7276          * stuff once.  This works because the card does not
7277          * write into the rx buffer posting rings.
7278          */
7279         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7280                 struct tg3_rx_buffer_desc *rxd;
7281
7282                 rxd = &tpr->rx_std[i];
7283                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7284                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7285                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7286                                (i << RXD_OPAQUE_INDEX_SHIFT));
7287         }
7288
7289         /* Now allocate fresh SKBs for each rx ring. */
7290         for (i = 0; i < tp->rx_pending; i++) {
7291                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7292                         netdev_warn(tp->dev,
7293                                     "Using a smaller RX standard ring. Only "
7294                                     "%d out of %d buffers were allocated "
7295                                     "successfully\n", i, tp->rx_pending);
7296                         if (i == 0)
7297                                 goto initfail;
7298                         tp->rx_pending = i;
7299                         break;
7300                 }
7301         }
7302
7303         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7304                 goto done;
7305
7306         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7307
7308         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7309                 goto done;
7310
7311         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7312                 struct tg3_rx_buffer_desc *rxd;
7313
7314                 rxd = &tpr->rx_jmb[i].std;
7315                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7316                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7317                                   RXD_FLAG_JUMBO;
7318                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7319                        (i << RXD_OPAQUE_INDEX_SHIFT));
7320         }
7321
7322         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7323                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7324                         netdev_warn(tp->dev,
7325                                     "Using a smaller RX jumbo ring. Only %d "
7326                                     "out of %d buffers were allocated "
7327                                     "successfully\n", i, tp->rx_jumbo_pending);
7328                         if (i == 0)
7329                                 goto initfail;
7330                         tp->rx_jumbo_pending = i;
7331                         break;
7332                 }
7333         }
7334
7335 done:
7336         return 0;
7337
7338 initfail:
7339         tg3_rx_prodring_free(tp, tpr);
7340         return -ENOMEM;
7341 }
7342
7343 static void tg3_rx_prodring_fini(struct tg3 *tp,
7344                                  struct tg3_rx_prodring_set *tpr)
7345 {
7346         kfree(tpr->rx_std_buffers);
7347         tpr->rx_std_buffers = NULL;
7348         kfree(tpr->rx_jmb_buffers);
7349         tpr->rx_jmb_buffers = NULL;
7350         if (tpr->rx_std) {
7351                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7352                                   tpr->rx_std, tpr->rx_std_mapping);
7353                 tpr->rx_std = NULL;
7354         }
7355         if (tpr->rx_jmb) {
7356                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7357                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7358                 tpr->rx_jmb = NULL;
7359         }
7360 }
7361
7362 static int tg3_rx_prodring_init(struct tg3 *tp,
7363                                 struct tg3_rx_prodring_set *tpr)
7364 {
7365         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7366                                       GFP_KERNEL);
7367         if (!tpr->rx_std_buffers)
7368                 return -ENOMEM;
7369
7370         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7371                                          TG3_RX_STD_RING_BYTES(tp),
7372                                          &tpr->rx_std_mapping,
7373                                          GFP_KERNEL);
7374         if (!tpr->rx_std)
7375                 goto err_out;
7376
7377         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7378                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7379                                               GFP_KERNEL);
7380                 if (!tpr->rx_jmb_buffers)
7381                         goto err_out;
7382
7383                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7384                                                  TG3_RX_JMB_RING_BYTES(tp),
7385                                                  &tpr->rx_jmb_mapping,
7386                                                  GFP_KERNEL);
7387                 if (!tpr->rx_jmb)
7388                         goto err_out;
7389         }
7390
7391         return 0;
7392
7393 err_out:
7394         tg3_rx_prodring_fini(tp, tpr);
7395         return -ENOMEM;
7396 }
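/* The ring memory here follows the usual coherent-DMA pairing: each
 * dma_alloc_coherent() above is mirrored by a dma_free_coherent() in
 * tg3_rx_prodring_fini(), and every pointer is reset to NULL after
 * freeing so the fini path is safe to run on a partially initialized
 * set.  Schematically (illustrative names):
 *
 *	ring = dma_alloc_coherent(&pdev->dev, bytes, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		goto err_out;	(err_out frees only what was allocated)
 *	...
 *	dma_free_coherent(&pdev->dev, bytes, ring, ring_dma);
 *	ring = NULL;
 */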
7397
7398 /* Free up pending packets in all rx/tx rings.
7399  *
7400  * The chip has been shut down and the driver detached from
7401  * the networking stack, so no interrupts or new tx packets will
7402  * end up in the driver.  tp->{tx,}lock is not held and we are not
7403  * in an interrupt context and thus may sleep.
7404  */
7405 static void tg3_free_rings(struct tg3 *tp)
7406 {
7407         int i, j;
7408
7409         for (j = 0; j < tp->irq_cnt; j++) {
7410                 struct tg3_napi *tnapi = &tp->napi[j];
7411
7412                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7413
7414                 if (!tnapi->tx_buffers)
7415                         continue;
7416
7417                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7418                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7419
7420                         if (!skb)
7421                                 continue;
7422
7423                         tg3_tx_skb_unmap(tnapi, i,
7424                                          skb_shinfo(skb)->nr_frags - 1);
7425
7426                         dev_kfree_skb_any(skb);
7427                 }
7428                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7429         }
7430 }
7431
7432 /* Initialize tx/rx rings for packet processing.
7433  *
7434  * The chip has been shut down and the driver detached from
7435  * the networking stack, so no interrupts or new tx packets will
7436  * end up in the driver.  tp->{tx,}lock are held and thus
7437  * we may not sleep.
7438  */
7439 static int tg3_init_rings(struct tg3 *tp)
7440 {
7441         int i;
7442
7443         /* Free up all the SKBs. */
7444         tg3_free_rings(tp);
7445
7446         for (i = 0; i < tp->irq_cnt; i++) {
7447                 struct tg3_napi *tnapi = &tp->napi[i];
7448
7449                 tnapi->last_tag = 0;
7450                 tnapi->last_irq_tag = 0;
7451                 tnapi->hw_status->status = 0;
7452                 tnapi->hw_status->status_tag = 0;
7453                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7454
7455                 tnapi->tx_prod = 0;
7456                 tnapi->tx_cons = 0;
7457                 if (tnapi->tx_ring)
7458                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7459
7460                 tnapi->rx_rcb_ptr = 0;
7461                 if (tnapi->rx_rcb)
7462                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7463
7464                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7465                         tg3_free_rings(tp);
7466                         return -ENOMEM;
7467                 }
7468         }
7469
7470         return 0;
7471 }
7472
7473 /*
7474  * Must not be invoked with interrupt sources disabled and
7475  * the hardware shut down.
7476  */
7477 static void tg3_free_consistent(struct tg3 *tp)
7478 {
7479         int i;
7480
7481         for (i = 0; i < tp->irq_cnt; i++) {
7482                 struct tg3_napi *tnapi = &tp->napi[i];
7483
7484                 if (tnapi->tx_ring) {
7485                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7486                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7487                         tnapi->tx_ring = NULL;
7488                 }
7489
7490                 kfree(tnapi->tx_buffers);
7491                 tnapi->tx_buffers = NULL;
7492
7493                 if (tnapi->rx_rcb) {
7494                         dma_free_coherent(&tp->pdev->dev,
7495                                           TG3_RX_RCB_RING_BYTES(tp),
7496                                           tnapi->rx_rcb,
7497                                           tnapi->rx_rcb_mapping);
7498                         tnapi->rx_rcb = NULL;
7499                 }
7500
7501                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7502
7503                 if (tnapi->hw_status) {
7504                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7505                                           tnapi->hw_status,
7506                                           tnapi->status_mapping);
7507                         tnapi->hw_status = NULL;
7508                 }
7509         }
7510
7511         if (tp->hw_stats) {
7512                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7513                                   tp->hw_stats, tp->stats_mapping);
7514                 tp->hw_stats = NULL;
7515         }
7516 }
7517
7518 /*
7519  * Must not be invoked with interrupt sources disabled and
7520  * the hardware shut down.  Can sleep.
7521  */
7522 static int tg3_alloc_consistent(struct tg3 *tp)
7523 {
7524         int i;
7525
7526         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7527                                           sizeof(struct tg3_hw_stats),
7528                                           &tp->stats_mapping,
7529                                           GFP_KERNEL);
7530         if (!tp->hw_stats)
7531                 goto err_out;
7532
7533         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7534
7535         for (i = 0; i < tp->irq_cnt; i++) {
7536                 struct tg3_napi *tnapi = &tp->napi[i];
7537                 struct tg3_hw_status *sblk;
7538
7539                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7540                                                       TG3_HW_STATUS_SIZE,
7541                                                       &tnapi->status_mapping,
7542                                                       GFP_KERNEL);
7543                 if (!tnapi->hw_status)
7544                         goto err_out;
7545
7546                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7547                 sblk = tnapi->hw_status;
7548
7549                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7550                         goto err_out;
7551
7552                 /* If multivector TSS is enabled, vector 0 does not handle
7553                  * tx interrupts.  Don't allocate any resources for it.
7554                  */
7555                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7556                     (i && tg3_flag(tp, ENABLE_TSS))) {
7557                         tnapi->tx_buffers = kzalloc(
7558                                                sizeof(struct tg3_tx_ring_info) *
7559                                                TG3_TX_RING_SIZE, GFP_KERNEL);
7560                         if (!tnapi->tx_buffers)
7561                                 goto err_out;
7562
7563                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7564                                                             TG3_TX_RING_BYTES,
7565                                                         &tnapi->tx_desc_mapping,
7566                                                             GFP_KERNEL);
7567                         if (!tnapi->tx_ring)
7568                                 goto err_out;
7569                 }
7570
7571                 /*
7572                  * When RSS is enabled, the status block format changes
7573                  * slightly.  The "rx_jumbo_consumer", "reserved",
7574                  * and "rx_mini_consumer" members get mapped to the
7575                  * other three rx return ring producer indexes.
7576                  */
7577                 switch (i) {
7578                 default:
7579                         if (tg3_flag(tp, ENABLE_RSS)) {
7580                                 tnapi->rx_rcb_prod_idx = NULL;
7581                                 break;
7582                         }
7583                         /* Fall through */
7584                 case 1:
7585                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7586                         break;
7587                 case 2:
7588                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7589                         break;
7590                 case 3:
7591                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
7592                         break;
7593                 case 4:
7594                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7595                         break;
7596                 }
7597
7598                 /*
7599                  * If multivector RSS is enabled, vector 0 does not handle
7600                  * rx or tx interrupts.  Don't allocate any resources for it.
7601                  */
7602                 if (!i && tg3_flag(tp, ENABLE_RSS))
7603                         continue;
7604
7605                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7606                                                    TG3_RX_RCB_RING_BYTES(tp),
7607                                                    &tnapi->rx_rcb_mapping,
7608                                                    GFP_KERNEL);
7609                 if (!tnapi->rx_rcb)
7610                         goto err_out;
7611
7612                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7613         }
7614
7615         return 0;
7616
7617 err_out:
7618         tg3_free_consistent(tp);
7619         return -ENOMEM;
7620 }
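/* The switch statement above leaves each vector with a pointer into its
 * own status block.  In the RSS layout, the four rx return ring producer
 * indexes arrive in fields the non-RSS layout uses for other purposes:
 *
 *	vector 1 -> sblk->idx[0].rx_producer
 *	vector 2 -> sblk->rx_jumbo_consumer
 *	vector 3 -> sblk->reserved
 *	vector 4 -> sblk->rx_mini_consumer
 *
 * so the rx path can simply poll *tnapi->rx_rcb_prod_idx without caring
 * which hardware field backs it.
 */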
7621
7622 #define MAX_WAIT_CNT 1000
7623
7624 /* To stop a block, clear the enable bit and poll till it
7625  * clears.  tp->lock is held.
7626  */
7627 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7628 {
7629         unsigned int i;
7630         u32 val;
7631
7632         if (tg3_flag(tp, 5705_PLUS)) {
7633                 switch (ofs) {
7634                 case RCVLSC_MODE:
7635                 case DMAC_MODE:
7636                 case MBFREE_MODE:
7637                 case BUFMGR_MODE:
7638                 case MEMARB_MODE:
7639                         /* We can't enable/disable these bits of the
7640                          * 5705/5750; just say success.
7641                          */
7642                         return 0;
7643
7644                 default:
7645                         break;
7646                 }
7647         }
7648
7649         val = tr32(ofs);
7650         val &= ~enable_bit;
7651         tw32_f(ofs, val);
7652
7653         for (i = 0; i < MAX_WAIT_CNT; i++) {
7654                 udelay(100);
7655                 val = tr32(ofs);
7656                 if ((val & enable_bit) == 0)
7657                         break;
7658         }
7659
7660         if (i == MAX_WAIT_CNT && !silent) {
7661                 dev_err(&tp->pdev->dev,
7662                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7663                         ofs, enable_bit);
7664                 return -ENODEV;
7665         }
7666
7667         return 0;
7668 }
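/* tg3_stop_block() is the generic clear-then-poll idiom.  Reduced to a
 * standalone sketch (illustrative names, not driver API):
 *
 *	writel(readl(reg) & ~enable_bit, reg);
 *	for (i = 0; i < MAX_WAIT_CNT; i++) {
 *		udelay(100);
 *		if (!(readl(reg) & enable_bit))
 *			return 0;
 *	}
 *	return -ENODEV;
 *
 * With MAX_WAIT_CNT == 1000 and a 100 usec poll interval, the worst
 * case wait is 100 msec per block.
 */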
7669
7670 /* tp->lock is held. */
7671 static int tg3_abort_hw(struct tg3 *tp, int silent)
7672 {
7673         int i, err;
7674
7675         tg3_disable_ints(tp);
7676
7677         tp->rx_mode &= ~RX_MODE_ENABLE;
7678         tw32_f(MAC_RX_MODE, tp->rx_mode);
7679         udelay(10);
7680
7681         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7682         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7683         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7684         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7685         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7686         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7687
7688         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7689         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7690         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7691         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7692         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7693         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7694         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7695
7696         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7697         tw32_f(MAC_MODE, tp->mac_mode);
7698         udelay(40);
7699
7700         tp->tx_mode &= ~TX_MODE_ENABLE;
7701         tw32_f(MAC_TX_MODE, tp->tx_mode);
7702
7703         for (i = 0; i < MAX_WAIT_CNT; i++) {
7704                 udelay(100);
7705                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7706                         break;
7707         }
7708         if (i >= MAX_WAIT_CNT) {
7709                 dev_err(&tp->pdev->dev,
7710                         "%s timed out, TX_MODE_ENABLE will not clear "
7711                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7712                 err |= -ENODEV;
7713         }
7714
7715         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7716         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7717         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7718
7719         tw32(FTQ_RESET, 0xffffffff);
7720         tw32(FTQ_RESET, 0x00000000);
7721
7722         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7723         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7724
7725         for (i = 0; i < tp->irq_cnt; i++) {
7726                 struct tg3_napi *tnapi = &tp->napi[i];
7727                 if (tnapi->hw_status)
7728                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7729         }
7730
7731         return err;
7732 }
7733
7734 /* Save PCI command register before chip reset */
7735 static void tg3_save_pci_state(struct tg3 *tp)
7736 {
7737         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7738 }
7739
7740 /* Restore PCI state after chip reset */
7741 static void tg3_restore_pci_state(struct tg3 *tp)
7742 {
7743         u32 val;
7744
7745         /* Re-enable indirect register accesses. */
7746         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7747                                tp->misc_host_ctrl);
7748
7749         /* Set MAX PCI retry to zero. */
7750         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7751         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7752             tg3_flag(tp, PCIX_MODE))
7753                 val |= PCISTATE_RETRY_SAME_DMA;
7754         /* Allow reads and writes to the APE register and memory space. */
7755         if (tg3_flag(tp, ENABLE_APE))
7756                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7757                        PCISTATE_ALLOW_APE_SHMEM_WR |
7758                        PCISTATE_ALLOW_APE_PSPACE_WR;
7759         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7760
7761         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7762
7763         if (!tg3_flag(tp, PCI_EXPRESS)) {
7764                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7765                                       tp->pci_cacheline_sz);
7766                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7767                                       tp->pci_lat_timer);
7768         }
7769
7770         /* Make sure PCI-X relaxed ordering bit is clear. */
7771         if (tg3_flag(tp, PCIX_MODE)) {
7772                 u16 pcix_cmd;
7773
7774                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7775                                      &pcix_cmd);
7776                 pcix_cmd &= ~PCI_X_CMD_ERO;
7777                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7778                                       pcix_cmd);
7779         }
7780
7781         if (tg3_flag(tp, 5780_CLASS)) {
7782
7783                 /* Chip reset on 5780 will reset MSI enable bit,
7784                  * so need to restore it.
7785                  */
7786                 if (tg3_flag(tp, USING_MSI)) {
7787                         u16 ctrl;
7788
7789                         pci_read_config_word(tp->pdev,
7790                                              tp->msi_cap + PCI_MSI_FLAGS,
7791                                              &ctrl);
7792                         pci_write_config_word(tp->pdev,
7793                                               tp->msi_cap + PCI_MSI_FLAGS,
7794                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7795                         val = tr32(MSGINT_MODE);
7796                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7797                 }
7798         }
7799 }
7800
7801 /* tp->lock is held. */
7802 static int tg3_chip_reset(struct tg3 *tp)
7803 {
7804         u32 val;
7805         void (*write_op)(struct tg3 *, u32, u32);
7806         int i, err;
7807
7808         tg3_nvram_lock(tp);
7809
7810         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7811
7812         /* No matching tg3_nvram_unlock() after this because
7813          * chip reset below will undo the nvram lock.
7814          */
7815         tp->nvram_lock_cnt = 0;
7816
7817         /* GRC_MISC_CFG core clock reset will clear the memory
7818          * enable bit in PCI register 4 and the MSI enable bit
7819          * on some chips, so we save relevant registers here.
7820          */
7821         tg3_save_pci_state(tp);
7822
7823         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7824             tg3_flag(tp, 5755_PLUS))
7825                 tw32(GRC_FASTBOOT_PC, 0);
7826
7827         /*
7828          * We must avoid the readl() that normally takes place.
7829          * It locks machines, causes machine checks, and other
7830          * fun things.  So, temporarily disable the 5701
7831          * hardware workaround, while we do the reset.
7832          */
7833         write_op = tp->write32;
7834         if (write_op == tg3_write_flush_reg32)
7835                 tp->write32 = tg3_write32;
7836
7837         /* Prevent the irq handler from reading or writing PCI registers
7838          * during chip reset when the memory enable bit in the PCI command
7839          * register may be cleared.  The chip does not generate interrupt
7840          * at this time, but the irq handler may still be called due to irq
7841          * sharing or irqpoll.
7842          */
7843         tg3_flag_set(tp, CHIP_RESETTING);
7844         for (i = 0; i < tp->irq_cnt; i++) {
7845                 struct tg3_napi *tnapi = &tp->napi[i];
7846                 if (tnapi->hw_status) {
7847                         tnapi->hw_status->status = 0;
7848                         tnapi->hw_status->status_tag = 0;
7849                 }
7850                 tnapi->last_tag = 0;
7851                 tnapi->last_irq_tag = 0;
7852         }
7853         smp_mb();
7854
7855         for (i = 0; i < tp->irq_cnt; i++)
7856                 synchronize_irq(tp->napi[i].irq_vec);
7857
7858         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7859                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7860                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7861         }
7862
7863         /* do the reset */
7864         val = GRC_MISC_CFG_CORECLK_RESET;
7865
7866         if (tg3_flag(tp, PCI_EXPRESS)) {
7867                 /* Force PCIe 1.0a mode */
7868                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7869                     !tg3_flag(tp, 57765_PLUS) &&
7870                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7871                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7872                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7873
7874                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7875                         tw32(GRC_MISC_CFG, (1 << 29));
7876                         val |= (1 << 29);
7877                 }
7878         }
7879
7880         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7881                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7882                 tw32(GRC_VCPU_EXT_CTRL,
7883                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7884         }
7885
7886         /* Manage gphy power for all CPMU-absent PCIe devices. */
7887         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7888                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7889
7890         tw32(GRC_MISC_CFG, val);
7891
7892         /* restore 5701 hardware bug workaround write method */
7893         tp->write32 = write_op;
7894
7895         /* Unfortunately, we have to delay before the PCI read back.
7896          * Some 575X chips will not even respond to a PCI cfg access
7897          * when the reset command is given to the chip.
7898          *
7899          * How do these hardware designers expect things to work
7900          * properly if the PCI write is posted for a long period
7901          * of time?  It is always necessary to have some method by
7902          * which a register read back can occur to push out the
7903          * posted write that performs the reset.
7904          *
7905          * For most tg3 variants the trick below was working.
7906          * Ho hum...
7907          */
7908         udelay(120);
7909
7910         /* Flush PCI posted writes.  The normal MMIO registers
7911          * are inaccessible at this time so this is the only
7912          * way to do this reliably (actually, this is no longer
7913          * the case, see above).  I tried to use indirect
7914          * register read/write but this upset some 5701 variants.
7915          */
7916         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7917
7918         udelay(120);
7919
7920         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7921                 u16 val16;
7922
7923                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7924                         int i;
7925                         u32 cfg_val;
7926
7927                         /* Wait for link training to complete.  */
7928                         for (i = 0; i < 5000; i++)
7929                                 udelay(100);
7930
7931                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7932                         pci_write_config_dword(tp->pdev, 0xc4,
7933                                                cfg_val | (1 << 15));
7934                 }
7935
7936                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7937                 pci_read_config_word(tp->pdev,
7938                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7939                                      &val16);
7940                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7941                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7942                 /*
7943                  * Older PCIe devices only support the 128 byte
7944                  * MPS setting.  Enforce the restriction.
7945                  */
7946                 if (!tg3_flag(tp, CPMU_PRESENT))
7947                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7948                 pci_write_config_word(tp->pdev,
7949                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7950                                       val16);
7951
7952                 /* Clear error status */
7953                 pci_write_config_word(tp->pdev,
7954                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7955                                       PCI_EXP_DEVSTA_CED |
7956                                       PCI_EXP_DEVSTA_NFED |
7957                                       PCI_EXP_DEVSTA_FED |
7958                                       PCI_EXP_DEVSTA_URD);
7959         }
7960
7961         tg3_restore_pci_state(tp);
7962
7963         tg3_flag_clear(tp, CHIP_RESETTING);
7964         tg3_flag_clear(tp, ERROR_PROCESSED);
7965
7966         val = 0;
7967         if (tg3_flag(tp, 5780_CLASS))
7968                 val = tr32(MEMARB_MODE);
7969         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7970
7971         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7972                 tg3_stop_fw(tp);
7973                 tw32(0x5000, 0x400);
7974         }
7975
7976         tw32(GRC_MODE, tp->grc_mode);
7977
7978         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7979                 val = tr32(0xc4);
7980
7981                 tw32(0xc4, val | (1 << 15));
7982         }
7983
7984         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7985             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7986                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7987                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7988                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7989                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7990         }
7991
7992         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7993                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7994                 val = tp->mac_mode;
7995         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7996                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7997                 val = tp->mac_mode;
7998         } else
7999                 val = 0;
8000
8001         tw32_f(MAC_MODE, val);
8002         udelay(40);
8003
8004         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8005
8006         err = tg3_poll_fw(tp);
8007         if (err)
8008                 return err;
8009
8010         tg3_mdio_start(tp);
8011
8012         if (tg3_flag(tp, PCI_EXPRESS) &&
8013             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8014             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8015             !tg3_flag(tp, 57765_PLUS)) {
8016                 val = tr32(0x7c00);
8017
8018                 tw32(0x7c00, val | (1 << 25));
8019         }
8020
8021         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8022                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8023                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8024         }
8025
8026         /* Reprobe ASF enable state.  */
8027         tg3_flag_clear(tp, ENABLE_ASF);
8028         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8029         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8030         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8031                 u32 nic_cfg;
8032
8033                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8034                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8035                         tg3_flag_set(tp, ENABLE_ASF);
8036                         tp->last_event_jiffies = jiffies;
8037                         if (tg3_flag(tp, 5750_PLUS))
8038                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8039                 }
8040         }
8041
8042         return 0;
8043 }
8044
8045 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8046 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8047
8048 /* tp->lock is held. */
8049 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8050 {
8051         int err;
8052
8053         tg3_stop_fw(tp);
8054
8055         tg3_write_sig_pre_reset(tp, kind);
8056
8057         tg3_abort_hw(tp, silent);
8058         err = tg3_chip_reset(tp);
8059
8060         __tg3_set_mac_addr(tp, 0);
8061
8062         tg3_write_sig_legacy(tp, kind);
8063         tg3_write_sig_post_reset(tp, kind);
8064
8065         if (tp->hw_stats) {
8066                 /* Save the stats across chip resets... */
8067                 tg3_get_nstats(tp, &tp->net_stats_prev);
8068                 tg3_get_estats(tp, &tp->estats_prev);
8069
8070                 /* And make sure the next sample is new data */
8071                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8072         }
8073
8074         if (err)
8075                 return err;
8076
8077         return 0;
8078 }
8079
8080 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8081 {
8082         struct tg3 *tp = netdev_priv(dev);
8083         struct sockaddr *addr = p;
8084         int err = 0, skip_mac_1 = 0;
8085
8086         if (!is_valid_ether_addr(addr->sa_data))
8087                 return -EADDRNOTAVAIL;
8088
8089         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8090
8091         if (!netif_running(dev))
8092                 return 0;
8093
8094         if (tg3_flag(tp, ENABLE_ASF)) {
8095                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8096
8097                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8098                 addr0_low = tr32(MAC_ADDR_0_LOW);
8099                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8100                 addr1_low = tr32(MAC_ADDR_1_LOW);
8101
8102                 /* Skip MAC addr 1 if ASF is using it. */
8103                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8104                     !(addr1_high == 0 && addr1_low == 0))
8105                         skip_mac_1 = 1;
8106         }
8107         spin_lock_bh(&tp->lock);
8108         __tg3_set_mac_addr(tp, skip_mac_1);
8109         spin_unlock_bh(&tp->lock);
8110
8111         return err;
8112 }
8113
8114 /* tp->lock is held. */
8115 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8116                            dma_addr_t mapping, u32 maxlen_flags,
8117                            u32 nic_addr)
8118 {
8119         tg3_write_mem(tp,
8120                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8121                       ((u64) mapping >> 32));
8122         tg3_write_mem(tp,
8123                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8124                       ((u64) mapping & 0xffffffff));
8125         tg3_write_mem(tp,
8126                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8127                        maxlen_flags);
8128
8129         if (!tg3_flag(tp, 5705_PLUS))
8130                 tg3_write_mem(tp,
8131                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8132                               nic_addr);
8133 }
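/* A BD info block stores the host ring address as two 32-bit halves.
 * For a mapping of, say, 0x0000000123456780:
 *
 *	(u64) mapping >> 32		== 0x00000001	(HOST_ADDR high)
 *	(u64) mapping & 0xffffffff	== 0x23456780	(HOST_ADDR low)
 *
 * The u64 cast matters on 32-bit hosts where dma_addr_t may be only 32
 * bits wide; the high half then evaluates to zero.
 */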
8134
8135 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8136 {
8137         int i;
8138
8139         if (!tg3_flag(tp, ENABLE_TSS)) {
8140                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8141                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8142                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8143         } else {
8144                 tw32(HOSTCC_TXCOL_TICKS, 0);
8145                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8146                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8147         }
8148
8149         if (!tg3_flag(tp, ENABLE_RSS)) {
8150                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8151                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8152                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8153         } else {
8154                 tw32(HOSTCC_RXCOL_TICKS, 0);
8155                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8156                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8157         }
8158
8159         if (!tg3_flag(tp, 5705_PLUS)) {
8160                 u32 val = ec->stats_block_coalesce_usecs;
8161
8162                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8163                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8164
8165                 if (!netif_carrier_ok(tp->dev))
8166                         val = 0;
8167
8168                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8169         }
8170
8171         for (i = 0; i < tp->irq_cnt - 1; i++) {
8172                 u32 reg;
8173
8174                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8175                 tw32(reg, ec->rx_coalesce_usecs);
8176                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8177                 tw32(reg, ec->rx_max_coalesced_frames);
8178                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8179                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8180
8181                 if (tg3_flag(tp, ENABLE_TSS)) {
8182                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8183                         tw32(reg, ec->tx_coalesce_usecs);
8184                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8185                         tw32(reg, ec->tx_max_coalesced_frames);
8186                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8187                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8188                 }
8189         }
8190
8191         for (; i < tp->irq_max - 1; i++) {
8192                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8193                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8194                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8195
8196                 if (tg3_flag(tp, ENABLE_TSS)) {
8197                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8198                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8199                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8200                 }
8201         }
8202 }
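/* The per-vector host coalescing registers sit at a fixed 0x18-byte
 * stride, so vector n (n >= 1) is programmed at, for example:
 *
 *	HOSTCC_RXCOL_TICKS_VEC1 + (n - 1) * 0x18
 *
 * The trailing loop zeroes the registers of vectors that exist in
 * hardware (up to irq_max - 1) but are unused at the current irq_cnt.
 */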
8203
8204 /* tp->lock is held. */
8205 static void tg3_rings_reset(struct tg3 *tp)
8206 {
8207         int i;
8208         u32 stblk, txrcb, rxrcb, limit;
8209         struct tg3_napi *tnapi = &tp->napi[0];
8210
8211         /* Disable all transmit rings but the first. */
8212         if (!tg3_flag(tp, 5705_PLUS))
8213                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8214         else if (tg3_flag(tp, 5717_PLUS))
8215                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8216         else if (tg3_flag(tp, 57765_CLASS))
8217                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8218         else
8219                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8220
8221         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8222              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8223                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8224                               BDINFO_FLAGS_DISABLED);
8225
8226
8227         /* Disable all receive return rings but the first. */
8228         if (tg3_flag(tp, 5717_PLUS))
8229                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8230         else if (!tg3_flag(tp, 5705_PLUS))
8231                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8232         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8233                  tg3_flag(tp, 57765_CLASS))
8234                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8235         else
8236                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8237
8238         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8239              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8240                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8241                               BDINFO_FLAGS_DISABLED);
8242
8243         /* Disable interrupts */
8244         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8245         tp->napi[0].chk_msi_cnt = 0;
8246         tp->napi[0].last_rx_cons = 0;
8247         tp->napi[0].last_tx_cons = 0;
8248
8249         /* Zero mailbox registers. */
8250         if (tg3_flag(tp, SUPPORT_MSIX)) {
8251                 for (i = 1; i < tp->irq_max; i++) {
8252                         tp->napi[i].tx_prod = 0;
8253                         tp->napi[i].tx_cons = 0;
8254                         if (tg3_flag(tp, ENABLE_TSS))
8255                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8256                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8257                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8258                         tp->napi[i].chk_msi_cnt = 0;
8259                         tp->napi[i].last_rx_cons = 0;
8260                         tp->napi[i].last_tx_cons = 0;
8261                 }
8262                 if (!tg3_flag(tp, ENABLE_TSS))
8263                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8264         } else {
8265                 tp->napi[0].tx_prod = 0;
8266                 tp->napi[0].tx_cons = 0;
8267                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8268                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8269         }
8270
8271         /* Make sure the NIC-based send BD rings are disabled. */
8272         if (!tg3_flag(tp, 5705_PLUS)) {
8273                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8274                 for (i = 0; i < 16; i++)
8275                         tw32_tx_mbox(mbox + i * 8, 0);
8276         }
8277
8278         txrcb = NIC_SRAM_SEND_RCB;
8279         rxrcb = NIC_SRAM_RCV_RET_RCB;
8280
8281         /* Clear status block in ram. */
8282         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8283
8284         /* Set status block DMA address */
8285         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8286              ((u64) tnapi->status_mapping >> 32));
8287         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8288              ((u64) tnapi->status_mapping & 0xffffffff));
8289
8290         if (tnapi->tx_ring) {
8291                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8292                                (TG3_TX_RING_SIZE <<
8293                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8294                                NIC_SRAM_TX_BUFFER_DESC);
8295                 txrcb += TG3_BDINFO_SIZE;
8296         }
8297
8298         if (tnapi->rx_rcb) {
8299                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8300                                (tp->rx_ret_ring_mask + 1) <<
8301                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8302                 rxrcb += TG3_BDINFO_SIZE;
8303         }
8304
8305         stblk = HOSTCC_STATBLCK_RING1;
8306
8307         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8308                 u64 mapping = (u64)tnapi->status_mapping;
8309                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8310                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8311
8312                 /* Clear status block in ram. */
8313                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8314
8315                 if (tnapi->tx_ring) {
8316                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8317                                        (TG3_TX_RING_SIZE <<
8318                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8319                                        NIC_SRAM_TX_BUFFER_DESC);
8320                         txrcb += TG3_BDINFO_SIZE;
8321                 }
8322
8323                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8324                                ((tp->rx_ret_ring_mask + 1) <<
8325                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8326
8327                 stblk += 8;
8328                 rxrcb += TG3_BDINFO_SIZE;
8329         }
8330 }
8331
8332 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8333 {
8334         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8335
8336         if (!tg3_flag(tp, 5750_PLUS) ||
8337             tg3_flag(tp, 5780_CLASS) ||
8338             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8339             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8340             tg3_flag(tp, 57765_PLUS))
8341                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8342         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8343                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8344                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8345         else
8346                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8347
8348         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8349         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8350
8351         val = min(nic_rep_thresh, host_rep_thresh);
8352         tw32(RCVBDI_STD_THRESH, val);
8353
8354         if (tg3_flag(tp, 57765_PLUS))
8355                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8356
8357         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8358                 return;
8359
8360         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8361
8362         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8363
8364         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8365         tw32(RCVBDI_JUMBO_THRESH, val);
8366
8367         if (tg3_flag(tp, 57765_PLUS))
8368                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8369 }
8370
8371 static inline u32 calc_crc(unsigned char *buf, int len)
8372 {
8373         u32 reg;
8374         u32 tmp;
8375         int j, k;
8376
8377         reg = 0xffffffff;
8378
8379         for (j = 0; j < len; j++) {
8380                 reg ^= buf[j];
8381
8382                 for (k = 0; k < 8; k++) {
8383                         tmp = reg & 0x01;
8384
8385                         reg >>= 1;
8386
8387                         if (tmp)
8388                                 reg ^= 0xedb88320;
8389                 }
8390         }
8391
8392         return ~reg;
8393 }
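/* calc_crc() is the standard bit-reflected CRC-32 (polynomial
 * 0xedb88320, initial value 0xffffffff, final inversion), i.e. the
 * Ethernet FCS algorithm computed one byte at a time.  As a check
 * value:
 *
 *	calc_crc("123456789", 9) == 0xcbf43926
 *
 * the well-known CRC-32 result for that test string.
 */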
8394
8395 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8396 {
8397         /* accept or reject all multicast frames */
8398         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8399         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8400         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8401         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8402 }
8403
8404 static void __tg3_set_rx_mode(struct net_device *dev)
8405 {
8406         struct tg3 *tp = netdev_priv(dev);
8407         u32 rx_mode;
8408
8409         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8410                                   RX_MODE_KEEP_VLAN_TAG);
8411
8412 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8413         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8414          * flag clear.
8415          */
8416         if (!tg3_flag(tp, ENABLE_ASF))
8417                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8418 #endif
8419
8420         if (dev->flags & IFF_PROMISC) {
8421                 /* Promiscuous mode. */
8422                 rx_mode |= RX_MODE_PROMISC;
8423         } else if (dev->flags & IFF_ALLMULTI) {
8424                 /* Accept all multicast. */
8425                 tg3_set_multi(tp, 1);
8426         } else if (netdev_mc_empty(dev)) {
8427                 /* Reject all multicast. */
8428                 tg3_set_multi(tp, 0);
8429         } else {
8430                 /* Accept one or more multicast(s). */
8431                 struct netdev_hw_addr *ha;
8432                 u32 mc_filter[4] = { 0, };
8433                 u32 regidx;
8434                 u32 bit;
8435                 u32 crc;
8436
8437                 netdev_for_each_mc_addr(ha, dev) {
8438                         crc = calc_crc(ha->addr, ETH_ALEN);
8439                         bit = ~crc & 0x7f;
8440                         regidx = (bit & 0x60) >> 5;
8441                         bit &= 0x1f;
8442                         mc_filter[regidx] |= (1 << bit);
8443                 }
8444
8445                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8446                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8447                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8448                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8449         }
8450
8451         if (rx_mode != tp->rx_mode) {
8452                 tp->rx_mode = rx_mode;
8453                 tw32_f(MAC_RX_MODE, rx_mode);
8454                 udelay(10);
8455         }
8456 }
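
/* A worked example of the multicast hash mapping above: if
 * ~crc & 0x7f comes out as 0x6d (binary 1101101), then
 *
 *	regidx = (0x6d & 0x60) >> 5 = 3     MAC_HASH_REG_3
 *	bit    =  0x6d & 0x1f       = 13    bit 13 of that register
 *
 * and any multicast address hashing to that filter bit is accepted.
 */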
8457
8458 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8459 {
8460         int i;
8461
8462         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8463                 tp->rss_ind_tbl[i] =
8464                         ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8465 }
8466
8467 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8468 {
8469         int i;
8470
8471         if (!tg3_flag(tp, SUPPORT_MSIX))
8472                 return;
8473
8474         if (tp->irq_cnt <= 2) {
8475                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8476                 return;
8477         }
8478
8479         /* Validate table against current IRQ count */
8480         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8481                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8482                         break;
8483         }
8484
8485         if (i != TG3_RSS_INDIR_TBL_SIZE)
8486                 tg3_rss_init_dflt_indir_tbl(tp);
8487 }
8488
8489 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8490 {
8491         int i = 0;
8492         u32 reg = MAC_RSS_INDIR_TBL_0;
8493
8494         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8495                 u32 val = tp->rss_ind_tbl[i];
8496                 i++;
8497                 for (; i % 8; i++) {
8498                         val <<= 4;
8499                         val |= tp->rss_ind_tbl[i];
8500                 }
8501                 tw32(reg, val);
8502                 reg += 4;
8503         }
8504 }
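
/* tg3_rss_write_indir_tbl() above packs eight 4-bit indirection
 * entries into each 32-bit register, first entry in the most
 * significant nibble.  E.g. the entries { 1, 2, 3, 0, 1, 2, 3, 0 }
 * are written as the single value 0x12301230, and the register
 * address then advances in 4-byte steps until all
 * TG3_RSS_INDIR_TBL_SIZE entries have been emitted.
 */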
8505
8506 /* tp->lock is held. */
8507 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8508 {
8509         u32 val, rdmac_mode;
8510         int i, err, limit;
8511         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8512
8513         tg3_disable_ints(tp);
8514
8515         tg3_stop_fw(tp);
8516
8517         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8518
8519         if (tg3_flag(tp, INIT_COMPLETE))
8520                 tg3_abort_hw(tp, 1);
8521
8522         /* Enable MAC control of LPI */
8523         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8524                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8525                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8526                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8527
8528                 tw32_f(TG3_CPMU_EEE_CTRL,
8529                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8530
8531                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8532                       TG3_CPMU_EEEMD_LPI_IN_TX |
8533                       TG3_CPMU_EEEMD_LPI_IN_RX |
8534                       TG3_CPMU_EEEMD_EEE_ENABLE;
8535
8536                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8537                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8538
8539                 if (tg3_flag(tp, ENABLE_APE))
8540                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8541
8542                 tw32_f(TG3_CPMU_EEE_MODE, val);
8543
8544                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8545                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8546                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8547
8548                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8549                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8550                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8551         }
8552
8553         if (reset_phy)
8554                 tg3_phy_reset(tp);
8555
8556         err = tg3_chip_reset(tp);
8557         if (err)
8558                 return err;
8559
8560         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8561
8562         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8563                 val = tr32(TG3_CPMU_CTRL);
8564                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8565                 tw32(TG3_CPMU_CTRL, val);
8566
8567                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8568                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8569                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8570                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8571
8572                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8573                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8574                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8575                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8576
8577                 val = tr32(TG3_CPMU_HST_ACC);
8578                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8579                 val |= CPMU_HST_ACC_MACCLK_6_25;
8580                 tw32(TG3_CPMU_HST_ACC, val);
8581         }
8582
8583         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8584                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8585                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8586                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8587                 tw32(PCIE_PWR_MGMT_THRESH, val);
8588
8589                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8590                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8591
8592                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8593
8594                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8595                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8596         }
8597
8598         if (tg3_flag(tp, L1PLLPD_EN)) {
8599                 u32 grc_mode = tr32(GRC_MODE);
8600
8601                 /* Access the lower 1K of PL PCIE block registers. */
8602                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8603                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8604
8605                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8606                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8607                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8608
8609                 tw32(GRC_MODE, grc_mode);
8610         }
8611
8612         if (tg3_flag(tp, 57765_CLASS)) {
8613                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8614                         u32 grc_mode = tr32(GRC_MODE);
8615
8616                         /* Access the lower 1K of PL PCIE block registers. */
8617                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8618                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8619
8620                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8621                                    TG3_PCIE_PL_LO_PHYCTL5);
8622                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8623                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8624
8625                         tw32(GRC_MODE, grc_mode);
8626                 }
8627
8628                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8629                         u32 grc_mode = tr32(GRC_MODE);
8630
8631                         /* Access the lower 1K of DL PCIE block registers. */
8632                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8633                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8634
8635                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8636                                    TG3_PCIE_DL_LO_FTSMAX);
8637                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8638                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8639                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8640
8641                         tw32(GRC_MODE, grc_mode);
8642                 }
8643
8644                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8645                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8646                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8647                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8648         }
8649
8650         /* This works around an issue with Athlon chipsets on
8651          * B3 tigon3 silicon.  This bit has no effect on any
8652          * other revision.  But do not set this on PCI Express
8653          * chips and don't even touch the clocks if the CPMU is present.
8654          */
8655         if (!tg3_flag(tp, CPMU_PRESENT)) {
8656                 if (!tg3_flag(tp, PCI_EXPRESS))
8657                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8658                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8659         }
8660
8661         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8662             tg3_flag(tp, PCIX_MODE)) {
8663                 val = tr32(TG3PCI_PCISTATE);
8664                 val |= PCISTATE_RETRY_SAME_DMA;
8665                 tw32(TG3PCI_PCISTATE, val);
8666         }
8667
8668         if (tg3_flag(tp, ENABLE_APE)) {
8669                 /* Allow reads and writes to the
8670                  * APE register and memory space.
8671                  */
8672                 val = tr32(TG3PCI_PCISTATE);
8673                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8674                        PCISTATE_ALLOW_APE_SHMEM_WR |
8675                        PCISTATE_ALLOW_APE_PSPACE_WR;
8676                 tw32(TG3PCI_PCISTATE, val);
8677         }
8678
8679         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8680                 /* Enable some hw fixes.  */
8681                 val = tr32(TG3PCI_MSI_DATA);
8682                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8683                 tw32(TG3PCI_MSI_DATA, val);
8684         }
8685
8686         /* Descriptor ring init may make accesses to the
8687          * NIC SRAM area to set up the TX descriptors, so we
8688          * can only do this after the hardware has been
8689          * successfully reset.
8690          */
8691         err = tg3_init_rings(tp);
8692         if (err)
8693                 return err;
8694
8695         if (tg3_flag(tp, 57765_PLUS)) {
8696                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8697                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8698                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8699                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8700                 if (!tg3_flag(tp, 57765_CLASS) &&
8701                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8702                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8703                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8704         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8705                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8706                 /* This value is determined during the probe-time DMA
8707                  * engine test, tg3_test_dma().
8708                  */
8709                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8710         }
8711
8712         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8713                           GRC_MODE_4X_NIC_SEND_RINGS |
8714                           GRC_MODE_NO_TX_PHDR_CSUM |
8715                           GRC_MODE_NO_RX_PHDR_CSUM);
8716         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8717
8718         /* Pseudo-header checksum is done by hardware logic and not
8719          * the offload processors, so make the chip do the pseudo-
8720          * header checksums on receive.  For transmit it is more
8721          * convenient to do the pseudo-header checksum in software
8722          * as Linux does that on transmit for us in all cases.
8723          */
8724         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8725
8726         tw32(GRC_MODE,
8727              tp->grc_mode |
8728              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8729
8730         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
8731         val = tr32(GRC_MISC_CFG);
8732         val &= ~0xff;
8733         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8734         tw32(GRC_MISC_CFG, val);
8735
8736         /* Initialize MBUF/DESC pool. */
8737         if (tg3_flag(tp, 5750_PLUS)) {
8738                 /* Do nothing.  */
8739         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8740                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8741                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8742                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8743                 else
8744                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8745                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8746                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8747         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8748                 int fw_len;
8749
8750                 fw_len = tp->fw_len;
8751                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8752                 tw32(BUFMGR_MB_POOL_ADDR,
8753                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8754                 tw32(BUFMGR_MB_POOL_SIZE,
8755                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8756         }
8757
8758         if (tp->dev->mtu <= ETH_DATA_LEN) {
8759                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8760                      tp->bufmgr_config.mbuf_read_dma_low_water);
8761                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8762                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8763                 tw32(BUFMGR_MB_HIGH_WATER,
8764                      tp->bufmgr_config.mbuf_high_water);
8765         } else {
8766                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8767                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8768                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8769                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8770                 tw32(BUFMGR_MB_HIGH_WATER,
8771                      tp->bufmgr_config.mbuf_high_water_jumbo);
8772         }
8773         tw32(BUFMGR_DMA_LOW_WATER,
8774              tp->bufmgr_config.dma_low_water);
8775         tw32(BUFMGR_DMA_HIGH_WATER,
8776              tp->bufmgr_config.dma_high_water);
8777
8778         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8779         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8780                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8781         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8782             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8783             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8784                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8785         tw32(BUFMGR_MODE, val);
8786         for (i = 0; i < 2000; i++) {
8787                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8788                         break;
8789                 udelay(10);
8790         }
8791         if (i >= 2000) {
8792                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8793                 return -ENODEV;
8794         }
8795
8796         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8797                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8798
8799         tg3_setup_rxbd_thresholds(tp);
8800
8801         /* Initialize TG3_BDINFO's at:
8802          *  RCVDBDI_STD_BD:     standard eth size rx ring
8803          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8804          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8805          *
8806          * like so:
8807          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8808          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8809          *                              ring attribute flags
8810          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8811          *
8812          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8813          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8814          *
8815          * The size of each ring is fixed in the firmware, but the location is
8816          * configurable.
8817          */
8818         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8819              ((u64) tpr->rx_std_mapping >> 32));
8820         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8821              ((u64) tpr->rx_std_mapping & 0xffffffff));
8822         if (!tg3_flag(tp, 5717_PLUS))
8823                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8824                      NIC_SRAM_RX_BUFFER_DESC);
8825
8826         /* Disable the mini ring */
8827         if (!tg3_flag(tp, 5705_PLUS))
8828                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8829                      BDINFO_FLAGS_DISABLED);
8830
8831         /* Program the jumbo buffer descriptor ring control
8832          * blocks on those devices that have them.
8833          */
8834         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8835             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8836
8837                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8838                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8839                              ((u64) tpr->rx_jmb_mapping >> 32));
8840                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8841                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8842                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8843                               BDINFO_FLAGS_MAXLEN_SHIFT;
8844                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8845                              val | BDINFO_FLAGS_USE_EXT_RECV);
8846                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8847                             tg3_flag(tp, 57765_CLASS))
8848                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8849                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8850                 } else {
8851                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8852                              BDINFO_FLAGS_DISABLED);
8853                 }
8854
8855                 if (tg3_flag(tp, 57765_PLUS)) {
8856                         val = TG3_RX_STD_RING_SIZE(tp);
8857                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8858                         val |= (TG3_RX_STD_DMA_SZ << 2);
8859                 } else
8860                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8861         } else
8862                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8863
8864         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8865
8866         tpr->rx_std_prod_idx = tp->rx_pending;
8867         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8868
8869         tpr->rx_jmb_prod_idx =
8870                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8871         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8872
8873         tg3_rings_reset(tp);
8874
8875         /* Initialize MAC address and backoff seed. */
8876         __tg3_set_mac_addr(tp, 0);
8877
8878         /* MTU + ethernet header + FCS + optional VLAN tag */
8879         tw32(MAC_RX_MTU_SIZE,
8880              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8881
8882         /* The slot time is changed by tg3_setup_phy if we
8883          * run at gigabit with half duplex.
8884          */
8885         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8886               (6 << TX_LENGTHS_IPG_SHIFT) |
8887               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8888
8889         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8890                 val |= tr32(MAC_TX_LENGTHS) &
8891                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8892                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8893
8894         tw32(MAC_TX_LENGTHS, val);
8895
8896         /* Receive rules. */
8897         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8898         tw32(RCVLPC_CONFIG, 0x0181);
8899
8900         /* Calculate RDMAC_MODE setting early, we need it to determine
8901          * the RCVLPC_STATE_ENABLE mask.
8902          */
8903         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8904                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8905                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8906                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8907                       RDMAC_MODE_LNGREAD_ENAB);
8908
8909         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8910                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8911
8912         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8913             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8914             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8915                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8916                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8917                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8918
8919         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8920             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8921                 if (tg3_flag(tp, TSO_CAPABLE) &&
8922                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8923                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8924                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8925                            !tg3_flag(tp, IS_5788)) {
8926                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8927                 }
8928         }
8929
8930         if (tg3_flag(tp, PCI_EXPRESS))
8931                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8932
8933         if (tg3_flag(tp, HW_TSO_1) ||
8934             tg3_flag(tp, HW_TSO_2) ||
8935             tg3_flag(tp, HW_TSO_3))
8936                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8937
8938         if (tg3_flag(tp, 57765_PLUS) ||
8939             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8940             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8941                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8942
8943         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8944                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8945
8946         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8947             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8948             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8949             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8950             tg3_flag(tp, 57765_PLUS)) {
8951                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8952                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
8953                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8954                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8955                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8956                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8957                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8958                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8959                 }
8960                 tw32(TG3_RDMA_RSRVCTRL_REG,
8961                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8962         }
8963
8964         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8965             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8966                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8967                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8968                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8969                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8970         }
8971
8972         /* Receive/send statistics. */
8973         if (tg3_flag(tp, 5750_PLUS)) {
8974                 val = tr32(RCVLPC_STATS_ENABLE);
8975                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8976                 tw32(RCVLPC_STATS_ENABLE, val);
8977         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8978                    tg3_flag(tp, TSO_CAPABLE)) {
8979                 val = tr32(RCVLPC_STATS_ENABLE);
8980                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8981                 tw32(RCVLPC_STATS_ENABLE, val);
8982         } else {
8983                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8984         }
8985         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8986         tw32(SNDDATAI_STATSENAB, 0xffffff);
8987         tw32(SNDDATAI_STATSCTRL,
8988              (SNDDATAI_SCTRL_ENABLE |
8989               SNDDATAI_SCTRL_FASTUPD));
8990
8991         /* Set up the host coalescing engine. */
8992         tw32(HOSTCC_MODE, 0);
8993         for (i = 0; i < 2000; i++) {
8994                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8995                         break;
8996                 udelay(10);
8997         }
8998
8999         __tg3_set_coalesce(tp, &tp->coal);
9000
9001         if (!tg3_flag(tp, 5705_PLUS)) {
9002                 /* Status/statistics block address.  See tg3_timer,
9003                  * the tg3_periodic_fetch_stats call there, and
9004                  * tg3_get_stats to see how this works for 5705/5750 chips.
9005                  */
9006                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9007                      ((u64) tp->stats_mapping >> 32));
9008                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9009                      ((u64) tp->stats_mapping & 0xffffffff));
9010                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9011
9012                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9013
9014                 /* Clear statistics and status block memory areas */
9015                 for (i = NIC_SRAM_STATS_BLK;
9016                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9017                      i += sizeof(u32)) {
9018                         tg3_write_mem(tp, i, 0);
9019                         udelay(40);
9020                 }
9021         }
9022
9023         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9024
9025         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9026         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9027         if (!tg3_flag(tp, 5705_PLUS))
9028                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9029
9030         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9031                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9032                 /* Reset to prevent intermittently losing the first rx packet */
9033                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9034                 udelay(10);
9035         }
9036
9037         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9038                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9039                         MAC_MODE_FHDE_ENABLE;
9040         if (tg3_flag(tp, ENABLE_APE))
9041                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9042         if (!tg3_flag(tp, 5705_PLUS) &&
9043             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9044             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9045                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9046         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9047         udelay(40);
9048
9049         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9050          * If TG3_FLAG_IS_NIC is zero, we should read the
9051          * register to preserve the GPIO settings for LOMs. The GPIOs,
9052          * whether used as inputs or outputs, are set by boot code after
9053          * reset.
9054          */
9055         if (!tg3_flag(tp, IS_NIC)) {
9056                 u32 gpio_mask;
9057
9058                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9059                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9060                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9061
9062                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9063                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9064                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9065
9066                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9067                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9068
9069                 tp->grc_local_ctrl &= ~gpio_mask;
9070                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9071
9072                 /* GPIO1 must be driven high for eeprom write protect */
9073                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9074                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9075                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9076         }
9077         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9078         udelay(100);
9079
9080         if (tg3_flag(tp, USING_MSIX)) {
9081                 val = tr32(MSGINT_MODE);
9082                 val |= MSGINT_MODE_ENABLE;
9083                 if (tp->irq_cnt > 1)
9084                         val |= MSGINT_MODE_MULTIVEC_EN;
9085                 if (!tg3_flag(tp, 1SHOT_MSI))
9086                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9087                 tw32(MSGINT_MODE, val);
9088         }
9089
9090         if (!tg3_flag(tp, 5705_PLUS)) {
9091                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9092                 udelay(40);
9093         }
9094
9095         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9096                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9097                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9098                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9099                WDMAC_MODE_LNGREAD_ENAB);
9100
9101         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9102             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9103                 if (tg3_flag(tp, TSO_CAPABLE) &&
9104                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9105                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9106                         /* nothing */
9107                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9108                            !tg3_flag(tp, IS_5788)) {
9109                         val |= WDMAC_MODE_RX_ACCEL;
9110                 }
9111         }
9112
9113         /* Enable host coalescing bug fix */
9114         if (tg3_flag(tp, 5755_PLUS))
9115                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9116
9117         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9118                 val |= WDMAC_MODE_BURST_ALL_DATA;
9119
9120         tw32_f(WDMAC_MODE, val);
9121         udelay(40);
9122
9123         if (tg3_flag(tp, PCIX_MODE)) {
9124                 u16 pcix_cmd;
9125
9126                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9127                                      &pcix_cmd);
9128                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9129                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9130                         pcix_cmd |= PCI_X_CMD_READ_2K;
9131                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9132                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9133                         pcix_cmd |= PCI_X_CMD_READ_2K;
9134                 }
9135                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9136                                       pcix_cmd);
9137         }
9138
9139         tw32_f(RDMAC_MODE, rdmac_mode);
9140         udelay(40);
9141
9142         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9143         if (!tg3_flag(tp, 5705_PLUS))
9144                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9145
9146         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9147                 tw32(SNDDATAC_MODE,
9148                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9149         else
9150                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9151
9152         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9153         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9154         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9155         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9156                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9157         tw32(RCVDBDI_MODE, val);
9158         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9159         if (tg3_flag(tp, HW_TSO_1) ||
9160             tg3_flag(tp, HW_TSO_2) ||
9161             tg3_flag(tp, HW_TSO_3))
9162                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9163         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9164         if (tg3_flag(tp, ENABLE_TSS))
9165                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9166         tw32(SNDBDI_MODE, val);
9167         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9168
9169         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9170                 err = tg3_load_5701_a0_firmware_fix(tp);
9171                 if (err)
9172                         return err;
9173         }
9174
9175         if (tg3_flag(tp, TSO_CAPABLE)) {
9176                 err = tg3_load_tso_firmware(tp);
9177                 if (err)
9178                         return err;
9179         }
9180
9181         tp->tx_mode = TX_MODE_ENABLE;
9182
9183         if (tg3_flag(tp, 5755_PLUS) ||
9184             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9185                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9186
9187         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9188                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9189                 tp->tx_mode &= ~val;
9190                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9191         }
9192
9193         tw32_f(MAC_TX_MODE, tp->tx_mode);
9194         udelay(100);
9195
9196         if (tg3_flag(tp, ENABLE_RSS)) {
9197                 tg3_rss_write_indir_tbl(tp);
9198
9199                 /* Set up the "secret" hash key. */
9200                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9201                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9202                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9203                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9204                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9205                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9206                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9207                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9208                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9209                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9210         }
9211
9212         tp->rx_mode = RX_MODE_ENABLE;
9213         if (tg3_flag(tp, 5755_PLUS))
9214                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9215
9216         if (tg3_flag(tp, ENABLE_RSS))
9217                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9218                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9219                                RX_MODE_RSS_IPV6_HASH_EN |
9220                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9221                                RX_MODE_RSS_IPV4_HASH_EN |
9222                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9223
9224         tw32_f(MAC_RX_MODE, tp->rx_mode);
9225         udelay(10);
9226
9227         tw32(MAC_LED_CTRL, tp->led_ctrl);
9228
9229         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9230         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9231                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9232                 udelay(10);
9233         }
9234         tw32_f(MAC_RX_MODE, tp->rx_mode);
9235         udelay(10);
9236
9237         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9238                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9239                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9240                         /* Set drive transmission level to 1.2V  */
9241                         /* only if the signal pre-emphasis bit is not set  */
9242                         val = tr32(MAC_SERDES_CFG);
9243                         val &= 0xfffff000;
9244                         val |= 0x880;
9245                         tw32(MAC_SERDES_CFG, val);
9246                 }
9247                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9248                         tw32(MAC_SERDES_CFG, 0x616000);
9249         }
9250
9251         /* Prevent chip from dropping frames when flow control
9252          * is enabled.
9253          */
9254         if (tg3_flag(tp, 57765_CLASS))
9255                 val = 1;
9256         else
9257                 val = 2;
9258         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9259
9260         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9261             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9262                 /* Use hardware link auto-negotiation */
9263                 tg3_flag_set(tp, HW_AUTONEG);
9264         }
9265
9266         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9267             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9268                 u32 tmp;
9269
9270                 tmp = tr32(SERDES_RX_CTRL);
9271                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9272                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9273                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9274                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9275         }
9276
9277         if (!tg3_flag(tp, USE_PHYLIB)) {
9278                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9279                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9280
9281                 err = tg3_setup_phy(tp, 0);
9282                 if (err)
9283                         return err;
9284
9285                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9286                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9287                         u32 tmp;
9288
9289                         /* Clear CRC stats. */
9290                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9291                                 tg3_writephy(tp, MII_TG3_TEST1,
9292                                              tmp | MII_TG3_TEST1_CRC_EN);
9293                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9294                         }
9295                 }
9296         }
9297
9298         __tg3_set_rx_mode(tp->dev);
9299
9300         /* Initialize receive rules. */
9301         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9302         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9303         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9304         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9305
9306         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9307                 limit = 8;
9308         else
9309                 limit = 16;
9310         if (tg3_flag(tp, ENABLE_ASF))
9311                 limit -= 4;
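	/* Deliberate fall-through: entering the switch at 'limit'
	 * clears receive rules (limit - 1) down through 4.  Rules 0
	 * and 1 were programmed above; rules 2 and 3 are left alone
	 * (see the commented-out cases below).
	 */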
9312         switch (limit) {
9313         case 16:
9314                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9315         case 15:
9316                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9317         case 14:
9318                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9319         case 13:
9320                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9321         case 12:
9322                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9323         case 11:
9324                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9325         case 10:
9326                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9327         case 9:
9328                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9329         case 8:
9330                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9331         case 7:
9332                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9333         case 6:
9334                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9335         case 5:
9336                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9337         case 4:
9338                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9339         case 3:
9340                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9341         case 2:
9342         case 1:
9343
9344         default:
9345                 break;
9346         }
9347
9348         if (tg3_flag(tp, ENABLE_APE))
9349                 /* Write our heartbeat update interval to APE. */
9350                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9351                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9352
9353         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9354
9355         return 0;
9356 }
9357
9358 /* Called at device open time to get the chip ready for
9359  * packet processing.  Invoked with tp->lock held.
9360  */
9361 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9362 {
9363         tg3_switch_clocks(tp);
9364
9365         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9366
9367         return tg3_reset_hw(tp, reset_phy);
9368 }
9369
9370 #define TG3_STAT_ADD32(PSTAT, REG) \
9371 do {    u32 __val = tr32(REG); \
9372         (PSTAT)->low += __val; \
9373         if ((PSTAT)->low < __val) \
9374                 (PSTAT)->high += 1; \
9375 } while (0)
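
/* The MAC statistics registers are only 32 bits wide, so
 * TG3_STAT_ADD32() above accumulates them into 64-bit software
 * counters, detecting wraparound with the unsigned-overflow test:
 * e.g. low = 0xffffff00 plus __val = 0x200 leaves low = 0x100, and
 * 0x100 < 0x200 flags the carry into ->high.
 */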
9376
9377 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9378 {
9379         struct tg3_hw_stats *sp = tp->hw_stats;
9380
9381         if (!netif_carrier_ok(tp->dev))
9382                 return;
9383
9384         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9385         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9386         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9387         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9388         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9389         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9390         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9391         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9392         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9393         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9394         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9395         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9396         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9397
9398         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9399         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9400         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9401         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9402         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9403         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9404         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9405         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9406         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9407         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9408         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9409         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9410         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9411         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9412
9413         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9414         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9415             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9416             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9417                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9418         } else {
9419                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9420                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9421                 if (val) {
9422                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9423                         sp->rx_discards.low += val;
9424                         if (sp->rx_discards.low < val)
9425                                 sp->rx_discards.high += 1;
9426                 }
9427                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9428         }
9429         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9430 }
9431
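/* Work around chips that can drop an MSI: if a vector still has
 * work pending and neither its rx nor tx consumer index has moved
 * since the last check, assume the interrupt was lost and call the
 * handler directly.  chk_msi_cnt grants one timer tick of grace
 * before firing.
 */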
9432 static void tg3_chk_missed_msi(struct tg3 *tp)
9433 {
9434         u32 i;
9435
9436         for (i = 0; i < tp->irq_cnt; i++) {
9437                 struct tg3_napi *tnapi = &tp->napi[i];
9438
9439                 if (tg3_has_work(tnapi)) {
9440                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9441                             tnapi->last_tx_cons == tnapi->tx_cons) {
9442                                 if (tnapi->chk_msi_cnt < 1) {
9443                                         tnapi->chk_msi_cnt++;
9444                                         return;
9445                                 }
9446                                 tg3_msi(0, tnapi);
9447                         }
9448                 }
9449                 tnapi->chk_msi_cnt = 0;
9450                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9451                 tnapi->last_tx_cons = tnapi->tx_cons;
9452         }
9453 }
9454
9455 static void tg3_timer(unsigned long __opaque)
9456 {
9457         struct tg3 *tp = (struct tg3 *) __opaque;
9458
9459         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9460                 goto restart_timer;
9461
9462         spin_lock(&tp->lock);
9463
9464         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9465             tg3_flag(tp, 57765_CLASS))
9466                 tg3_chk_missed_msi(tp);
9467
9468         if (!tg3_flag(tp, TAGGED_STATUS)) {
9469                 /* All of this garbage is because, when using non-tagged
9470                  * IRQ status, the mailbox/status_block protocol the chip
9471                  * uses with the CPU is race prone.
9472                  */
9473                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9474                         tw32(GRC_LOCAL_CTRL,
9475                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9476                 } else {
9477                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9478                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9479                 }
9480
9481                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9482                         spin_unlock(&tp->lock);
9483                         tg3_reset_task_schedule(tp);
9484                         goto restart_timer;
9485                 }
9486         }
9487
9488         /* This part only runs once per second. */
9489         if (!--tp->timer_counter) {
9490                 if (tg3_flag(tp, 5705_PLUS))
9491                         tg3_periodic_fetch_stats(tp);
9492
9493                 if (tp->setlpicnt && !--tp->setlpicnt)
9494                         tg3_phy_eee_enable(tp);
9495
9496                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9497                         u32 mac_stat;
9498                         int phy_event;
9499
9500                         mac_stat = tr32(MAC_STATUS);
9501
9502                         phy_event = 0;
9503                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9504                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9505                                         phy_event = 1;
9506                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9507                                 phy_event = 1;
9508
9509                         if (phy_event)
9510                                 tg3_setup_phy(tp, 0);
9511                 } else if (tg3_flag(tp, POLL_SERDES)) {
9512                         u32 mac_stat = tr32(MAC_STATUS);
9513                         int need_setup = 0;
9514
9515                         if (netif_carrier_ok(tp->dev) &&
9516                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9517                                 need_setup = 1;
9518                         }
9519                         if (!netif_carrier_ok(tp->dev) &&
9520                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9521                                          MAC_STATUS_SIGNAL_DET))) {
9522                                 need_setup = 1;
9523                         }
9524                         if (need_setup) {
9525                                 if (!tp->serdes_counter) {
9526                                         tw32_f(MAC_MODE,
9527                                              (tp->mac_mode &
9528                                               ~MAC_MODE_PORT_MODE_MASK));
9529                                         udelay(40);
9530                                         tw32_f(MAC_MODE, tp->mac_mode);
9531                                         udelay(40);
9532                                 }
9533                                 tg3_setup_phy(tp, 0);
9534                         }
9535                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9536                            tg3_flag(tp, 5780_CLASS)) {
9537                         tg3_serdes_parallel_detect(tp);
9538                 }
9539
9540                 tp->timer_counter = tp->timer_multiplier;
9541         }
9542
9543         /* Heartbeat is only sent once every 2 seconds.
9544          *
9545          * The heartbeat is to tell the ASF firmware that the host
9546          * driver is still alive.  In the event that the OS crashes,
9547          * ASF needs to reset the hardware to free up the FIFO space
9548          * that may be filled with rx packets destined for the host.
9549          * If the FIFO is full, ASF will no longer function properly.
9550          *
9551          * Unintended resets have been reported on real time kernels
9552          * where the timer doesn't run on time.  Netpoll will also have
9553          * the same problem.
9554          *
9555          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9556          * to check the ring condition when the heartbeat is expiring
9557          * before doing the reset.  This will prevent most unintended
9558          * resets.
9559          */
9560         if (!--tp->asf_counter) {
9561                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9562                         tg3_wait_for_event_ack(tp);
9563
9564                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9565                                       FWCMD_NICDRV_ALIVE3);
9566                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9567                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9568                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9569
9570                         tg3_generate_fw_event(tp);
9571                 }
9572                 tp->asf_counter = tp->asf_multiplier;
9573         }
9574
9575         spin_unlock(&tp->lock);
9576
9577 restart_timer:
9578         tp->timer.expires = jiffies + tp->timer_offset;
9579         add_timer(&tp->timer);
9580 }
9581
9582 static void __devinit tg3_timer_init(struct tg3 *tp)
9583 {
9584         if (tg3_flag(tp, TAGGED_STATUS) &&
9585             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9586             !tg3_flag(tp, 57765_CLASS))
9587                 tp->timer_offset = HZ;
9588         else
9589                 tp->timer_offset = HZ / 10;
9590
9591         BUG_ON(tp->timer_offset > HZ);
9592
9593         tp->timer_multiplier = (HZ / tp->timer_offset);
9594         tp->asf_multiplier = (HZ / tp->timer_offset) *
9595                              TG3_FW_UPDATE_FREQ_SEC;
9596
9597         init_timer(&tp->timer);
9598         tp->timer.data = (unsigned long) tp;
9599         tp->timer.function = tg3_timer;
9600 }
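
/* Example of the resulting cadence, assuming HZ = 1000: with
 * tagged status on a chip that needs no missed-MSI check, the
 * timer fires once per second and timer_multiplier is 1; otherwise
 * it fires every 100ms and timer_multiplier is 10, so the
 * once-per-second work in tg3_timer() still runs at 1 Hz.
 * asf_counter then expires every TG3_FW_UPDATE_FREQ_SEC seconds
 * (the 2-second heartbeat period noted in tg3_timer()).
 */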
9601
9602 static void tg3_timer_start(struct tg3 *tp)
9603 {
9604         tp->asf_counter   = tp->asf_multiplier;
9605         tp->timer_counter = tp->timer_multiplier;
9606
9607         tp->timer.expires = jiffies + tp->timer_offset;
9608         add_timer(&tp->timer);
9609 }
9610
9611 static void tg3_timer_stop(struct tg3 *tp)
9612 {
9613         del_timer_sync(&tp->timer);
9614 }
9615
9616 /* Restart hardware after configuration changes, self-test, etc.
9617  * Invoked with tp->lock held.
9618  */
9619 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9620         __releases(tp->lock)
9621         __acquires(tp->lock)
9622 {
9623         int err;
9624
9625         err = tg3_init_hw(tp, reset_phy);
9626         if (err) {
9627                 netdev_err(tp->dev,
9628                            "Failed to re-initialize device, aborting\n");
9629                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9630                 tg3_full_unlock(tp);
9631                 tg3_timer_stop(tp);
9632                 tp->irq_sync = 0;
9633                 tg3_napi_enable(tp);
9634                 dev_close(tp->dev);
9635                 tg3_full_lock(tp, 0);
9636         }
9637         return err;
9638 }
9639
9640 static void tg3_reset_task(struct work_struct *work)
9641 {
9642         struct tg3 *tp = container_of(work, struct tg3, reset_task);
9643         int err;
9644
9645         tg3_full_lock(tp, 0);
9646
9647         if (!netif_running(tp->dev)) {
9648                 tg3_flag_clear(tp, RESET_TASK_PENDING);
9649                 tg3_full_unlock(tp);
9650                 return;
9651         }
9652
9653         tg3_full_unlock(tp);
9654
9655         tg3_phy_stop(tp);
9656
9657         tg3_netif_stop(tp);
9658
9659         tg3_full_lock(tp, 1);
9660
9661         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9662                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9663                 tp->write32_rx_mbox = tg3_write_flush_reg32;
9664                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9665                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9666         }
9667
9668         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9669         err = tg3_init_hw(tp, 1);
9670         if (err)
9671                 goto out;
9672
9673         tg3_netif_start(tp);
9674
9675 out:
9676         tg3_full_unlock(tp);
9677
9678         if (!err)
9679                 tg3_phy_start(tp);
9680
9681         tg3_flag_clear(tp, RESET_TASK_PENDING);
9682 }
9683
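/* Pick the top-half handler for one vector: MSI/MSI-X vectors get
 * tg3_msi (or tg3_msi_1shot when one-shot mode is usable) and are
 * never shared; legacy INTx gets tg3_interrupt (or the tagged-
 * status variant) and must be requested IRQF_SHARED.
 */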
9684 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9685 {
9686         irq_handler_t fn;
9687         unsigned long flags;
9688         char *name;
9689         struct tg3_napi *tnapi = &tp->napi[irq_num];
9690
9691         if (tp->irq_cnt == 1)
9692                 name = tp->dev->name;
9693         else {
9694                 name = &tnapi->irq_lbl[0];
9695                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9696                 name[IFNAMSIZ-1] = 0;
9697         }
9698
9699         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9700                 fn = tg3_msi;
9701                 if (tg3_flag(tp, 1SHOT_MSI))
9702                         fn = tg3_msi_1shot;
9703                 flags = 0;
9704         } else {
9705                 fn = tg3_interrupt;
9706                 if (tg3_flag(tp, TAGGED_STATUS))
9707                         fn = tg3_interrupt_tagged;
9708                 flags = IRQF_SHARED;
9709         }
9710
9711         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9712 }
9713
9714 static int tg3_test_interrupt(struct tg3 *tp)
9715 {
9716         struct tg3_napi *tnapi = &tp->napi[0];
9717         struct net_device *dev = tp->dev;
9718         int err, i, intr_ok = 0;
9719         u32 val;
9720
9721         if (!netif_running(dev))
9722                 return -ENODEV;
9723
9724         tg3_disable_ints(tp);
9725
9726         free_irq(tnapi->irq_vec, tnapi);
9727
9728         /*
9729          * Turn off MSI one shot mode.  Otherwise this test has no
9730          * observable way to know whether the interrupt was delivered.
9731          */
9732         if (tg3_flag(tp, 57765_PLUS)) {
9733                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9734                 tw32(MSGINT_MODE, val);
9735         }
9736
9737         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9738                           IRQF_SHARED, dev->name, tnapi);
9739         if (err)
9740                 return err;
9741
9742         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9743         tg3_enable_ints(tp);
9744
9745         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9746                tnapi->coal_now);
9747
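        /* Poll for evidence of delivery: up to 5 iterations of 10 ms
         * each.  Either a non-zero interrupt mailbox or a masked PCI
         * interrupt bit in MISC_HOST_CTRL counts as success.
         */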
9748         for (i = 0; i < 5; i++) {
9749                 u32 int_mbox, misc_host_ctrl;
9750
9751                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9752                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9753
9754                 if ((int_mbox != 0) ||
9755                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9756                         intr_ok = 1;
9757                         break;
9758                 }
9759
9760                 if (tg3_flag(tp, 57765_PLUS) &&
9761                     tnapi->hw_status->status_tag != tnapi->last_tag)
9762                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9763
9764                 msleep(10);
9765         }
9766
9767         tg3_disable_ints(tp);
9768
9769         free_irq(tnapi->irq_vec, tnapi);
9770
9771         err = tg3_request_irq(tp, 0);
9772
9773         if (err)
9774                 return err;
9775
9776         if (intr_ok) {
9777                 /* Reenable MSI one shot mode. */
9778                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9779                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9780                         tw32(MSGINT_MODE, val);
9781                 }
9782                 return 0;
9783         }
9784
9785         return -EIO;
9786 }
9787
9788 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
9789  * INTx mode is successfully restored.
9790  */
9791 static int tg3_test_msi(struct tg3 *tp)
9792 {
9793         int err;
9794         u16 pci_cmd;
9795
9796         if (!tg3_flag(tp, USING_MSI))
9797                 return 0;
9798
9799         /* Turn off SERR reporting in case MSI terminates with Master
9800          * Abort.
9801          */
9802         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9803         pci_write_config_word(tp->pdev, PCI_COMMAND,
9804                               pci_cmd & ~PCI_COMMAND_SERR);
9805
9806         err = tg3_test_interrupt(tp);
9807
9808         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9809
9810         if (!err)
9811                 return 0;
9812
9813         /* other failures */
9814         if (err != -EIO)
9815                 return err;
9816
9817         /* MSI test failed, go back to INTx mode */
9818         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9819                     "to INTx mode. Please report this failure to the PCI "
9820                     "maintainer and include system chipset information\n");
9821
9822         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9823
9824         pci_disable_msi(tp->pdev);
9825
9826         tg3_flag_clear(tp, USING_MSI);
9827         tp->napi[0].irq_vec = tp->pdev->irq;
9828
9829         err = tg3_request_irq(tp, 0);
9830         if (err)
9831                 return err;
9832
9833         /* Need to reset the chip because the MSI cycle may have terminated
9834          * with Master Abort.
9835          */
9836         tg3_full_lock(tp, 1);
9837
9838         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9839         err = tg3_init_hw(tp, 1);
9840
9841         tg3_full_unlock(tp);
9842
9843         if (err)
9844                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9845
9846         return err;
9847 }
9848
9849 static int tg3_request_firmware(struct tg3 *tp)
9850 {
9851         const __be32 *fw_data;
9852
9853         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9854                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9855                            tp->fw_needed);
9856                 return -ENOENT;
9857         }
9858
9859         fw_data = (void *)tp->fw->data;
9860
9861         /* Firmware blob starts with version numbers, followed by
9862          * start address and _full_ length including BSS sections
9863          * (which must be at least as long as the actual data, of course).
9864          */
9865
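        /* Header sketch: three __be32 words.  fw_data[0] holds the
         * version, fw_data[1] the load address, and fw_data[2] the
         * image length including BSS.  E.g. a 2060-byte blob carries
         * 2048 payload bytes, so fw_data[2] must be at least 2048.
         */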
9866         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9867         if (tp->fw_len < (tp->fw->size - 12)) {
9868                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9869                            tp->fw_len, tp->fw_needed);
9870                 release_firmware(tp->fw);
9871                 tp->fw = NULL;
9872                 return -EINVAL;
9873         }
9874
9875         /* We no longer need firmware; we have it. */
9876         tp->fw_needed = NULL;
9877         return 0;
9878 }
9879
9880 static bool tg3_enable_msix(struct tg3 *tp)
9881 {
9882         int i, rc;
9883         struct msix_entry msix_ent[tp->irq_max];
9884
9885         tp->irq_cnt = num_online_cpus();
9886         if (tp->irq_cnt > 1) {
9887                 /* We want as many rx rings enabled as there are cpus.
9888                  * In multiqueue MSI-X mode, the first MSI-X vector
9889                  * only deals with link interrupts, etc, so we add
9890                  * one to the number of vectors we are requesting.
9891                  */
9892                 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9893         }
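
        /* E.g. on a 4-CPU system with irq_max >= 5 this requests five
         * vectors: one for link/misc interrupts plus one per rx ring.
         */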
9894
9895         for (i = 0; i < tp->irq_max; i++) {
9896                 msix_ent[i].entry  = i;
9897                 msix_ent[i].vector = 0;
9898         }
9899
9900         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9901         if (rc < 0) {
9902                 return false;
9903         } else if (rc != 0) {
9904                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9905                         return false;
9906                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9907                               tp->irq_cnt, rc);
9908                 tp->irq_cnt = rc;
9909         }
9910
9911         for (i = 0; i < tp->irq_max; i++)
9912                 tp->napi[i].irq_vec = msix_ent[i].vector;
9913
9914         netif_set_real_num_tx_queues(tp->dev, 1);
9915         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9916         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9917                 pci_disable_msix(tp->pdev);
9918                 return false;
9919         }
9920
9921         if (tp->irq_cnt > 1) {
9922                 tg3_flag_set(tp, ENABLE_RSS);
9923
9924                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9925                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9926                         tg3_flag_set(tp, ENABLE_TSS);
9927                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9928                 }
9929         }
9930
9931         return true;
9932 }
9933
9934 static void tg3_ints_init(struct tg3 *tp)
9935 {
9936         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9937             !tg3_flag(tp, TAGGED_STATUS)) {
9938                 /* All MSI-capable chips should support tagged
9939                  * status.  Warn and fall back to INTx if not.
9940                  */
9941                 netdev_warn(tp->dev,
9942                             "MSI without TAGGED_STATUS? Not using MSI\n");
9943                 goto defcfg;
9944         }
9945
9946         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9947                 tg3_flag_set(tp, USING_MSIX);
9948         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9949                 tg3_flag_set(tp, USING_MSI);
9950
9951         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9952                 u32 msi_mode = tr32(MSGINT_MODE);
9953                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9954                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9955                 if (!tg3_flag(tp, 1SHOT_MSI))
9956                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9957                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9958         }
9959 defcfg:
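        /* Fallback: without MSI-X, run a single interrupt vector
         * (legacy INTx or plain MSI) and one rx and one tx queue.
         */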
9960         if (!tg3_flag(tp, USING_MSIX)) {
9961                 tp->irq_cnt = 1;
9962                 tp->napi[0].irq_vec = tp->pdev->irq;
9963                 netif_set_real_num_tx_queues(tp->dev, 1);
9964                 netif_set_real_num_rx_queues(tp->dev, 1);
9965         }
9966 }
9967
9968 static void tg3_ints_fini(struct tg3 *tp)
9969 {
9970         if (tg3_flag(tp, USING_MSIX))
9971                 pci_disable_msix(tp->pdev);
9972         else if (tg3_flag(tp, USING_MSI))
9973                 pci_disable_msi(tp->pdev);
9974         tg3_flag_clear(tp, USING_MSI);
9975         tg3_flag_clear(tp, USING_MSIX);
9976         tg3_flag_clear(tp, ENABLE_RSS);
9977         tg3_flag_clear(tp, ENABLE_TSS);
9978 }
9979
9980 static int tg3_open(struct net_device *dev)
9981 {
9982         struct tg3 *tp = netdev_priv(dev);
9983         int i, err;
9984
9985         if (tp->fw_needed) {
9986                 err = tg3_request_firmware(tp);
9987                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9988                         if (err)
9989                                 return err;
9990                 } else if (err) {
9991                         netdev_warn(tp->dev, "TSO capability disabled\n");
9992                         tg3_flag_clear(tp, TSO_CAPABLE);
9993                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9994                         netdev_notice(tp->dev, "TSO capability restored\n");
9995                         tg3_flag_set(tp, TSO_CAPABLE);
9996                 }
9997         }
9998
9999         netif_carrier_off(tp->dev);
10000
10001         err = tg3_power_up(tp);
10002         if (err)
10003                 return err;
10004
10005         tg3_full_lock(tp, 0);
10006
10007         tg3_disable_ints(tp);
10008         tg3_flag_clear(tp, INIT_COMPLETE);
10009
10010         tg3_full_unlock(tp);
10011
10012         /*
10013          * Set up interrupts first so we know how
10014          * many NAPI resources to allocate.
10015          */
10016         tg3_ints_init(tp);
10017
10018         tg3_rss_check_indir_tbl(tp);
10019
10020         /* The placement of this call is tied
10021          * to the setup and use of Host TX descriptors.
10022          */
10023         err = tg3_alloc_consistent(tp);
10024         if (err)
10025                 goto err_out1;
10026
10027         tg3_napi_init(tp);
10028
10029         tg3_napi_enable(tp);
10030
10031         for (i = 0; i < tp->irq_cnt; i++) {
10032                 struct tg3_napi *tnapi = &tp->napi[i];
10033                 err = tg3_request_irq(tp, i);
10034                 if (err) {
10035                         for (i--; i >= 0; i--) {
10036                                 tnapi = &tp->napi[i];
10037                                 free_irq(tnapi->irq_vec, tnapi);
10038                         }
10039                         goto err_out2;
10040                 }
10041         }
10042
10043         tg3_full_lock(tp, 0);
10044
10045         err = tg3_init_hw(tp, 1);
10046         if (err) {
10047                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10048                 tg3_free_rings(tp);
10049         }
10050
10051         tg3_full_unlock(tp);
10052
10053         if (err)
10054                 goto err_out3;
10055
10056         if (tg3_flag(tp, USING_MSI)) {
10057                 err = tg3_test_msi(tp);
10058
10059                 if (err) {
10060                         tg3_full_lock(tp, 0);
10061                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10062                         tg3_free_rings(tp);
10063                         tg3_full_unlock(tp);
10064
10065                         goto err_out2;
10066                 }
10067
10068                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10069                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10070
10071                         tw32(PCIE_TRANSACTION_CFG,
10072                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10073                 }
10074         }
10075
10076         tg3_phy_start(tp);
10077
10078         tg3_full_lock(tp, 0);
10079
10080         tg3_timer_start(tp);
10081         tg3_flag_set(tp, INIT_COMPLETE);
10082         tg3_enable_ints(tp);
10083
10084         tg3_full_unlock(tp);
10085
10086         netif_tx_start_all_queues(dev);
10087
10088         /*
10089          * If the loopback feature was turned on while the device was down,
10090          * make sure it is reinstated properly now.
10091          */
10092         if (dev->features & NETIF_F_LOOPBACK)
10093                 tg3_set_loopback(dev, dev->features);
10094
10095         return 0;
10096
10097 err_out3:
10098         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10099                 struct tg3_napi *tnapi = &tp->napi[i];
10100                 free_irq(tnapi->irq_vec, tnapi);
10101         }
10102
10103 err_out2:
10104         tg3_napi_disable(tp);
10105         tg3_napi_fini(tp);
10106         tg3_free_consistent(tp);
10107
10108 err_out1:
10109         tg3_ints_fini(tp);
10110         tg3_frob_aux_power(tp, false);
10111         pci_set_power_state(tp->pdev, PCI_D3hot);
10112         return err;
10113 }
10114
10115 static int tg3_close(struct net_device *dev)
10116 {
10117         int i;
10118         struct tg3 *tp = netdev_priv(dev);
10119
10120         tg3_napi_disable(tp);
10121         tg3_reset_task_cancel(tp);
10122
10123         netif_tx_stop_all_queues(dev);
10124
10125         tg3_timer_stop(tp);
10126
10127         tg3_phy_stop(tp);
10128
10129         tg3_full_lock(tp, 1);
10130
10131         tg3_disable_ints(tp);
10132
10133         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10134         tg3_free_rings(tp);
10135         tg3_flag_clear(tp, INIT_COMPLETE);
10136
10137         tg3_full_unlock(tp);
10138
10139         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10140                 struct tg3_napi *tnapi = &tp->napi[i];
10141                 free_irq(tnapi->irq_vec, tnapi);
10142         }
10143
10144         tg3_ints_fini(tp);
10145
10146         /* Clear stats across close / open calls */
10147         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10148         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10149
10150         tg3_napi_fini(tp);
10151
10152         tg3_free_consistent(tp);
10153
10154         tg3_power_down(tp);
10155
10156         netif_carrier_off(tp->dev);
10157
10158         return 0;
10159 }
10160
10161 static inline u64 get_stat64(tg3_stat64_t *val)
10162 {
10163        return ((u64)val->high << 32) | ((u64)val->low);
10164 }
10165
10166 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10167 {
10168         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10169
10170         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10171             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10172              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10173                 u32 val;
10174
10175                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10176                         tg3_writephy(tp, MII_TG3_TEST1,
10177                                      val | MII_TG3_TEST1_CRC_EN);
10178                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10179                 } else
10180                         val = 0;
10181
10182                 tp->phy_crc_errors += val;
10183
10184                 return tp->phy_crc_errors;
10185         }
10186
10187         return get_stat64(&hw_stats->rx_fcs_errors);
10188 }
10189
10190 #define ESTAT_ADD(member) \
10191         estats->member =        old_estats->member + \
10192                                 get_stat64(&hw_stats->member)
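
/* For example, ESTAT_ADD(rx_octets) expands to
 *      estats->rx_octets = old_estats->rx_octets +
 *                          get_stat64(&hw_stats->rx_octets);
 * i.e. each counter reported is the pre-reset total carried in
 * estats_prev plus the live hardware count.
 */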
10193
10194 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10195 {
10196         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10197         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10198
10199         ESTAT_ADD(rx_octets);
10200         ESTAT_ADD(rx_fragments);
10201         ESTAT_ADD(rx_ucast_packets);
10202         ESTAT_ADD(rx_mcast_packets);
10203         ESTAT_ADD(rx_bcast_packets);
10204         ESTAT_ADD(rx_fcs_errors);
10205         ESTAT_ADD(rx_align_errors);
10206         ESTAT_ADD(rx_xon_pause_rcvd);
10207         ESTAT_ADD(rx_xoff_pause_rcvd);
10208         ESTAT_ADD(rx_mac_ctrl_rcvd);
10209         ESTAT_ADD(rx_xoff_entered);
10210         ESTAT_ADD(rx_frame_too_long_errors);
10211         ESTAT_ADD(rx_jabbers);
10212         ESTAT_ADD(rx_undersize_packets);
10213         ESTAT_ADD(rx_in_length_errors);
10214         ESTAT_ADD(rx_out_length_errors);
10215         ESTAT_ADD(rx_64_or_less_octet_packets);
10216         ESTAT_ADD(rx_65_to_127_octet_packets);
10217         ESTAT_ADD(rx_128_to_255_octet_packets);
10218         ESTAT_ADD(rx_256_to_511_octet_packets);
10219         ESTAT_ADD(rx_512_to_1023_octet_packets);
10220         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10221         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10222         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10223         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10224         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10225
10226         ESTAT_ADD(tx_octets);
10227         ESTAT_ADD(tx_collisions);
10228         ESTAT_ADD(tx_xon_sent);
10229         ESTAT_ADD(tx_xoff_sent);
10230         ESTAT_ADD(tx_flow_control);
10231         ESTAT_ADD(tx_mac_errors);
10232         ESTAT_ADD(tx_single_collisions);
10233         ESTAT_ADD(tx_mult_collisions);
10234         ESTAT_ADD(tx_deferred);
10235         ESTAT_ADD(tx_excessive_collisions);
10236         ESTAT_ADD(tx_late_collisions);
10237         ESTAT_ADD(tx_collide_2times);
10238         ESTAT_ADD(tx_collide_3times);
10239         ESTAT_ADD(tx_collide_4times);
10240         ESTAT_ADD(tx_collide_5times);
10241         ESTAT_ADD(tx_collide_6times);
10242         ESTAT_ADD(tx_collide_7times);
10243         ESTAT_ADD(tx_collide_8times);
10244         ESTAT_ADD(tx_collide_9times);
10245         ESTAT_ADD(tx_collide_10times);
10246         ESTAT_ADD(tx_collide_11times);
10247         ESTAT_ADD(tx_collide_12times);
10248         ESTAT_ADD(tx_collide_13times);
10249         ESTAT_ADD(tx_collide_14times);
10250         ESTAT_ADD(tx_collide_15times);
10251         ESTAT_ADD(tx_ucast_packets);
10252         ESTAT_ADD(tx_mcast_packets);
10253         ESTAT_ADD(tx_bcast_packets);
10254         ESTAT_ADD(tx_carrier_sense_errors);
10255         ESTAT_ADD(tx_discards);
10256         ESTAT_ADD(tx_errors);
10257
10258         ESTAT_ADD(dma_writeq_full);
10259         ESTAT_ADD(dma_write_prioq_full);
10260         ESTAT_ADD(rxbds_empty);
10261         ESTAT_ADD(rx_discards);
10262         ESTAT_ADD(rx_errors);
10263         ESTAT_ADD(rx_threshold_hit);
10264
10265         ESTAT_ADD(dma_readq_full);
10266         ESTAT_ADD(dma_read_prioq_full);
10267         ESTAT_ADD(tx_comp_queue_full);
10268
10269         ESTAT_ADD(ring_set_send_prod_index);
10270         ESTAT_ADD(ring_status_update);
10271         ESTAT_ADD(nic_irqs);
10272         ESTAT_ADD(nic_avoided_irqs);
10273         ESTAT_ADD(nic_tx_threshold_hit);
10274
10275         ESTAT_ADD(mbuf_lwm_thresh_hit);
10276 }
10277
10278 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10279 {
10280         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10281         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10282
10283         stats->rx_packets = old_stats->rx_packets +
10284                 get_stat64(&hw_stats->rx_ucast_packets) +
10285                 get_stat64(&hw_stats->rx_mcast_packets) +
10286                 get_stat64(&hw_stats->rx_bcast_packets);
10287
10288         stats->tx_packets = old_stats->tx_packets +
10289                 get_stat64(&hw_stats->tx_ucast_packets) +
10290                 get_stat64(&hw_stats->tx_mcast_packets) +
10291                 get_stat64(&hw_stats->tx_bcast_packets);
10292
10293         stats->rx_bytes = old_stats->rx_bytes +
10294                 get_stat64(&hw_stats->rx_octets);
10295         stats->tx_bytes = old_stats->tx_bytes +
10296                 get_stat64(&hw_stats->tx_octets);
10297
10298         stats->rx_errors = old_stats->rx_errors +
10299                 get_stat64(&hw_stats->rx_errors);
10300         stats->tx_errors = old_stats->tx_errors +
10301                 get_stat64(&hw_stats->tx_errors) +
10302                 get_stat64(&hw_stats->tx_mac_errors) +
10303                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10304                 get_stat64(&hw_stats->tx_discards);
10305
10306         stats->multicast = old_stats->multicast +
10307                 get_stat64(&hw_stats->rx_mcast_packets);
10308         stats->collisions = old_stats->collisions +
10309                 get_stat64(&hw_stats->tx_collisions);
10310
10311         stats->rx_length_errors = old_stats->rx_length_errors +
10312                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10313                 get_stat64(&hw_stats->rx_undersize_packets);
10314
10315         stats->rx_over_errors = old_stats->rx_over_errors +
10316                 get_stat64(&hw_stats->rxbds_empty);
10317         stats->rx_frame_errors = old_stats->rx_frame_errors +
10318                 get_stat64(&hw_stats->rx_align_errors);
10319         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10320                 get_stat64(&hw_stats->tx_discards);
10321         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10322                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10323
10324         stats->rx_crc_errors = old_stats->rx_crc_errors +
10325                 tg3_calc_crc_errors(tp);
10326
10327         stats->rx_missed_errors = old_stats->rx_missed_errors +
10328                 get_stat64(&hw_stats->rx_discards);
10329
10330         stats->rx_dropped = tp->rx_dropped;
10331         stats->tx_dropped = tp->tx_dropped;
10332 }
10333
10334 static int tg3_get_regs_len(struct net_device *dev)
10335 {
10336         return TG3_REG_BLK_SIZE;
10337 }
10338
10339 static void tg3_get_regs(struct net_device *dev,
10340                 struct ethtool_regs *regs, void *_p)
10341 {
10342         struct tg3 *tp = netdev_priv(dev);
10343
10344         regs->version = 0;
10345
10346         memset(_p, 0, TG3_REG_BLK_SIZE);
10347
10348         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10349                 return;
10350
10351         tg3_full_lock(tp, 0);
10352
10353         tg3_dump_legacy_regs(tp, (u32 *)_p);
10354
10355         tg3_full_unlock(tp);
10356 }
10357
10358 static int tg3_get_eeprom_len(struct net_device *dev)
10359 {
10360         struct tg3 *tp = netdev_priv(dev);
10361
10362         return tp->nvram_size;
10363 }
10364
10365 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10366 {
10367         struct tg3 *tp = netdev_priv(dev);
10368         int ret;
10369         u8  *pd;
10370         u32 i, offset, len, b_offset, b_count;
10371         __be32 val;
10372
10373         if (tg3_flag(tp, NO_NVRAM))
10374                 return -EINVAL;
10375
10376         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10377                 return -EAGAIN;
10378
10379         offset = eeprom->offset;
10380         len = eeprom->len;
10381         eeprom->len = 0;
10382
10383         eeprom->magic = TG3_EEPROM_MAGIC;
10384
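        /* NVRAM is read in 4-byte words, so the request is split into
         * an unaligned head, whole middle words, and an unaligned
         * tail.  E.g. offset=5 len=10: read the word at 4 and keep
         * bytes 5..7, read the word at 8 whole, then read the word at
         * 12 and keep bytes 12..14.
         */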
10385         if (offset & 3) {
10386                 /* adjustments to start on required 4 byte boundary */
10387                 b_offset = offset & 3;
10388                 b_count = 4 - b_offset;
10389                 if (b_count > len) {
10390                         /* i.e. offset=1 len=2 */
10391                         b_count = len;
10392                 }
10393                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10394                 if (ret)
10395                         return ret;
10396                 memcpy(data, ((char *)&val) + b_offset, b_count);
10397                 len -= b_count;
10398                 offset += b_count;
10399                 eeprom->len += b_count;
10400         }
10401
10402         /* read bytes up to the last 4 byte boundary */
10403         pd = &data[eeprom->len];
10404         for (i = 0; i < (len - (len & 3)); i += 4) {
10405                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10406                 if (ret) {
10407                         eeprom->len += i;
10408                         return ret;
10409                 }
10410                 memcpy(pd + i, &val, 4);
10411         }
10412         eeprom->len += i;
10413
10414         if (len & 3) {
10415                 /* read last bytes not ending on 4 byte boundary */
10416                 pd = &data[eeprom->len];
10417                 b_count = len & 3;
10418                 b_offset = offset + len - b_count;
10419                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10420                 if (ret)
10421                         return ret;
10422                 memcpy(pd, &val, b_count);
10423                 eeprom->len += b_count;
10424         }
10425         return 0;
10426 }
10427
10428 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10429 {
10430         struct tg3 *tp = netdev_priv(dev);
10431         int ret;
10432         u32 offset, len, b_offset, odd_len;
10433         u8 *buf;
10434         __be32 start, end;
10435
10436         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10437                 return -EAGAIN;
10438
10439         if (tg3_flag(tp, NO_NVRAM) ||
10440             eeprom->magic != TG3_EEPROM_MAGIC)
10441                 return -EINVAL;
10442
10443         offset = eeprom->offset;
10444         len = eeprom->len;
10445
10446         if ((b_offset = (offset & 3))) {
10447                 /* adjustments to start on required 4 byte boundary */
10448                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10449                 if (ret)
10450                         return ret;
10451                 len += b_offset;
10452                 offset &= ~3;
10453                 if (len < 4)
10454                         len = 4;
10455         }
10456
10457         odd_len = 0;
10458         if (len & 3) {
10459                 /* adjustments to end on required 4 byte boundary */
10460                 odd_len = 1;
10461                 len = (len + 3) & ~3;
10462                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10463                 if (ret)
10464                         return ret;
10465         }
10466
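        /* What follows is a read-modify-write: the range was widened
         * to 4-byte alignment above, and the preserved start/end words
         * patch the bytes falling outside the caller's requested
         * [offset, offset + len) window.
         */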
10467         buf = data;
10468         if (b_offset || odd_len) {
10469                 buf = kmalloc(len, GFP_KERNEL);
10470                 if (!buf)
10471                         return -ENOMEM;
10472                 if (b_offset)
10473                         memcpy(buf, &start, 4);
10474                 if (odd_len)
10475                         memcpy(buf+len-4, &end, 4);
10476                 memcpy(buf + b_offset, data, eeprom->len);
10477         }
10478
10479         ret = tg3_nvram_write_block(tp, offset, len, buf);
10480
10481         if (buf != data)
10482                 kfree(buf);
10483
10484         return ret;
10485 }
10486
10487 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10488 {
10489         struct tg3 *tp = netdev_priv(dev);
10490
10491         if (tg3_flag(tp, USE_PHYLIB)) {
10492                 struct phy_device *phydev;
10493                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10494                         return -EAGAIN;
10495                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10496                 return phy_ethtool_gset(phydev, cmd);
10497         }
10498
10499         cmd->supported = (SUPPORTED_Autoneg);
10500
10501         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10502                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10503                                    SUPPORTED_1000baseT_Full);
10504
10505         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10506                 cmd->supported |= (SUPPORTED_100baseT_Half |
10507                                   SUPPORTED_100baseT_Full |
10508                                   SUPPORTED_10baseT_Half |
10509                                   SUPPORTED_10baseT_Full |
10510                                   SUPPORTED_TP);
10511                 cmd->port = PORT_TP;
10512         } else {
10513                 cmd->supported |= SUPPORTED_FIBRE;
10514                 cmd->port = PORT_FIBRE;
10515         }
10516
10517         cmd->advertising = tp->link_config.advertising;
10518         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10519                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10520                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10521                                 cmd->advertising |= ADVERTISED_Pause;
10522                         } else {
10523                                 cmd->advertising |= ADVERTISED_Pause |
10524                                                     ADVERTISED_Asym_Pause;
10525                         }
10526                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10527                         cmd->advertising |= ADVERTISED_Asym_Pause;
10528                 }
10529         }
10530         if (netif_running(dev) && netif_carrier_ok(dev)) {
10531                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10532                 cmd->duplex = tp->link_config.active_duplex;
10533                 cmd->lp_advertising = tp->link_config.rmt_adv;
10534                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10535                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10536                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10537                         else
10538                                 cmd->eth_tp_mdix = ETH_TP_MDI;
10539                 }
10540         } else {
10541                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10542                 cmd->duplex = DUPLEX_UNKNOWN;
10543                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10544         }
10545         cmd->phy_address = tp->phy_addr;
10546         cmd->transceiver = XCVR_INTERNAL;
10547         cmd->autoneg = tp->link_config.autoneg;
10548         cmd->maxtxpkt = 0;
10549         cmd->maxrxpkt = 0;
10550         return 0;
10551 }
10552
10553 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10554 {
10555         struct tg3 *tp = netdev_priv(dev);
10556         u32 speed = ethtool_cmd_speed(cmd);
10557
10558         if (tg3_flag(tp, USE_PHYLIB)) {
10559                 struct phy_device *phydev;
10560                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10561                         return -EAGAIN;
10562                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10563                 return phy_ethtool_sset(phydev, cmd);
10564         }
10565
10566         if (cmd->autoneg != AUTONEG_ENABLE &&
10567             cmd->autoneg != AUTONEG_DISABLE)
10568                 return -EINVAL;
10569
10570         if (cmd->autoneg == AUTONEG_DISABLE &&
10571             cmd->duplex != DUPLEX_FULL &&
10572             cmd->duplex != DUPLEX_HALF)
10573                 return -EINVAL;
10574
10575         if (cmd->autoneg == AUTONEG_ENABLE) {
10576                 u32 mask = ADVERTISED_Autoneg |
10577                            ADVERTISED_Pause |
10578                            ADVERTISED_Asym_Pause;
10579
10580                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10581                         mask |= ADVERTISED_1000baseT_Half |
10582                                 ADVERTISED_1000baseT_Full;
10583
10584                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10585                         mask |= ADVERTISED_100baseT_Half |
10586                                 ADVERTISED_100baseT_Full |
10587                                 ADVERTISED_10baseT_Half |
10588                                 ADVERTISED_10baseT_Full |
10589                                 ADVERTISED_TP;
10590                 else
10591                         mask |= ADVERTISED_FIBRE;
10592
10593                 if (cmd->advertising & ~mask)
10594                         return -EINVAL;
10595
10596                 mask &= (ADVERTISED_1000baseT_Half |
10597                          ADVERTISED_1000baseT_Full |
10598                          ADVERTISED_100baseT_Half |
10599                          ADVERTISED_100baseT_Full |
10600                          ADVERTISED_10baseT_Half |
10601                          ADVERTISED_10baseT_Full);
10602
10603                 cmd->advertising &= mask;
10604         } else {
10605                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10606                         if (speed != SPEED_1000)
10607                                 return -EINVAL;
10608
10609                         if (cmd->duplex != DUPLEX_FULL)
10610                                 return -EINVAL;
10611                 } else {
10612                         if (speed != SPEED_100 &&
10613                             speed != SPEED_10)
10614                                 return -EINVAL;
10615                 }
10616         }
10617
10618         tg3_full_lock(tp, 0);
10619
10620         tp->link_config.autoneg = cmd->autoneg;
10621         if (cmd->autoneg == AUTONEG_ENABLE) {
10622                 tp->link_config.advertising = (cmd->advertising |
10623                                               ADVERTISED_Autoneg);
10624                 tp->link_config.speed = SPEED_UNKNOWN;
10625                 tp->link_config.duplex = DUPLEX_UNKNOWN;
10626         } else {
10627                 tp->link_config.advertising = 0;
10628                 tp->link_config.speed = speed;
10629                 tp->link_config.duplex = cmd->duplex;
10630         }
10631
10632         if (netif_running(dev))
10633                 tg3_setup_phy(tp, 1);
10634
10635         tg3_full_unlock(tp);
10636
10637         return 0;
10638 }
10639
10640 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10641 {
10642         struct tg3 *tp = netdev_priv(dev);
10643
10644         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10645         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10646         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10647         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10648 }
10649
10650 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10651 {
10652         struct tg3 *tp = netdev_priv(dev);
10653
10654         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10655                 wol->supported = WAKE_MAGIC;
10656         else
10657                 wol->supported = 0;
10658         wol->wolopts = 0;
10659         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10660                 wol->wolopts = WAKE_MAGIC;
10661         memset(&wol->sopass, 0, sizeof(wol->sopass));
10662 }
10663
10664 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10665 {
10666         struct tg3 *tp = netdev_priv(dev);
10667         struct device *dp = &tp->pdev->dev;
10668
10669         if (wol->wolopts & ~WAKE_MAGIC)
10670                 return -EINVAL;
10671         if ((wol->wolopts & WAKE_MAGIC) &&
10672             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10673                 return -EINVAL;
10674
10675         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10676
10677         spin_lock_bh(&tp->lock);
10678         if (device_may_wakeup(dp))
10679                 tg3_flag_set(tp, WOL_ENABLE);
10680         else
10681                 tg3_flag_clear(tp, WOL_ENABLE);
10682         spin_unlock_bh(&tp->lock);
10683
10684         return 0;
10685 }
10686
10687 static u32 tg3_get_msglevel(struct net_device *dev)
10688 {
10689         struct tg3 *tp = netdev_priv(dev);
10690         return tp->msg_enable;
10691 }
10692
10693 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10694 {
10695         struct tg3 *tp = netdev_priv(dev);
10696         tp->msg_enable = value;
10697 }
10698
10699 static int tg3_nway_reset(struct net_device *dev)
10700 {
10701         struct tg3 *tp = netdev_priv(dev);
10702         int r;
10703
10704         if (!netif_running(dev))
10705                 return -EAGAIN;
10706
10707         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10708                 return -EINVAL;
10709
10710         if (tg3_flag(tp, USE_PHYLIB)) {
10711                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10712                         return -EAGAIN;
10713                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10714         } else {
10715                 u32 bmcr;
10716
10717                 spin_lock_bh(&tp->lock);
10718                 r = -EINVAL;
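                /* BMCR is read twice here; the first read appears to
                 * act as a dummy read that flushes any stale latched
                 * value before the second, actually tested, read.
                 */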
10719                 tg3_readphy(tp, MII_BMCR, &bmcr);
10720                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10721                     ((bmcr & BMCR_ANENABLE) ||
10722                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10723                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10724                                                    BMCR_ANENABLE);
10725                         r = 0;
10726                 }
10727                 spin_unlock_bh(&tp->lock);
10728         }
10729
10730         return r;
10731 }
10732
10733 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10734 {
10735         struct tg3 *tp = netdev_priv(dev);
10736
10737         ering->rx_max_pending = tp->rx_std_ring_mask;
10738         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10739                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10740         else
10741                 ering->rx_jumbo_max_pending = 0;
10742
10743         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10744
10745         ering->rx_pending = tp->rx_pending;
10746         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10747                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10748         else
10749                 ering->rx_jumbo_pending = 0;
10750
10751         ering->tx_pending = tp->napi[0].tx_pending;
10752 }
10753
10754 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10755 {
10756         struct tg3 *tp = netdev_priv(dev);
10757         int i, irq_sync = 0, err = 0;
10758
10759         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10760             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10761             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10762             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10763             (tg3_flag(tp, TSO_BUG) &&
10764              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10765                 return -EINVAL;
10766
10767         if (netif_running(dev)) {
10768                 tg3_phy_stop(tp);
10769                 tg3_netif_stop(tp);
10770                 irq_sync = 1;
10771         }
10772
10773         tg3_full_lock(tp, irq_sync);
10774
10775         tp->rx_pending = ering->rx_pending;
10776
10777         if (tg3_flag(tp, MAX_RXPEND_64) &&
10778             tp->rx_pending > 63)
10779                 tp->rx_pending = 63;
10780         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10781
10782         for (i = 0; i < tp->irq_max; i++)
10783                 tp->napi[i].tx_pending = ering->tx_pending;
10784
10785         if (netif_running(dev)) {
10786                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10787                 err = tg3_restart_hw(tp, 1);
10788                 if (!err)
10789                         tg3_netif_start(tp);
10790         }
10791
10792         tg3_full_unlock(tp);
10793
10794         if (irq_sync && !err)
10795                 tg3_phy_start(tp);
10796
10797         return err;
10798 }
10799
10800 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10801 {
10802         struct tg3 *tp = netdev_priv(dev);
10803
10804         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10805
10806         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10807                 epause->rx_pause = 1;
10808         else
10809                 epause->rx_pause = 0;
10810
10811         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10812                 epause->tx_pause = 1;
10813         else
10814                 epause->tx_pause = 0;
10815 }
10816
10817 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10818 {
10819         struct tg3 *tp = netdev_priv(dev);
10820         int err = 0;
10821
10822         if (tg3_flag(tp, USE_PHYLIB)) {
10823                 u32 newadv;
10824                 struct phy_device *phydev;
10825
10826                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10827
10828                 if (!(phydev->supported & SUPPORTED_Pause) ||
10829                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10830                      (epause->rx_pause != epause->tx_pause)))
10831                         return -EINVAL;
10832
10833                 tp->link_config.flowctrl = 0;
10834                 if (epause->rx_pause) {
10835                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10836
10837                         if (epause->tx_pause) {
10838                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10839                                 newadv = ADVERTISED_Pause;
10840                         } else
10841                                 newadv = ADVERTISED_Pause |
10842                                          ADVERTISED_Asym_Pause;
10843                 } else if (epause->tx_pause) {
10844                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10845                         newadv = ADVERTISED_Asym_Pause;
10846                 } else
10847                         newadv = 0;
10848
10849                 if (epause->autoneg)
10850                         tg3_flag_set(tp, PAUSE_AUTONEG);
10851                 else
10852                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10853
10854                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10855                         u32 oldadv = phydev->advertising &
10856                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10857                         if (oldadv != newadv) {
10858                                 phydev->advertising &=
10859                                         ~(ADVERTISED_Pause |
10860                                           ADVERTISED_Asym_Pause);
10861                                 phydev->advertising |= newadv;
10862                                 if (phydev->autoneg) {
10863                                         /*
10864                                          * Always renegotiate the link to
10865                                          * inform our link partner of our
10866                                          * flow control settings, even if the
10867                                          * flow control is forced.  Let
10868                                          * tg3_adjust_link() do the final
10869                                          * flow control setup.
10870                                          */
10871                                         return phy_start_aneg(phydev);
10872                                 }
10873                         }
10874
10875                         if (!epause->autoneg)
10876                                 tg3_setup_flow_control(tp, 0, 0);
10877                 } else {
10878                         tp->link_config.advertising &=
10879                                         ~(ADVERTISED_Pause |
10880                                           ADVERTISED_Asym_Pause);
10881                         tp->link_config.advertising |= newadv;
10882                 }
10883         } else {
10884                 int irq_sync = 0;
10885
10886                 if (netif_running(dev)) {
10887                         tg3_netif_stop(tp);
10888                         irq_sync = 1;
10889                 }
10890
10891                 tg3_full_lock(tp, irq_sync);
10892
10893                 if (epause->autoneg)
10894                         tg3_flag_set(tp, PAUSE_AUTONEG);
10895                 else
10896                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10897                 if (epause->rx_pause)
10898                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10899                 else
10900                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10901                 if (epause->tx_pause)
10902                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10903                 else
10904                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10905
10906                 if (netif_running(dev)) {
10907                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10908                         err = tg3_restart_hw(tp, 1);
10909                         if (!err)
10910                                 tg3_netif_start(tp);
10911                 }
10912
10913                 tg3_full_unlock(tp);
10914         }
10915
10916         return err;
10917 }
10918
10919 static int tg3_get_sset_count(struct net_device *dev, int sset)
10920 {
10921         switch (sset) {
10922         case ETH_SS_TEST:
10923                 return TG3_NUM_TEST;
10924         case ETH_SS_STATS:
10925                 return TG3_NUM_STATS;
10926         default:
10927                 return -EOPNOTSUPP;
10928         }
10929 }
10930
10931 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10932                          u32 *rules __always_unused)
10933 {
10934         struct tg3 *tp = netdev_priv(dev);
10935
10936         if (!tg3_flag(tp, SUPPORT_MSIX))
10937                 return -EOPNOTSUPP;
10938
10939         switch (info->cmd) {
10940         case ETHTOOL_GRXRINGS:
10941                 if (netif_running(tp->dev))
10942                         info->data = tp->irq_cnt;
10943                 else {
10944                         info->data = num_online_cpus();
10945                         if (info->data > TG3_IRQ_MAX_VECS_RSS)
10946                                 info->data = TG3_IRQ_MAX_VECS_RSS;
10947                 }
10948
10949                 /* The first interrupt vector only
10950                  * handles link interrupts.
10951                  */
10952                 info->data -= 1;
10953                 return 0;
10954
10955         default:
10956                 return -EOPNOTSUPP;
10957         }
10958 }
10959
10960 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10961 {
10962         u32 size = 0;
10963         struct tg3 *tp = netdev_priv(dev);
10964
10965         if (tg3_flag(tp, SUPPORT_MSIX))
10966                 size = TG3_RSS_INDIR_TBL_SIZE;
10967
10968         return size;
10969 }
10970
10971 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10972 {
10973         struct tg3 *tp = netdev_priv(dev);
10974         int i;
10975
10976         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10977                 indir[i] = tp->rss_ind_tbl[i];
10978
10979         return 0;
10980 }
10981
10982 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10983 {
10984         struct tg3 *tp = netdev_priv(dev);
10985         size_t i;
10986
10987         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10988                 tp->rss_ind_tbl[i] = indir[i];
10989
10990         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10991                 return 0;
10992
10993         /* It is legal to write the indirection
10994          * table while the device is running.
10995          */
10996         tg3_full_lock(tp, 0);
10997         tg3_rss_write_indir_tbl(tp);
10998         tg3_full_unlock(tp);
10999
11000         return 0;
11001 }
11002
11003 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11004 {
11005         switch (stringset) {
11006         case ETH_SS_STATS:
11007                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11008                 break;
11009         case ETH_SS_TEST:
11010                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11011                 break;
11012         default:
11013                 WARN_ON(1);     /* unknown stringset: should never happen */
11014                 break;
11015         }
11016 }
11017
11018 static int tg3_set_phys_id(struct net_device *dev,
11019                             enum ethtool_phys_id_state state)
11020 {
11021         struct tg3 *tp = netdev_priv(dev);
11022
11023         if (!netif_running(tp->dev))
11024                 return -EAGAIN;
11025
11026         switch (state) {
11027         case ETHTOOL_ID_ACTIVE:
11028                 return 1;       /* cycle on/off once per second */
11029
11030         case ETHTOOL_ID_ON:
11031                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11032                      LED_CTRL_1000MBPS_ON |
11033                      LED_CTRL_100MBPS_ON |
11034                      LED_CTRL_10MBPS_ON |
11035                      LED_CTRL_TRAFFIC_OVERRIDE |
11036                      LED_CTRL_TRAFFIC_BLINK |
11037                      LED_CTRL_TRAFFIC_LED);
11038                 break;
11039
11040         case ETHTOOL_ID_OFF:
11041                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11042                      LED_CTRL_TRAFFIC_OVERRIDE);
11043                 break;
11044
11045         case ETHTOOL_ID_INACTIVE:
11046                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11047                 break;
11048         }
11049
11050         return 0;
11051 }
11052
11053 static void tg3_get_ethtool_stats(struct net_device *dev,
11054                                    struct ethtool_stats *estats, u64 *tmp_stats)
11055 {
11056         struct tg3 *tp = netdev_priv(dev);
11057
11058         if (tp->hw_stats)
11059                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11060         else
11061                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11062 }
11063
11064 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11065 {
11066         int i;
11067         __be32 *buf;
11068         u32 offset = 0, len = 0;
11069         u32 magic, val;
11070
11071         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11072                 return NULL;
11073
11074         if (magic == TG3_EEPROM_MAGIC) {
11075                 for (offset = TG3_NVM_DIR_START;
11076                      offset < TG3_NVM_DIR_END;
11077                      offset += TG3_NVM_DIRENT_SIZE) {
11078                         if (tg3_nvram_read(tp, offset, &val))
11079                                 return NULL;
11080
11081                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11082                             TG3_NVM_DIRTYPE_EXTVPD)
11083                                 break;
11084                 }
11085
11086                 if (offset != TG3_NVM_DIR_END) {
11087                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11088                         if (tg3_nvram_read(tp, offset + 4, &offset))
11089                                 return NULL;
11090
11091                         offset = tg3_nvram_logical_addr(tp, offset);
11092                 }
11093         }
11094
11095         if (!offset || !len) {
11096                 offset = TG3_NVM_VPD_OFF;
11097                 len = TG3_NVM_VPD_LEN;
11098         }
11099
11100         buf = kmalloc(len, GFP_KERNEL);
11101         if (buf == NULL)
11102                 return NULL;
11103
11104         if (magic == TG3_EEPROM_MAGIC) {
11105                 for (i = 0; i < len; i += 4) {
11106                         /* The data is in little-endian format in NVRAM.
11107                          * Use the big-endian read routines to preserve
11108                          * the byte order as it exists in NVRAM.
11109                          */
11110                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11111                                 goto error;
11112                 }
11113         } else {
11114                 u8 *ptr;
11115                 ssize_t cnt;
11116                 unsigned int pos = 0;
11117
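                /* pci_read_vpd() may return fewer bytes than asked or
                 * a transient error; retry up to three times, treating
                 * -ETIMEDOUT and -EINTR as zero progress, and fail if
                 * the full length never arrives.
                 */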
11118                 ptr = (u8 *)&buf[0];
11119                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11120                         cnt = pci_read_vpd(tp->pdev, pos,
11121                                            len - pos, ptr);
11122                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11123                                 cnt = 0;
11124                         else if (cnt < 0)
11125                                 goto error;
11126                 }
11127                 if (pos != len)
11128                         goto error;
11129         }
11130
11131         *vpdlen = len;
11132
11133         return buf;
11134
11135 error:
11136         kfree(buf);
11137         return NULL;
11138 }
11139
11140 #define NVRAM_TEST_SIZE 0x100
11141 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11142 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11143 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11144 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11145 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11146 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11147 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11148 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11149
11150 static int tg3_test_nvram(struct tg3 *tp)
11151 {
11152         u32 csum, magic, len;
11153         __be32 *buf;
11154         int i, j, k, err = 0, size;
11155
11156         if (tg3_flag(tp, NO_NVRAM))
11157                 return 0;
11158
11159         if (tg3_nvram_read(tp, 0, &magic) != 0)
11160                 return -EIO;
11161
11162         if (magic == TG3_EEPROM_MAGIC)
11163                 size = NVRAM_TEST_SIZE;
11164         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11165                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11166                     TG3_EEPROM_SB_FORMAT_1) {
11167                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11168                         case TG3_EEPROM_SB_REVISION_0:
11169                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11170                                 break;
11171                         case TG3_EEPROM_SB_REVISION_2:
11172                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11173                                 break;
11174                         case TG3_EEPROM_SB_REVISION_3:
11175                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11176                                 break;
11177                         case TG3_EEPROM_SB_REVISION_4:
11178                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11179                                 break;
11180                         case TG3_EEPROM_SB_REVISION_5:
11181                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11182                                 break;
11183                         case TG3_EEPROM_SB_REVISION_6:
11184                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11185                                 break;
11186                         default:
11187                                 return -EIO;
11188                         }
11189                 } else
11190                         return 0;
11191         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11192                 size = NVRAM_SELFBOOT_HW_SIZE;
11193         else
11194                 return -EIO;
11195
11196         buf = kmalloc(size, GFP_KERNEL);
11197         if (buf == NULL)
11198                 return -ENOMEM;
11199
11200         err = -EIO;
11201         for (i = 0, j = 0; i < size; i += 4, j++) {
11202                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11203                 if (err)
11204                         break;
11205         }
11206         if (i < size)
11207                 goto out;
11208
11209         /* Selfboot format */
11210         magic = be32_to_cpu(buf[0]);
11211         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11212             TG3_EEPROM_MAGIC_FW) {
11213                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11214
11215                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11216                     TG3_EEPROM_SB_REVISION_2) {
11217                         /* For rev 2, the csum doesn't include the MBA. */
11218                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11219                                 csum8 += buf8[i];
11220                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11221                                 csum8 += buf8[i];
11222                 } else {
11223                         for (i = 0; i < size; i++)
11224                                 csum8 += buf8[i];
11225                 }
11226
11227                 if (csum8 == 0) {
11228                         err = 0;
11229                         goto out;
11230                 }
11231
11232                 err = -EIO;
11233                 goto out;
11234         }
11235
11236         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11237             TG3_EEPROM_MAGIC_HW) {
11238                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11239                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11240                 u8 *buf8 = (u8 *) buf;
11241
11242                 /* Separate the parity bits and the data bytes.  */
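                      /* Layout, as the loop below implies: raw bytes 0, 8, 16
                       * and 17 pack 7 + 7 + 6 + 8 = 28 parity bits; the other
                       * 28 of the 0x20 raw bytes are data, which matches
                       * NVRAM_SELFBOOT_DATA_SIZE (0x1c).
                       */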
11243                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11244                         if ((i == 0) || (i == 8)) {
11245                                 int l;
11246                                 u8 msk;
11247
11248                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11249                                         parity[k++] = buf8[i] & msk;
11250                                 i++;
11251                         } else if (i == 16) {
11252                                 int l;
11253                                 u8 msk;
11254
11255                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11256                                         parity[k++] = buf8[i] & msk;
11257                                 i++;
11258
11259                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11260                                         parity[k++] = buf8[i] & msk;
11261                                 i++;
11262                         }
11263                         data[j++] = buf8[i];
11264                 }
11265
11266                 err = -EIO;
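                      /* Each data byte plus its parity bit must have odd
                       * overall parity: an odd bit count requires a clear
                       * parity bit, an even bit count a set one.
                       */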
11267                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11268                         u8 hw8 = hweight8(data[i]);
11269
11270                         if ((hw8 & 0x1) && parity[i])
11271                                 goto out;
11272                         else if (!(hw8 & 0x1) && !parity[i])
11273                                 goto out;
11274                 }
11275                 err = 0;
11276                 goto out;
11277         }
11278
11279         err = -EIO;
11280
11281         /* Bootstrap checksum at offset 0x10 */
11282         csum = calc_crc((unsigned char *) buf, 0x10);
11283         if (csum != le32_to_cpu(buf[0x10/4]))
11284                 goto out;
11285
11286         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11287         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11288         if (csum != le32_to_cpu(buf[0xfc/4]))
11289                 goto out;
11290
11291         kfree(buf);
11292
11293         buf = tg3_vpd_readblock(tp, &len);
11294         if (!buf)
11295                 return -ENOMEM;
11296
11297         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11298         if (i > 0) {
11299                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11300                 if (j < 0)
11301                         goto out;
11302
11303                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11304                         goto out;
11305
11306                 i += PCI_VPD_LRDT_TAG_SIZE;
11307                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11308                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11309                 if (j > 0) {
11310                         u8 csum8 = 0;
11311
11312                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11313
11314                         for (i = 0; i <= j; i++)
11315                                 csum8 += ((u8 *)buf)[i];
11316
11317                         if (csum8)
11318                                 goto out;
11319                 }
11320         }
11321
11322         err = 0;
11323
11324 out:
11325         kfree(buf);
11326         return err;
11327 }
11328
11329 #define TG3_SERDES_TIMEOUT_SEC  2
11330 #define TG3_COPPER_TIMEOUT_SEC  6
11331
11332 static int tg3_test_link(struct tg3 *tp)
11333 {
11334         int i, max;
11335
11336         if (!netif_running(tp->dev))
11337                 return -ENODEV;
11338
11339         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11340                 max = TG3_SERDES_TIMEOUT_SEC;
11341         else
11342                 max = TG3_COPPER_TIMEOUT_SEC;
11343
11344         for (i = 0; i < max; i++) {
11345                 if (netif_carrier_ok(tp->dev))
11346                         return 0;
11347
11348                 if (msleep_interruptible(1000))
11349                         break;
11350         }
11351
11352         return -EIO;
11353 }
11354
11355 /* Only test the commonly used registers */
11356 static int tg3_test_registers(struct tg3 *tp)
11357 {
11358         int i, is_5705, is_5750;
11359         u32 offset, read_mask, write_mask, val, save_val, read_val;
11360         static struct {
11361                 u16 offset;
11362                 u16 flags;
11363 #define TG3_FL_5705     0x1
11364 #define TG3_FL_NOT_5705 0x2
11365 #define TG3_FL_NOT_5788 0x4
11366 #define TG3_FL_NOT_5750 0x8
11367                 u32 read_mask;
11368                 u32 write_mask;
11369         } reg_tbl[] = {
11370                 /* MAC Control Registers */
11371                 { MAC_MODE, TG3_FL_NOT_5705,
11372                         0x00000000, 0x00ef6f8c },
11373                 { MAC_MODE, TG3_FL_5705,
11374                         0x00000000, 0x01ef6b8c },
11375                 { MAC_STATUS, TG3_FL_NOT_5705,
11376                         0x03800107, 0x00000000 },
11377                 { MAC_STATUS, TG3_FL_5705,
11378                         0x03800100, 0x00000000 },
11379                 { MAC_ADDR_0_HIGH, 0x0000,
11380                         0x00000000, 0x0000ffff },
11381                 { MAC_ADDR_0_LOW, 0x0000,
11382                         0x00000000, 0xffffffff },
11383                 { MAC_RX_MTU_SIZE, 0x0000,
11384                         0x00000000, 0x0000ffff },
11385                 { MAC_TX_MODE, 0x0000,
11386                         0x00000000, 0x00000070 },
11387                 { MAC_TX_LENGTHS, 0x0000,
11388                         0x00000000, 0x00003fff },
11389                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11390                         0x00000000, 0x000007fc },
11391                 { MAC_RX_MODE, TG3_FL_5705,
11392                         0x00000000, 0x000007dc },
11393                 { MAC_HASH_REG_0, 0x0000,
11394                         0x00000000, 0xffffffff },
11395                 { MAC_HASH_REG_1, 0x0000,
11396                         0x00000000, 0xffffffff },
11397                 { MAC_HASH_REG_2, 0x0000,
11398                         0x00000000, 0xffffffff },
11399                 { MAC_HASH_REG_3, 0x0000,
11400                         0x00000000, 0xffffffff },
11401
11402                 /* Receive Data and Receive BD Initiator Control Registers. */
11403                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11404                         0x00000000, 0xffffffff },
11405                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11406                         0x00000000, 0xffffffff },
11407                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11408                         0x00000000, 0x00000003 },
11409                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11410                         0x00000000, 0xffffffff },
11411                 { RCVDBDI_STD_BD+0, 0x0000,
11412                         0x00000000, 0xffffffff },
11413                 { RCVDBDI_STD_BD+4, 0x0000,
11414                         0x00000000, 0xffffffff },
11415                 { RCVDBDI_STD_BD+8, 0x0000,
11416                         0x00000000, 0xffff0002 },
11417                 { RCVDBDI_STD_BD+0xc, 0x0000,
11418                         0x00000000, 0xffffffff },
11419
11420                 /* Receive BD Initiator Control Registers. */
11421                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11422                         0x00000000, 0xffffffff },
11423                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11424                         0x00000000, 0x000003ff },
11425                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11426                         0x00000000, 0xffffffff },
11427
11428                 /* Host Coalescing Control Registers. */
11429                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11430                         0x00000000, 0x00000004 },
11431                 { HOSTCC_MODE, TG3_FL_5705,
11432                         0x00000000, 0x000000f6 },
11433                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11434                         0x00000000, 0xffffffff },
11435                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11436                         0x00000000, 0x000003ff },
11437                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11438                         0x00000000, 0xffffffff },
11439                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11440                         0x00000000, 0x000003ff },
11441                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11442                         0x00000000, 0xffffffff },
11443                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11444                         0x00000000, 0x000000ff },
11445                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11446                         0x00000000, 0xffffffff },
11447                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11448                         0x00000000, 0x000000ff },
11449                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11450                         0x00000000, 0xffffffff },
11451                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11452                         0x00000000, 0xffffffff },
11453                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11454                         0x00000000, 0xffffffff },
11455                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11456                         0x00000000, 0x000000ff },
11457                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11458                         0x00000000, 0xffffffff },
11459                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11460                         0x00000000, 0x000000ff },
11461                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11462                         0x00000000, 0xffffffff },
11463                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11464                         0x00000000, 0xffffffff },
11465                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11466                         0x00000000, 0xffffffff },
11467                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11468                         0x00000000, 0xffffffff },
11469                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11470                         0x00000000, 0xffffffff },
11471                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11472                         0xffffffff, 0x00000000 },
11473                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11474                         0xffffffff, 0x00000000 },
11475
11476                 /* Buffer Manager Control Registers. */
11477                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11478                         0x00000000, 0x007fff80 },
11479                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11480                         0x00000000, 0x007fffff },
11481                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11482                         0x00000000, 0x0000003f },
11483                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11484                         0x00000000, 0x000001ff },
11485                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11486                         0x00000000, 0x000001ff },
11487                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11488                         0xffffffff, 0x00000000 },
11489                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11490                         0xffffffff, 0x00000000 },
11491
11492                 /* Mailbox Registers */
11493                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11494                         0x00000000, 0x000001ff },
11495                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11496                         0x00000000, 0x000001ff },
11497                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11498                         0x00000000, 0x000007ff },
11499                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11500                         0x00000000, 0x000001ff },
11501
11502                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11503         };
11504
11505         is_5705 = is_5750 = 0;
11506         if (tg3_flag(tp, 5705_PLUS)) {
11507                 is_5705 = 1;
11508                 if (tg3_flag(tp, 5750_PLUS))
11509                         is_5750 = 1;
11510         }
11511
11512         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11513                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11514                         continue;
11515
11516                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11517                         continue;
11518
11519                 if (tg3_flag(tp, IS_5788) &&
11520                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11521                         continue;
11522
11523                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11524                         continue;
11525
11526                 offset = (u32) reg_tbl[i].offset;
11527                 read_mask = reg_tbl[i].read_mask;
11528                 write_mask = reg_tbl[i].write_mask;
11529
11530                 /* Save the original register content */
11531                 save_val = tr32(offset);
11532
11533                 /* Determine the read-only value. */
11534                 read_val = save_val & read_mask;
11535
11536                 /* Write zero to the register, then make sure the read-only bits
11537                  * are not changed and the read/write bits are all zeros.
11538                  */
11539                 tw32(offset, 0);
11540
11541                 val = tr32(offset);
11542
11543                 /* Test the read-only and read/write bits. */
11544                 if (((val & read_mask) != read_val) || (val & write_mask))
11545                         goto out;
11546
11547                 /* Write ones to all the bits defined by read_mask and
11548                  * write_mask, then make sure the read-only bits are not
11549                  * changed and the read/write bits are all ones.
11550                  */
11551                 tw32(offset, read_mask | write_mask);
11552
11553                 val = tr32(offset);
11554
11555                 /* Test the read-only bits. */
11556                 if ((val & read_mask) != read_val)
11557                         goto out;
11558
11559                 /* Test the read/write bits. */
11560                 if ((val & write_mask) != write_mask)
11561                         goto out;
11562
11563                 tw32(offset, save_val);
11564         }
11565
11566         return 0;
11567
11568 out:
11569         if (netif_msg_hw(tp))
11570                 netdev_err(tp->dev,
11571                            "Register test failed at offset %x\n", offset);
11572         tw32(offset, save_val);
11573         return -EIO;
11574 }
11575
11576 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11577 {
11578         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11579         int i;
11580         u32 j;
11581
11582         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11583                 for (j = 0; j < len; j += 4) {
11584                         u32 val;
11585
11586                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11587                         tg3_read_mem(tp, offset + j, &val);
11588                         if (val != test_pattern[i])
11589                                 return -EIO;
11590                 }
11591         }
11592         return 0;
11593 }
11594
11595 static int tg3_test_memory(struct tg3 *tp)
11596 {
11597         static struct mem_entry {
11598                 u32 offset;
11599                 u32 len;
11600         } mem_tbl_570x[] = {
11601                 { 0x00000000, 0x00b50},
11602                 { 0x00002000, 0x1c000},
11603                 { 0xffffffff, 0x00000}
11604         }, mem_tbl_5705[] = {
11605                 { 0x00000100, 0x0000c},
11606                 { 0x00000200, 0x00008},
11607                 { 0x00004000, 0x00800},
11608                 { 0x00006000, 0x01000},
11609                 { 0x00008000, 0x02000},
11610                 { 0x00010000, 0x0e000},
11611                 { 0xffffffff, 0x00000}
11612         }, mem_tbl_5755[] = {
11613                 { 0x00000200, 0x00008},
11614                 { 0x00004000, 0x00800},
11615                 { 0x00006000, 0x00800},
11616                 { 0x00008000, 0x02000},
11617                 { 0x00010000, 0x0c000},
11618                 { 0xffffffff, 0x00000}
11619         }, mem_tbl_5906[] = {
11620                 { 0x00000200, 0x00008},
11621                 { 0x00004000, 0x00400},
11622                 { 0x00006000, 0x00400},
11623                 { 0x00008000, 0x01000},
11624                 { 0x00010000, 0x01000},
11625                 { 0xffffffff, 0x00000}
11626         }, mem_tbl_5717[] = {
11627                 { 0x00000200, 0x00008},
11628                 { 0x00010000, 0x0a000},
11629                 { 0x00020000, 0x13c00},
11630                 { 0xffffffff, 0x00000}
11631         }, mem_tbl_57765[] = {
11632                 { 0x00000200, 0x00008},
11633                 { 0x00004000, 0x00800},
11634                 { 0x00006000, 0x09800},
11635                 { 0x00010000, 0x0a000},
11636                 { 0xffffffff, 0x00000}
11637         };
11638         struct mem_entry *mem_tbl;
11639         int err = 0;
11640         int i;
11641
11642         if (tg3_flag(tp, 5717_PLUS))
11643                 mem_tbl = mem_tbl_5717;
11644         else if (tg3_flag(tp, 57765_CLASS))
11645                 mem_tbl = mem_tbl_57765;
11646         else if (tg3_flag(tp, 5755_PLUS))
11647                 mem_tbl = mem_tbl_5755;
11648         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11649                 mem_tbl = mem_tbl_5906;
11650         else if (tg3_flag(tp, 5705_PLUS))
11651                 mem_tbl = mem_tbl_5705;
11652         else
11653                 mem_tbl = mem_tbl_570x;
11654
11655         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11656                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11657                 if (err)
11658                         break;
11659         }
11660
11661         return err;
11662 }
11663
11664 #define TG3_TSO_MSS             500
11665
11666 #define TG3_TSO_IP_HDR_LEN      20
11667 #define TG3_TSO_TCP_HDR_LEN     20
11668 #define TG3_TSO_TCP_OPT_LEN     12
11669
11670 static const u8 tg3_tso_header[] = {
11671 0x08, 0x00,                               /* Ethertype: IPv4 */
11672 0x45, 0x00, 0x00, 0x00,                   /* IP: ver/ihl, tos, tot_len (patched at runtime) */
11673 0x00, 0x00, 0x40, 0x00,                   /* IP: id, flags (DF) + frag offset */
11674 0x40, 0x06, 0x00, 0x00,                   /* IP: ttl 64, proto TCP, csum 0 */
11675 0x0a, 0x00, 0x00, 0x01,                   /* IP: src 10.0.0.1 */
11676 0x0a, 0x00, 0x00, 0x02,                   /* IP: dst 10.0.0.2 */
11677 0x0d, 0x00, 0xe0, 0x00,                   /* TCP: src/dst ports */
11678 0x00, 0x00, 0x01, 0x00,                   /* TCP: sequence number */
11679 0x00, 0x00, 0x02, 0x00,                   /* TCP: ack number */
11680 0x80, 0x10, 0x10, 0x00,                   /* TCP: doff 8 (20 hdr + 12 opt), flags ACK, window */
11681 0x14, 0x09, 0x00, 0x00,                   /* TCP: checksum, urgent ptr */
11682 0x01, 0x01, 0x08, 0x0a,                   /* TCP opts: NOP, NOP, timestamp (kind 8, len 10) */
11683 0x11, 0x11, 0x11, 0x11,                   /* TCP opts: timestamp values */
11684 0x11, 0x11, 0x11, 0x11,
11685 };
11686
11687 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11688 {
11689         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11690         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11691         u32 budget;
11692         struct sk_buff *skb;
11693         u8 *tx_data, *rx_data;
11694         dma_addr_t map;
11695         int num_pkts, tx_len, rx_len, i, err;
11696         struct tg3_rx_buffer_desc *desc;
11697         struct tg3_napi *tnapi, *rnapi;
11698         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11699
11700         tnapi = &tp->napi[0];
11701         rnapi = &tp->napi[0];
11702         if (tp->irq_cnt > 1) {
11703                 if (tg3_flag(tp, ENABLE_RSS))
11704                         rnapi = &tp->napi[1];
11705                 if (tg3_flag(tp, ENABLE_TSS))
11706                         tnapi = &tp->napi[1];
11707         }
11708         coal_now = tnapi->coal_now | rnapi->coal_now;
11709
11710         err = -EIO;
11711
11712         tx_len = pktsz;
11713         skb = netdev_alloc_skb(tp->dev, tx_len);
11714         if (!skb)
11715                 return -ENOMEM;
11716
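              /* Build the loopback frame: the destination MAC is the device's
               * own address; the source MAC and ethertype bytes are zeroed
               * (the TSO path below overwrites the ethertype with 0x0800).
               */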
11717         tx_data = skb_put(skb, tx_len);
11718         memcpy(tx_data, tp->dev->dev_addr, 6);
11719         memset(tx_data + 6, 0x0, 8);
11720
11721         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11722
11723         if (tso_loopback) {
11724                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11725
11726                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11727                               TG3_TSO_TCP_OPT_LEN;
11728
11729                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11730                        sizeof(tg3_tso_header));
11731                 mss = TG3_TSO_MSS;
11732
11733                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11734                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11735
11736                 /* Set the total length field in the IP header */
11737                 iph->tot_len = htons((u16)(mss + hdr_len));
11738
11739                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11740                               TXD_FLAG_CPU_POST_DMA);
11741
11742                 if (tg3_flag(tp, HW_TSO_1) ||
11743                     tg3_flag(tp, HW_TSO_2) ||
11744                     tg3_flag(tp, HW_TSO_3)) {
11745                         struct tcphdr *th;
11746                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11747                         th = (struct tcphdr *)&tx_data[val];
11748                         th->check = 0;
11749                 } else
11750                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11751
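                      /* Each hardware TSO generation encodes the header
                       * length differently: HW_TSO_3 splits it between the
                       * mss field and base_flags, HW_TSO_2 shifts it into the
                       * upper mss bits, and older parts encode only the TCP
                       * option length.
                       */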
11752                 if (tg3_flag(tp, HW_TSO_3)) {
11753                         mss |= (hdr_len & 0xc) << 12;
11754                         if (hdr_len & 0x10)
11755                                 base_flags |= 0x00000010;
11756                         base_flags |= (hdr_len & 0x3e0) << 5;
11757                 } else if (tg3_flag(tp, HW_TSO_2))
11758                         mss |= hdr_len << 9;
11759                 else if (tg3_flag(tp, HW_TSO_1) ||
11760                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11761                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11762                 } else {
11763                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11764                 }
11765
11766                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11767         } else {
11768                 num_pkts = 1;
11769                 data_off = ETH_HLEN;
11770
11771                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
11772                     tx_len > VLAN_ETH_FRAME_LEN)
11773                         base_flags |= TXD_FLAG_JMB_PKT;
11774         }
11775
11776         for (i = data_off; i < tx_len; i++)
11777                 tx_data[i] = (u8) (i & 0xff);
11778
11779         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11780         if (pci_dma_mapping_error(tp->pdev, map)) {
11781                 dev_kfree_skb(skb);
11782                 return -EIO;
11783         }
11784
11785         val = tnapi->tx_prod;
11786         tnapi->tx_buffers[val].skb = skb;
11787         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11788
11789         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11790                rnapi->coal_now);
11791
11792         udelay(10);
11793
11794         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11795
11796         budget = tg3_tx_avail(tnapi);
11797         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11798                             base_flags | TXD_FLAG_END, mss, 0)) {
11799                 tnapi->tx_buffers[val].skb = NULL;
11800                 dev_kfree_skb(skb);
11801                 return -EIO;
11802         }
11803
11804         tnapi->tx_prod++;
11805
11806         /* Sync BD data before updating mailbox */
11807         wmb();
11808
11809         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11810         tr32_mailbox(tnapi->prodmbox);
11811
11812         udelay(10);
11813
11814         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11815         for (i = 0; i < 35; i++) {
11816                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11817                        coal_now);
11818
11819                 udelay(10);
11820
11821                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11822                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11823                 if ((tx_idx == tnapi->tx_prod) &&
11824                     (rx_idx == (rx_start_idx + num_pkts)))
11825                         break;
11826         }
11827
11828         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11829         dev_kfree_skb(skb);
11830
11831         if (tx_idx != tnapi->tx_prod)
11832                 goto out;
11833
11834         if (rx_idx != rx_start_idx + num_pkts)
11835                 goto out;
11836
11837         val = data_off;
11838         while (rx_idx != rx_start_idx) {
11839                 desc = &rnapi->rx_rcb[rx_start_idx++];
11840                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11841                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11842
11843                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11844                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11845                         goto out;
11846
11847                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11848                          - ETH_FCS_LEN;
11849
11850                 if (!tso_loopback) {
11851                         if (rx_len != tx_len)
11852                                 goto out;
11853
11854                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11855                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11856                                         goto out;
11857                         } else {
11858                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11859                                         goto out;
11860                         }
11861                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11862                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11863                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11864                         goto out;
11865                 }
11866
11867                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11868                         rx_data = tpr->rx_std_buffers[desc_idx].data;
11869                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11870                                              mapping);
11871                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11872                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11873                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11874                                              mapping);
11875                 } else
11876                         goto out;
11877
11878                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11879                                             PCI_DMA_FROMDEVICE);
11880
11881                 rx_data += TG3_RX_OFFSET(tp);
11882                 for (i = data_off; i < rx_len; i++, val++) {
11883                         if (*(rx_data + i) != (u8) (val & 0xff))
11884                                 goto out;
11885                 }
11886         }
11887
11888         err = 0;
11889
11890         /* tg3_free_rings will unmap and free the rx_data */
11891 out:
11892         return err;
11893 }
11894
11895 #define TG3_STD_LOOPBACK_FAILED         1
11896 #define TG3_JMB_LOOPBACK_FAILED         2
11897 #define TG3_TSO_LOOPBACK_FAILED         4
11898 #define TG3_LOOPBACK_FAILED \
11899         (TG3_STD_LOOPBACK_FAILED | \
11900          TG3_JMB_LOOPBACK_FAILED | \
11901          TG3_TSO_LOOPBACK_FAILED)
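      /* data[0], data[1] and data[2] hold the MAC-, internal-PHY- and
       * external-PHY-loopback results respectively, each a mask of the
       * *_LOOPBACK_FAILED bits above.
       */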
11902
11903 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11904 {
11905         int err = -EIO;
11906         u32 eee_cap;
11907         u32 jmb_pkt_sz = 9000;
11908
11909         if (tp->dma_limit)
11910                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
11911
11912         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11913         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11914
11915         if (!netif_running(tp->dev)) {
11916                 data[0] = TG3_LOOPBACK_FAILED;
11917                 data[1] = TG3_LOOPBACK_FAILED;
11918                 if (do_extlpbk)
11919                         data[2] = TG3_LOOPBACK_FAILED;
11920                 goto done;
11921         }
11922
11923         err = tg3_reset_hw(tp, 1);
11924         if (err) {
11925                 data[0] = TG3_LOOPBACK_FAILED;
11926                 data[1] = TG3_LOOPBACK_FAILED;
11927                 if (do_extlpbk)
11928                         data[2] = TG3_LOOPBACK_FAILED;
11929                 goto done;
11930         }
11931
11932         if (tg3_flag(tp, ENABLE_RSS)) {
11933                 int i;
11934
11935                 /* Reroute all rx packets to the 1st queue */
11936                 for (i = MAC_RSS_INDIR_TBL_0;
11937                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11938                         tw32(i, 0x0);
11939         }
11940
11941         /* HW errata - mac loopback fails in some cases on 5780.
11942          * Normal traffic and PHY loopback are not affected by
11943          * errata.  Also, the MAC loopback test is deprecated for
11944          * all newer ASIC revisions.
11945          */
11946         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11947             !tg3_flag(tp, CPMU_PRESENT)) {
11948                 tg3_mac_loopback(tp, true);
11949
11950                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11951                         data[0] |= TG3_STD_LOOPBACK_FAILED;
11952
11953                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11954                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11955                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
11956
11957                 tg3_mac_loopback(tp, false);
11958         }
11959
11960         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11961             !tg3_flag(tp, USE_PHYLIB)) {
11962                 int i;
11963
11964                 tg3_phy_lpbk_set(tp, 0, false);
11965
11966                 /* Wait for link */
11967                 for (i = 0; i < 100; i++) {
11968                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11969                                 break;
11970                         mdelay(1);
11971                 }
11972
11973                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11974                         data[1] |= TG3_STD_LOOPBACK_FAILED;
11975                 if (tg3_flag(tp, TSO_CAPABLE) &&
11976                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11977                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
11978                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11979                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11980                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
11981
11982                 if (do_extlpbk) {
11983                         tg3_phy_lpbk_set(tp, 0, true);
11984
11985                         /* All link indications report up, but the hardware
11986                          * isn't really ready for about 20 msec.  Double it
11987                          * to be sure.
11988                          */
11989                         mdelay(40);
11990
11991                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11992                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
11993                         if (tg3_flag(tp, TSO_CAPABLE) &&
11994                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11995                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11996                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11997                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11998                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11999                 }
12000
12001                 /* Re-enable gphy autopowerdown. */
12002                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12003                         tg3_phy_toggle_apd(tp, true);
12004         }
12005
12006         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12007
12008 done:
12009         tp->phy_flags |= eee_cap;
12010
12011         return err;
12012 }
12013
12014 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12015                           u64 *data)
12016 {
12017         struct tg3 *tp = netdev_priv(dev);
12018         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12019
12020         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12021             tg3_power_up(tp)) {
12022                 etest->flags |= ETH_TEST_FL_FAILED;
12023                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12024                 return;
12025         }
12026
12027         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
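              /* Test result slots: data[0] NVRAM, data[1] link,
               * data[2] registers, data[3] memory, data[4..6] loopbacks,
               * data[7] interrupt.
               */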
12028
12029         if (tg3_test_nvram(tp) != 0) {
12030                 etest->flags |= ETH_TEST_FL_FAILED;
12031                 data[0] = 1;
12032         }
12033         if (!doextlpbk && tg3_test_link(tp)) {
12034                 etest->flags |= ETH_TEST_FL_FAILED;
12035                 data[1] = 1;
12036         }
12037         if (etest->flags & ETH_TEST_FL_OFFLINE) {
12038                 int err, err2 = 0, irq_sync = 0;
12039
12040                 if (netif_running(dev)) {
12041                         tg3_phy_stop(tp);
12042                         tg3_netif_stop(tp);
12043                         irq_sync = 1;
12044                 }
12045
12046                 tg3_full_lock(tp, irq_sync);
12047
12048                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12049                 err = tg3_nvram_lock(tp);
12050                 tg3_halt_cpu(tp, RX_CPU_BASE);
12051                 if (!tg3_flag(tp, 5705_PLUS))
12052                         tg3_halt_cpu(tp, TX_CPU_BASE);
12053                 if (!err)
12054                         tg3_nvram_unlock(tp);
12055
12056                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12057                         tg3_phy_reset(tp);
12058
12059                 if (tg3_test_registers(tp) != 0) {
12060                         etest->flags |= ETH_TEST_FL_FAILED;
12061                         data[2] = 1;
12062                 }
12063
12064                 if (tg3_test_memory(tp) != 0) {
12065                         etest->flags |= ETH_TEST_FL_FAILED;
12066                         data[3] = 1;
12067                 }
12068
12069                 if (doextlpbk)
12070                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12071
12072                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12073                         etest->flags |= ETH_TEST_FL_FAILED;
12074
12075                 tg3_full_unlock(tp);
12076
12077                 if (tg3_test_interrupt(tp) != 0) {
12078                         etest->flags |= ETH_TEST_FL_FAILED;
12079                         data[7] = 1;
12080                 }
12081
12082                 tg3_full_lock(tp, 0);
12083
12084                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12085                 if (netif_running(dev)) {
12086                         tg3_flag_set(tp, INIT_COMPLETE);
12087                         err2 = tg3_restart_hw(tp, 1);
12088                         if (!err2)
12089                                 tg3_netif_start(tp);
12090                 }
12091
12092                 tg3_full_unlock(tp);
12093
12094                 if (irq_sync && !err2)
12095                         tg3_phy_start(tp);
12096         }
12097         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12098                 tg3_power_down(tp);
12099
12100 }
12101
12102 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12103 {
12104         struct mii_ioctl_data *data = if_mii(ifr);
12105         struct tg3 *tp = netdev_priv(dev);
12106         int err;
12107
12108         if (tg3_flag(tp, USE_PHYLIB)) {
12109                 struct phy_device *phydev;
12110                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12111                         return -EAGAIN;
12112                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12113                 return phy_mii_ioctl(phydev, ifr, cmd);
12114         }
12115
12116         switch (cmd) {
12117         case SIOCGMIIPHY:
12118                 data->phy_id = tp->phy_addr;
12119
12120                 /* fallthru */
12121         case SIOCGMIIREG: {
12122                 u32 mii_regval;
12123
12124                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12125                         break;                  /* We have no PHY */
12126
12127                 if (!netif_running(dev))
12128                         return -EAGAIN;
12129
12130                 spin_lock_bh(&tp->lock);
12131                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12132                 spin_unlock_bh(&tp->lock);
12133
12134                 data->val_out = mii_regval;
12135
12136                 return err;
12137         }
12138
12139         case SIOCSMIIREG:
12140                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12141                         break;                  /* We have no PHY */
12142
12143                 if (!netif_running(dev))
12144                         return -EAGAIN;
12145
12146                 spin_lock_bh(&tp->lock);
12147                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12148                 spin_unlock_bh(&tp->lock);
12149
12150                 return err;
12151
12152         default:
12153                 /* do nothing */
12154                 break;
12155         }
12156         return -EOPNOTSUPP;
12157 }
12158
12159 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12160 {
12161         struct tg3 *tp = netdev_priv(dev);
12162
12163         memcpy(ec, &tp->coal, sizeof(*ec));
12164         return 0;
12165 }
12166
12167 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12168 {
12169         struct tg3 *tp = netdev_priv(dev);
12170         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12171         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12172
12173         if (!tg3_flag(tp, 5705_PLUS)) {
12174                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12175                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12176                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12177                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12178         }
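              /* On 5705 and newer parts the IRQ-coalescing and statistics
               * knobs are not supported, so the limits stay zero and any
               * nonzero request below is rejected.
               */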
12179
12180         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12181             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12182             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12183             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12184             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12185             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12186             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12187             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12188             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12189             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12190                 return -EINVAL;
12191
12192         /* No rx interrupts will be generated if both are zero */
12193         if ((ec->rx_coalesce_usecs == 0) &&
12194             (ec->rx_max_coalesced_frames == 0))
12195                 return -EINVAL;
12196
12197         /* No tx interrupts will be generated if both are zero */
12198         if ((ec->tx_coalesce_usecs == 0) &&
12199             (ec->tx_max_coalesced_frames == 0))
12200                 return -EINVAL;
12201
12202         /* Only copy relevant parameters, ignore all others. */
12203         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12204         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12205         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12206         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12207         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12208         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12209         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12210         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12211         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12212
12213         if (netif_running(dev)) {
12214                 tg3_full_lock(tp, 0);
12215                 __tg3_set_coalesce(tp, &tp->coal);
12216                 tg3_full_unlock(tp);
12217         }
12218         return 0;
12219 }
12220
12221 static const struct ethtool_ops tg3_ethtool_ops = {
12222         .get_settings           = tg3_get_settings,
12223         .set_settings           = tg3_set_settings,
12224         .get_drvinfo            = tg3_get_drvinfo,
12225         .get_regs_len           = tg3_get_regs_len,
12226         .get_regs               = tg3_get_regs,
12227         .get_wol                = tg3_get_wol,
12228         .set_wol                = tg3_set_wol,
12229         .get_msglevel           = tg3_get_msglevel,
12230         .set_msglevel           = tg3_set_msglevel,
12231         .nway_reset             = tg3_nway_reset,
12232         .get_link               = ethtool_op_get_link,
12233         .get_eeprom_len         = tg3_get_eeprom_len,
12234         .get_eeprom             = tg3_get_eeprom,
12235         .set_eeprom             = tg3_set_eeprom,
12236         .get_ringparam          = tg3_get_ringparam,
12237         .set_ringparam          = tg3_set_ringparam,
12238         .get_pauseparam         = tg3_get_pauseparam,
12239         .set_pauseparam         = tg3_set_pauseparam,
12240         .self_test              = tg3_self_test,
12241         .get_strings            = tg3_get_strings,
12242         .set_phys_id            = tg3_set_phys_id,
12243         .get_ethtool_stats      = tg3_get_ethtool_stats,
12244         .get_coalesce           = tg3_get_coalesce,
12245         .set_coalesce           = tg3_set_coalesce,
12246         .get_sset_count         = tg3_get_sset_count,
12247         .get_rxnfc              = tg3_get_rxnfc,
12248         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12249         .get_rxfh_indir         = tg3_get_rxfh_indir,
12250         .set_rxfh_indir         = tg3_set_rxfh_indir,
12251 };
12252
12253 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12254                                                 struct rtnl_link_stats64 *stats)
12255 {
12256         struct tg3 *tp = netdev_priv(dev);
12257
12258         if (!tp->hw_stats)
12259                 return &tp->net_stats_prev;
12260
12261         spin_lock_bh(&tp->lock);
12262         tg3_get_nstats(tp, stats);
12263         spin_unlock_bh(&tp->lock);
12264
12265         return stats;
12266 }
12267
12268 static void tg3_set_rx_mode(struct net_device *dev)
12269 {
12270         struct tg3 *tp = netdev_priv(dev);
12271
12272         if (!netif_running(dev))
12273                 return;
12274
12275         tg3_full_lock(tp, 0);
12276         __tg3_set_rx_mode(dev);
12277         tg3_full_unlock(tp);
12278 }
12279
12280 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12281                                int new_mtu)
12282 {
12283         dev->mtu = new_mtu;
12284
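              /* 5780-class devices cannot do TSO together with jumbo frames,
               * so TSO capability is toggled along with the MTU.
               */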
12285         if (new_mtu > ETH_DATA_LEN) {
12286                 if (tg3_flag(tp, 5780_CLASS)) {
12287                         netdev_update_features(dev);
12288                         tg3_flag_clear(tp, TSO_CAPABLE);
12289                 } else {
12290                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
12291                 }
12292         } else {
12293                 if (tg3_flag(tp, 5780_CLASS)) {
12294                         tg3_flag_set(tp, TSO_CAPABLE);
12295                         netdev_update_features(dev);
12296                 }
12297                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12298         }
12299 }
12300
12301 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12302 {
12303         struct tg3 *tp = netdev_priv(dev);
12304         int err, reset_phy = 0;
12305
12306         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12307                 return -EINVAL;
12308
12309         if (!netif_running(dev)) {
12310                 /* We'll just catch it later when the
12311                  * device is brought up.
12312                  */
12313                 tg3_set_mtu(dev, tp, new_mtu);
12314                 return 0;
12315         }
12316
12317         tg3_phy_stop(tp);
12318
12319         tg3_netif_stop(tp);
12320
12321         tg3_full_lock(tp, 1);
12322
12323         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12324
12325         tg3_set_mtu(dev, tp, new_mtu);
12326
12327         /* Reset the PHY, otherwise the read DMA engine will be left in a
12328          * mode that breaks all requests down to 256 bytes.
12329          */
12330         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12331                 reset_phy = 1;
12332
12333         err = tg3_restart_hw(tp, reset_phy);
12334
12335         if (!err)
12336                 tg3_netif_start(tp);
12337
12338         tg3_full_unlock(tp);
12339
12340         if (!err)
12341                 tg3_phy_start(tp);
12342
12343         return err;
12344 }
12345
12346 static const struct net_device_ops tg3_netdev_ops = {
12347         .ndo_open               = tg3_open,
12348         .ndo_stop               = tg3_close,
12349         .ndo_start_xmit         = tg3_start_xmit,
12350         .ndo_get_stats64        = tg3_get_stats64,
12351         .ndo_validate_addr      = eth_validate_addr,
12352         .ndo_set_rx_mode        = tg3_set_rx_mode,
12353         .ndo_set_mac_address    = tg3_set_mac_addr,
12354         .ndo_do_ioctl           = tg3_ioctl,
12355         .ndo_tx_timeout         = tg3_tx_timeout,
12356         .ndo_change_mtu         = tg3_change_mtu,
12357         .ndo_fix_features       = tg3_fix_features,
12358         .ndo_set_features       = tg3_set_features,
12359 #ifdef CONFIG_NET_POLL_CONTROLLER
12360         .ndo_poll_controller    = tg3_poll_controller,
12361 #endif
12362 };
12363
12364 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12365 {
12366         u32 cursize, val, magic;
12367
12368         tp->nvram_size = EEPROM_CHIP_SIZE;
12369
12370         if (tg3_nvram_read(tp, 0, &magic) != 0)
12371                 return;
12372
12373         if ((magic != TG3_EEPROM_MAGIC) &&
12374             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12375             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12376                 return;
12377
12378         /*
12379          * Size the chip by reading offsets at increasing powers of two.
12380          * When we encounter our validation signature, we know the addressing
12381          * has wrapped around, and thus have our chip size.
12382          */
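              /* For example, a 128-byte EEPROM wraps at offset 0x80 and
               * returns the magic value again, so the loop below stops with
               * cursize == 0x80.
               */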
12383         cursize = 0x10;
12384
12385         while (cursize < tp->nvram_size) {
12386                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12387                         return;
12388
12389                 if (val == magic)
12390                         break;
12391
12392                 cursize <<= 1;
12393         }
12394
12395         tp->nvram_size = cursize;
12396 }
12397
12398 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12399 {
12400         u32 val;
12401
12402         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12403                 return;
12404
12405         /* Selfboot format */
12406         if (val != TG3_EEPROM_MAGIC) {
12407                 tg3_get_eeprom_size(tp);
12408                 return;
12409         }
12410
12411         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12412                 if (val != 0) {
12413                         /* This is confusing.  We want to operate on the
12414                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12415                          * call will read from NVRAM and byteswap the data
12416                          * according to the byteswapping settings for all
12417                          * other register accesses.  This ensures the data we
12418                          * want will always reside in the lower 16-bits.
12419                          * However, the data in NVRAM is in LE format, which
12420                          * means the data from the NVRAM read will always be
12421                          * opposite the endianness of the CPU.  The 16-bit
12422                          * byteswap then brings the data to CPU endianness.
12423                          */
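                              /* For example, NVRAM bytes 0x00 0x04 at offsets
                               * 0xf2/0xf3 (1024 in little-endian) surface
                               * here as 0x0004; swab16() recovers 1024, i.e.
                               * a 1024 KB device.
                               */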
12424                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12425                         return;
12426                 }
12427         }
12428         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12429 }
12430
12431 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12432 {
12433         u32 nvcfg1;
12434
12435         nvcfg1 = tr32(NVRAM_CFG1);
12436         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12437                 tg3_flag_set(tp, FLASH);
12438         } else {
12439                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12440                 tw32(NVRAM_CFG1, nvcfg1);
12441         }
12442
12443         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12444             tg3_flag(tp, 5780_CLASS)) {
12445                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12446                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12447                         tp->nvram_jedecnum = JEDEC_ATMEL;
12448                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12449                         tg3_flag_set(tp, NVRAM_BUFFERED);
12450                         break;
12451                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12452                         tp->nvram_jedecnum = JEDEC_ATMEL;
12453                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12454                         break;
12455                 case FLASH_VENDOR_ATMEL_EEPROM:
12456                         tp->nvram_jedecnum = JEDEC_ATMEL;
12457                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12458                         tg3_flag_set(tp, NVRAM_BUFFERED);
12459                         break;
12460                 case FLASH_VENDOR_ST:
12461                         tp->nvram_jedecnum = JEDEC_ST;
12462                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12463                         tg3_flag_set(tp, NVRAM_BUFFERED);
12464                         break;
12465                 case FLASH_VENDOR_SAIFUN:
12466                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12467                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12468                         break;
12469                 case FLASH_VENDOR_SST_SMALL:
12470                 case FLASH_VENDOR_SST_LARGE:
12471                         tp->nvram_jedecnum = JEDEC_SST;
12472                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12473                         break;
12474                 }
12475         } else {
12476                 tp->nvram_jedecnum = JEDEC_ATMEL;
12477                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12478                 tg3_flag_set(tp, NVRAM_BUFFERED);
12479         }
12480 }
12481
12482 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12483 {
12484         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12485         case FLASH_5752PAGE_SIZE_256:
12486                 tp->nvram_pagesize = 256;
12487                 break;
12488         case FLASH_5752PAGE_SIZE_512:
12489                 tp->nvram_pagesize = 512;
12490                 break;
12491         case FLASH_5752PAGE_SIZE_1K:
12492                 tp->nvram_pagesize = 1024;
12493                 break;
12494         case FLASH_5752PAGE_SIZE_2K:
12495                 tp->nvram_pagesize = 2048;
12496                 break;
12497         case FLASH_5752PAGE_SIZE_4K:
12498                 tp->nvram_pagesize = 4096;
12499                 break;
12500         case FLASH_5752PAGE_SIZE_264:
12501                 tp->nvram_pagesize = 264;
12502                 break;
12503         case FLASH_5752PAGE_SIZE_528:
12504                 tp->nvram_pagesize = 528;
12505                 break;
12506         }
12507 }
12508
12509 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12510 {
12511         u32 nvcfg1;
12512
12513         nvcfg1 = tr32(NVRAM_CFG1);
12514
12515         /* NVRAM protection for TPM */
12516         if (nvcfg1 & (1 << 27))
12517                 tg3_flag_set(tp, PROTECTED_NVRAM);
12518
12519         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12520         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12521         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12522                 tp->nvram_jedecnum = JEDEC_ATMEL;
12523                 tg3_flag_set(tp, NVRAM_BUFFERED);
12524                 break;
12525         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12526                 tp->nvram_jedecnum = JEDEC_ATMEL;
12527                 tg3_flag_set(tp, NVRAM_BUFFERED);
12528                 tg3_flag_set(tp, FLASH);
12529                 break;
12530         case FLASH_5752VENDOR_ST_M45PE10:
12531         case FLASH_5752VENDOR_ST_M45PE20:
12532         case FLASH_5752VENDOR_ST_M45PE40:
12533                 tp->nvram_jedecnum = JEDEC_ST;
12534                 tg3_flag_set(tp, NVRAM_BUFFERED);
12535                 tg3_flag_set(tp, FLASH);
12536                 break;
12537         }
12538
12539         if (tg3_flag(tp, FLASH)) {
12540                 tg3_nvram_get_pagesize(tp, nvcfg1);
12541         } else {
12542                 /* For eeprom, set pagesize to maximum eeprom size */
12543                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12544
12545                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12546                 tw32(NVRAM_CFG1, nvcfg1);
12547         }
12548 }
12549
12550 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12551 {
12552         u32 nvcfg1, protect = 0;
12553
12554         nvcfg1 = tr32(NVRAM_CFG1);
12555
12556         /* NVRAM protection for TPM */
12557         if (nvcfg1 & (1 << 27)) {
12558                 tg3_flag_set(tp, PROTECTED_NVRAM);
12559                 protect = 1;
12560         }
12561
12562         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12563         switch (nvcfg1) {
12564         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12565         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12566         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12567         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12568                 tp->nvram_jedecnum = JEDEC_ATMEL;
12569                 tg3_flag_set(tp, NVRAM_BUFFERED);
12570                 tg3_flag_set(tp, FLASH);
12571                 tp->nvram_pagesize = 264;
12572                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12573                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12574                         tp->nvram_size = (protect ? 0x3e200 :
12575                                           TG3_NVRAM_SIZE_512KB);
12576                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12577                         tp->nvram_size = (protect ? 0x1f200 :
12578                                           TG3_NVRAM_SIZE_256KB);
12579                 else
12580                         tp->nvram_size = (protect ? 0x1f200 :
12581                                           TG3_NVRAM_SIZE_128KB);
12582                 break;
12583         case FLASH_5752VENDOR_ST_M45PE10:
12584         case FLASH_5752VENDOR_ST_M45PE20:
12585         case FLASH_5752VENDOR_ST_M45PE40:
12586                 tp->nvram_jedecnum = JEDEC_ST;
12587                 tg3_flag_set(tp, NVRAM_BUFFERED);
12588                 tg3_flag_set(tp, FLASH);
12589                 tp->nvram_pagesize = 256;
12590                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12591                         tp->nvram_size = (protect ?
12592                                           TG3_NVRAM_SIZE_64KB :
12593                                           TG3_NVRAM_SIZE_128KB);
12594                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12595                         tp->nvram_size = (protect ?
12596                                           TG3_NVRAM_SIZE_64KB :
12597                                           TG3_NVRAM_SIZE_256KB);
12598                 else
12599                         tp->nvram_size = (protect ?
12600                                           TG3_NVRAM_SIZE_128KB :
12601                                           TG3_NVRAM_SIZE_512KB);
12602                 break;
12603         }
12604 }
12605
12606 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12607 {
12608         u32 nvcfg1;
12609
12610         nvcfg1 = tr32(NVRAM_CFG1);
12611
12612         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12613         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12614         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12615         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12616         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12617                 tp->nvram_jedecnum = JEDEC_ATMEL;
12618                 tg3_flag_set(tp, NVRAM_BUFFERED);
12619                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12620
12621                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12622                 tw32(NVRAM_CFG1, nvcfg1);
12623                 break;
12624         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12625         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12626         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12627         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12628                 tp->nvram_jedecnum = JEDEC_ATMEL;
12629                 tg3_flag_set(tp, NVRAM_BUFFERED);
12630                 tg3_flag_set(tp, FLASH);
12631                 tp->nvram_pagesize = 264;
12632                 break;
12633         case FLASH_5752VENDOR_ST_M45PE10:
12634         case FLASH_5752VENDOR_ST_M45PE20:
12635         case FLASH_5752VENDOR_ST_M45PE40:
12636                 tp->nvram_jedecnum = JEDEC_ST;
12637                 tg3_flag_set(tp, NVRAM_BUFFERED);
12638                 tg3_flag_set(tp, FLASH);
12639                 tp->nvram_pagesize = 256;
12640                 break;
12641         }
12642 }
12643
12644 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12645 {
12646         u32 nvcfg1, protect = 0;
12647
12648         nvcfg1 = tr32(NVRAM_CFG1);
12649
12650         /* NVRAM protection for TPM */
12651         if (nvcfg1 & (1 << 27)) {
12652                 tg3_flag_set(tp, PROTECTED_NVRAM);
12653                 protect = 1;
12654         }
12655
12656         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12657         switch (nvcfg1) {
12658         case FLASH_5761VENDOR_ATMEL_ADB021D:
12659         case FLASH_5761VENDOR_ATMEL_ADB041D:
12660         case FLASH_5761VENDOR_ATMEL_ADB081D:
12661         case FLASH_5761VENDOR_ATMEL_ADB161D:
12662         case FLASH_5761VENDOR_ATMEL_MDB021D:
12663         case FLASH_5761VENDOR_ATMEL_MDB041D:
12664         case FLASH_5761VENDOR_ATMEL_MDB081D:
12665         case FLASH_5761VENDOR_ATMEL_MDB161D:
12666                 tp->nvram_jedecnum = JEDEC_ATMEL;
12667                 tg3_flag_set(tp, NVRAM_BUFFERED);
12668                 tg3_flag_set(tp, FLASH);
12669                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12670                 tp->nvram_pagesize = 256;
12671                 break;
12672         case FLASH_5761VENDOR_ST_A_M45PE20:
12673         case FLASH_5761VENDOR_ST_A_M45PE40:
12674         case FLASH_5761VENDOR_ST_A_M45PE80:
12675         case FLASH_5761VENDOR_ST_A_M45PE16:
12676         case FLASH_5761VENDOR_ST_M_M45PE20:
12677         case FLASH_5761VENDOR_ST_M_M45PE40:
12678         case FLASH_5761VENDOR_ST_M_M45PE80:
12679         case FLASH_5761VENDOR_ST_M_M45PE16:
12680                 tp->nvram_jedecnum = JEDEC_ST;
12681                 tg3_flag_set(tp, NVRAM_BUFFERED);
12682                 tg3_flag_set(tp, FLASH);
12683                 tp->nvram_pagesize = 256;
12684                 break;
12685         }
12686
12687         if (protect) {
12688                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12689         } else {
12690                 switch (nvcfg1) {
12691                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12692                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12693                 case FLASH_5761VENDOR_ST_A_M45PE16:
12694                 case FLASH_5761VENDOR_ST_M_M45PE16:
12695                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12696                         break;
12697                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12698                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12699                 case FLASH_5761VENDOR_ST_A_M45PE80:
12700                 case FLASH_5761VENDOR_ST_M_M45PE80:
12701                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12702                         break;
12703                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12704                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12705                 case FLASH_5761VENDOR_ST_A_M45PE40:
12706                 case FLASH_5761VENDOR_ST_M_M45PE40:
12707                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12708                         break;
12709                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12710                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12711                 case FLASH_5761VENDOR_ST_A_M45PE20:
12712                 case FLASH_5761VENDOR_ST_M_M45PE20:
12713                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12714                         break;
12715                 }
12716         }
12717 }
12718
12719 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12720 {
12721         tp->nvram_jedecnum = JEDEC_ATMEL;
12722         tg3_flag_set(tp, NVRAM_BUFFERED);
12723         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12724 }
12725
12726 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12727 {
12728         u32 nvcfg1;
12729
12730         nvcfg1 = tr32(NVRAM_CFG1);
12731
12732         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12733         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12734         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12735                 tp->nvram_jedecnum = JEDEC_ATMEL;
12736                 tg3_flag_set(tp, NVRAM_BUFFERED);
12737                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12738
12739                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12740                 tw32(NVRAM_CFG1, nvcfg1);
12741                 return;
12742         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12743         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12744         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12745         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12746         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12747         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12748         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12749                 tp->nvram_jedecnum = JEDEC_ATMEL;
12750                 tg3_flag_set(tp, NVRAM_BUFFERED);
12751                 tg3_flag_set(tp, FLASH);
12752
12753                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12754                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12755                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12756                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12757                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12758                         break;
12759                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12760                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12761                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12762                         break;
12763                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12764                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12765                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12766                         break;
12767                 }
12768                 break;
12769         case FLASH_5752VENDOR_ST_M45PE10:
12770         case FLASH_5752VENDOR_ST_M45PE20:
12771         case FLASH_5752VENDOR_ST_M45PE40:
12772                 tp->nvram_jedecnum = JEDEC_ST;
12773                 tg3_flag_set(tp, NVRAM_BUFFERED);
12774                 tg3_flag_set(tp, FLASH);
12775
12776                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12777                 case FLASH_5752VENDOR_ST_M45PE10:
12778                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12779                         break;
12780                 case FLASH_5752VENDOR_ST_M45PE20:
12781                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12782                         break;
12783                 case FLASH_5752VENDOR_ST_M45PE40:
12784                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12785                         break;
12786                 }
12787                 break;
12788         default:
12789                 tg3_flag_set(tp, NO_NVRAM);
12790                 return;
12791         }
12792
12793         tg3_nvram_get_pagesize(tp, nvcfg1);
12794         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12795                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12796 }
12797
12799 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12800 {
12801         u32 nvcfg1;
12802
12803         nvcfg1 = tr32(NVRAM_CFG1);
12804
12805         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12806         case FLASH_5717VENDOR_ATMEL_EEPROM:
12807         case FLASH_5717VENDOR_MICRO_EEPROM:
12808                 tp->nvram_jedecnum = JEDEC_ATMEL;
12809                 tg3_flag_set(tp, NVRAM_BUFFERED);
12810                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12811
12812                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12813                 tw32(NVRAM_CFG1, nvcfg1);
12814                 return;
12815         case FLASH_5717VENDOR_ATMEL_MDB011D:
12816         case FLASH_5717VENDOR_ATMEL_ADB011B:
12817         case FLASH_5717VENDOR_ATMEL_ADB011D:
12818         case FLASH_5717VENDOR_ATMEL_MDB021D:
12819         case FLASH_5717VENDOR_ATMEL_ADB021B:
12820         case FLASH_5717VENDOR_ATMEL_ADB021D:
12821         case FLASH_5717VENDOR_ATMEL_45USPT:
12822                 tp->nvram_jedecnum = JEDEC_ATMEL;
12823                 tg3_flag_set(tp, NVRAM_BUFFERED);
12824                 tg3_flag_set(tp, FLASH);
12825
12826                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12827                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12828                         /* Detect size with tg3_nvram_get_size() */
12829                         break;
12830                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12831                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12832                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12833                         break;
12834                 default:
12835                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12836                         break;
12837                 }
12838                 break;
12839         case FLASH_5717VENDOR_ST_M_M25PE10:
12840         case FLASH_5717VENDOR_ST_A_M25PE10:
12841         case FLASH_5717VENDOR_ST_M_M45PE10:
12842         case FLASH_5717VENDOR_ST_A_M45PE10:
12843         case FLASH_5717VENDOR_ST_M_M25PE20:
12844         case FLASH_5717VENDOR_ST_A_M25PE20:
12845         case FLASH_5717VENDOR_ST_M_M45PE20:
12846         case FLASH_5717VENDOR_ST_A_M45PE20:
12847         case FLASH_5717VENDOR_ST_25USPT:
12848         case FLASH_5717VENDOR_ST_45USPT:
12849                 tp->nvram_jedecnum = JEDEC_ST;
12850                 tg3_flag_set(tp, NVRAM_BUFFERED);
12851                 tg3_flag_set(tp, FLASH);
12852
12853                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12854                 case FLASH_5717VENDOR_ST_M_M25PE20:
12855                 case FLASH_5717VENDOR_ST_M_M45PE20:
12856                         /* Detect size with tg3_nvram_get_size() */
12857                         break;
12858                 case FLASH_5717VENDOR_ST_A_M25PE20:
12859                 case FLASH_5717VENDOR_ST_A_M45PE20:
12860                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12861                         break;
12862                 default:
12863                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12864                         break;
12865                 }
12866                 break;
12867         default:
12868                 tg3_flag_set(tp, NO_NVRAM);
12869                 return;
12870         }
12871
12872         tg3_nvram_get_pagesize(tp, nvcfg1);
12873         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12874                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12875 }
12876
12877 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12878 {
12879         u32 nvcfg1, nvmpinstrp;
12880
12881         nvcfg1 = tr32(NVRAM_CFG1);
12882         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12883
12884         switch (nvmpinstrp) {
12885         case FLASH_5720_EEPROM_HD:
12886         case FLASH_5720_EEPROM_LD:
12887                 tp->nvram_jedecnum = JEDEC_ATMEL;
12888                 tg3_flag_set(tp, NVRAM_BUFFERED);
12889
12890                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12891                 tw32(NVRAM_CFG1, nvcfg1);
12892                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12893                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12894                 else
12895                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12896                 return;
12897         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12898         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12899         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12900         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12901         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12902         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12903         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12904         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12905         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12906         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12907         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12908         case FLASH_5720VENDOR_ATMEL_45USPT:
12909                 tp->nvram_jedecnum = JEDEC_ATMEL;
12910                 tg3_flag_set(tp, NVRAM_BUFFERED);
12911                 tg3_flag_set(tp, FLASH);
12912
12913                 switch (nvmpinstrp) {
12914                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12915                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12916                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12917                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12918                         break;
12919                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12920                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12921                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12922                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12923                         break;
12924                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12925                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12926                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12927                         break;
12928                 default:
12929                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12930                         break;
12931                 }
12932                 break;
12933         case FLASH_5720VENDOR_M_ST_M25PE10:
12934         case FLASH_5720VENDOR_M_ST_M45PE10:
12935         case FLASH_5720VENDOR_A_ST_M25PE10:
12936         case FLASH_5720VENDOR_A_ST_M45PE10:
12937         case FLASH_5720VENDOR_M_ST_M25PE20:
12938         case FLASH_5720VENDOR_M_ST_M45PE20:
12939         case FLASH_5720VENDOR_A_ST_M25PE20:
12940         case FLASH_5720VENDOR_A_ST_M45PE20:
12941         case FLASH_5720VENDOR_M_ST_M25PE40:
12942         case FLASH_5720VENDOR_M_ST_M45PE40:
12943         case FLASH_5720VENDOR_A_ST_M25PE40:
12944         case FLASH_5720VENDOR_A_ST_M45PE40:
12945         case FLASH_5720VENDOR_M_ST_M25PE80:
12946         case FLASH_5720VENDOR_M_ST_M45PE80:
12947         case FLASH_5720VENDOR_A_ST_M25PE80:
12948         case FLASH_5720VENDOR_A_ST_M45PE80:
12949         case FLASH_5720VENDOR_ST_25USPT:
12950         case FLASH_5720VENDOR_ST_45USPT:
12951                 tp->nvram_jedecnum = JEDEC_ST;
12952                 tg3_flag_set(tp, NVRAM_BUFFERED);
12953                 tg3_flag_set(tp, FLASH);
12954
12955                 switch (nvmpinstrp) {
12956                 case FLASH_5720VENDOR_M_ST_M25PE20:
12957                 case FLASH_5720VENDOR_M_ST_M45PE20:
12958                 case FLASH_5720VENDOR_A_ST_M25PE20:
12959                 case FLASH_5720VENDOR_A_ST_M45PE20:
12960                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12961                         break;
12962                 case FLASH_5720VENDOR_M_ST_M25PE40:
12963                 case FLASH_5720VENDOR_M_ST_M45PE40:
12964                 case FLASH_5720VENDOR_A_ST_M25PE40:
12965                 case FLASH_5720VENDOR_A_ST_M45PE40:
12966                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12967                         break;
12968                 case FLASH_5720VENDOR_M_ST_M25PE80:
12969                 case FLASH_5720VENDOR_M_ST_M45PE80:
12970                 case FLASH_5720VENDOR_A_ST_M25PE80:
12971                 case FLASH_5720VENDOR_A_ST_M45PE80:
12972                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12973                         break;
12974                 default:
12975                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12976                         break;
12977                 }
12978                 break;
12979         default:
12980                 tg3_flag_set(tp, NO_NVRAM);
12981                 return;
12982         }
12983
12984         tg3_nvram_get_pagesize(tp, nvcfg1);
12985         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12986                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12987 }
12988
12989 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12990 static void __devinit tg3_nvram_init(struct tg3 *tp)
12991 {
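        /* Reset the EEPROM access state machine and program the default
         * serial clock period before enabling auto-SEEPROM access.
         */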
12992         tw32_f(GRC_EEPROM_ADDR,
12993              (EEPROM_ADDR_FSM_RESET |
12994               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12995                EEPROM_ADDR_CLKPERD_SHIFT)));
12996
12997         msleep(1);
12998
12999         /* Enable serial EEPROM (seeprom) accesses. */
13000         tw32_f(GRC_LOCAL_CTRL,
13001              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13002         udelay(100);
13003
13004         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13005             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13006                 tg3_flag_set(tp, NVRAM);
13007
13008                 if (tg3_nvram_lock(tp)) {
13009                         netdev_warn(tp->dev,
13010                                     "Cannot get nvram lock, %s failed\n",
13011                                     __func__);
13012                         return;
13013                 }
13014                 tg3_enable_nvram_access(tp);
13015
13016                 tp->nvram_size = 0;
13017
13018                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13019                         tg3_get_5752_nvram_info(tp);
13020                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13021                         tg3_get_5755_nvram_info(tp);
13022                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13023                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13024                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13025                         tg3_get_5787_nvram_info(tp);
13026                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13027                         tg3_get_5761_nvram_info(tp);
13028                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13029                         tg3_get_5906_nvram_info(tp);
13030                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13031                          tg3_flag(tp, 57765_CLASS))
13032                         tg3_get_57780_nvram_info(tp);
13033                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13034                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13035                         tg3_get_5717_nvram_info(tp);
13036                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13037                         tg3_get_5720_nvram_info(tp);
13038                 else
13039                         tg3_get_nvram_info(tp);
13040
13041                 if (tp->nvram_size == 0)
13042                         tg3_get_nvram_size(tp);
13043
13044                 tg3_disable_nvram_access(tp);
13045                 tg3_nvram_unlock(tp);
13046
13047         } else {
13048                 tg3_flag_clear(tp, NVRAM);
13049                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13050
13051                 tg3_get_eeprom_size(tp);
13052         }
13053 }
13054
13055 struct subsys_tbl_ent {
13056         u16 subsys_vendor, subsys_devid;
13057         u32 phy_id;
13058 };
13059
13060 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13061         /* Broadcom boards. */
13062         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13063           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13064         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13065           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13066         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13067           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13068         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13069           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13070         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13071           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13072         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13073           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13074         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13075           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13076         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13077           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13078         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13079           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13080         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13081           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13082         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13083           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13084
13085         /* 3com boards. */
13086         { TG3PCI_SUBVENDOR_ID_3COM,
13087           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13088         { TG3PCI_SUBVENDOR_ID_3COM,
13089           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13090         { TG3PCI_SUBVENDOR_ID_3COM,
13091           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13092         { TG3PCI_SUBVENDOR_ID_3COM,
13093           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13094         { TG3PCI_SUBVENDOR_ID_3COM,
13095           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13096
13097         /* DELL boards. */
13098         { TG3PCI_SUBVENDOR_ID_DELL,
13099           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13100         { TG3PCI_SUBVENDOR_ID_DELL,
13101           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13102         { TG3PCI_SUBVENDOR_ID_DELL,
13103           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13104         { TG3PCI_SUBVENDOR_ID_DELL,
13105           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13106
13107         /* Compaq boards. */
13108         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13109           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13110         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13111           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13112         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13113           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13114         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13115           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13116         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13117           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13118
13119         /* IBM boards. */
13120         { TG3PCI_SUBVENDOR_ID_IBM,
13121           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13122 };
13123
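/* Scan subsys_id_to_phy_id[] for an entry matching this device's PCI
 * subsystem vendor/device pair; returns NULL when no board matches.
 */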
13124 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13125 {
13126         int i;
13127
13128         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13129                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13130                      tp->pdev->subsystem_vendor) &&
13131                     (subsys_id_to_phy_id[i].subsys_devid ==
13132                      tp->pdev->subsystem_device))
13133                         return &subsys_id_to_phy_id[i];
13134         }
13135         return NULL;
13136 }
13137
13138 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13139 {
13140         u32 val;
13141
13142         tp->phy_id = TG3_PHY_ID_INVALID;
13143         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13144
13145         /* Assume an onboard device that is WOL-capable by default. */
13146         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13147         tg3_flag_set(tp, WOL_CAP);
13148
13149         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13150                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13151                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13152                         tg3_flag_set(tp, IS_NIC);
13153                 }
13154                 val = tr32(VCPU_CFGSHDW);
13155                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13156                         tg3_flag_set(tp, ASPM_WORKAROUND);
13157                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13158                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13159                         tg3_flag_set(tp, WOL_ENABLE);
13160                         device_set_wakeup_enable(&tp->pdev->dev, true);
13161                 }
13162                 goto done;
13163         }
13164
13165         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13166         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13167                 u32 nic_cfg, led_cfg;
13168                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13169                 int eeprom_phy_serdes = 0;
13170
13171                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13172                 tp->nic_sram_data_cfg = nic_cfg;
13173
13174                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13175                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13176                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13177                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13178                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13179                     (ver > 0) && (ver < 0x100))
13180                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13181
13182                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13183                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13184
13185                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13186                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13187                         eeprom_phy_serdes = 1;
13188
13189                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13190                 if (nic_phy_id != 0) {
13191                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13192                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13193
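                        /* Repack the two halves of the SRAM PHY ID word
                         * into the driver's phy_id layout; this mirrors
                         * the hw_phy_id construction in tg3_phy_probe().
                         */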
13194                         eeprom_phy_id  = (id1 >> 16) << 10;
13195                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13196                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13197                 } else
13198                         eeprom_phy_id = 0;
13199
13200                 tp->phy_id = eeprom_phy_id;
13201                 if (eeprom_phy_serdes) {
13202                         if (!tg3_flag(tp, 5705_PLUS))
13203                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13204                         else
13205                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13206                 }
13207
13208                 if (tg3_flag(tp, 5750_PLUS))
13209                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13210                                     SHASTA_EXT_LED_MODE_MASK);
13211                 else
13212                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13213
13214                 switch (led_cfg) {
13215                 default:
13216                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13217                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13218                         break;
13219
13220                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13221                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13222                         break;
13223
13224                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13225                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13226
13227                         /* Default to PHY_1 mode if 0 (MAC mode) is read
13228                          * back, as some older 5700/5701 bootcode reports.
13229                          */
13230                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13231                             ASIC_REV_5700 ||
13232                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13233                             ASIC_REV_5701)
13234                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13235
13236                         break;
13237
13238                 case SHASTA_EXT_LED_SHARED:
13239                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13240                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13241                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13242                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13243                                                  LED_CTRL_MODE_PHY_2);
13244                         break;
13245
13246                 case SHASTA_EXT_LED_MAC:
13247                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13248                         break;
13249
13250                 case SHASTA_EXT_LED_COMBO:
13251                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13252                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13253                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13254                                                  LED_CTRL_MODE_PHY_2);
13255                         break;
13256
13257                 }
13258
13259                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13260                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13261                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13262                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13263
13264                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13265                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13266
13267                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13268                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13269                         if ((tp->pdev->subsystem_vendor ==
13270                              PCI_VENDOR_ID_ARIMA) &&
13271                             (tp->pdev->subsystem_device == 0x205a ||
13272                              tp->pdev->subsystem_device == 0x2063))
13273                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13274                 } else {
13275                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13276                         tg3_flag_set(tp, IS_NIC);
13277                 }
13278
13279                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13280                         tg3_flag_set(tp, ENABLE_ASF);
13281                         if (tg3_flag(tp, 5750_PLUS))
13282                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13283                 }
13284
13285                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13286                     tg3_flag(tp, 5750_PLUS))
13287                         tg3_flag_set(tp, ENABLE_APE);
13288
13289                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13290                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13291                         tg3_flag_clear(tp, WOL_CAP);
13292
13293                 if (tg3_flag(tp, WOL_CAP) &&
13294                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13295                         tg3_flag_set(tp, WOL_ENABLE);
13296                         device_set_wakeup_enable(&tp->pdev->dev, true);
13297                 }
13298
13299                 if (cfg2 & (1 << 17))
13300                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13301
13302                 /* SerDes signal pre-emphasis in register 0x590 is set by
13303                  * the bootcode if bit 18 is set. */
13304                 if (cfg2 & (1 << 18))
13305                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13306
13307                 if ((tg3_flag(tp, 57765_PLUS) ||
13308                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13309                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13310                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13311                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13312
13313                 if (tg3_flag(tp, PCI_EXPRESS) &&
13314                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13315                     !tg3_flag(tp, 57765_PLUS)) {
13316                         u32 cfg3;
13317
13318                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13319                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13320                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13321                 }
13322
13323                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13324                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13325                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13326                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13327                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13328                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13329         }
13330 done:
13331         if (tg3_flag(tp, WOL_CAP))
13332                 device_set_wakeup_enable(&tp->pdev->dev,
13333                                          tg3_flag(tp, WOL_ENABLE));
13334         else
13335                 device_set_wakeup_capable(&tp->pdev->dev, false);
13336 }
13337
13338 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13339 {
13340         int i;
13341         u32 val;
13342
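        /* Pulse the command: write it with the START bit set, then
         * rewrite it with START clear, and poll OTP_STATUS below for
         * completion.
         */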
13343         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13344         tw32(OTP_CTRL, cmd);
13345
13346         /* Wait for up to 1 ms for command to execute. */
13347         for (i = 0; i < 100; i++) {
13348                 val = tr32(OTP_STATUS);
13349                 if (val & OTP_STATUS_CMD_DONE)
13350                         break;
13351                 udelay(10);
13352         }
13353
13354         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13355 }
13356
13357 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13358  * configuration is a 32-bit value that straddles the alignment boundary.
13359  * We do two 32-bit reads and then shift and merge the results.
13360  */
13361 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13362 {
13363         u32 bhalf_otp, thalf_otp;
13364
13365         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13366
13367         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13368                 return 0;
13369
13370         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13371
13372         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13373                 return 0;
13374
13375         thalf_otp = tr32(OTP_READ_DATA);
13376
13377         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13378
13379         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13380                 return 0;
13381
13382         bhalf_otp = tr32(OTP_READ_DATA);
13383
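        /* The low 16 bits of the top half become the high word and the
         * high 16 bits of the bottom half the low word; e.g. a thalf of
         * 0x1234abcd and a bhalf of 0x5678ef01 yield 0xabcd5678.
         */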
13384         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13385 }
13386
13387 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13388 {
13389         u32 adv = ADVERTISED_Autoneg;
13390
13391         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13392                 adv |= ADVERTISED_1000baseT_Half |
13393                        ADVERTISED_1000baseT_Full;
13394
13395         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13396                 adv |= ADVERTISED_100baseT_Half |
13397                        ADVERTISED_100baseT_Full |
13398                        ADVERTISED_10baseT_Half |
13399                        ADVERTISED_10baseT_Full |
13400                        ADVERTISED_TP;
13401         else
13402                 adv |= ADVERTISED_FIBRE;
13403
13404         tp->link_config.advertising = adv;
13405         tp->link_config.speed = SPEED_UNKNOWN;
13406         tp->link_config.duplex = DUPLEX_UNKNOWN;
13407         tp->link_config.autoneg = AUTONEG_ENABLE;
13408         tp->link_config.active_speed = SPEED_UNKNOWN;
13409         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13410
13411         tp->old_link = -1;
13412 }
13413
13414 static int __devinit tg3_phy_probe(struct tg3 *tp)
13415 {
13416         u32 hw_phy_id_1, hw_phy_id_2;
13417         u32 hw_phy_id, hw_phy_id_masked;
13418         int err;
13419
13420         /* Flow-control autonegotiation is the default behavior. */
13421         tg3_flag_set(tp, PAUSE_AUTONEG);
13422         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13423
13424         if (tg3_flag(tp, USE_PHYLIB))
13425                 return tg3_phy_init(tp);
13426
13427         /* Reading the PHY ID register can conflict with ASF
13428          * firmware access to the PHY hardware.
13429          */
13430         err = 0;
13431         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13432                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13433         } else {
13434                 /* Now read the physical PHY_ID from the chip and verify
13435                  * that it is sane.  If it doesn't look good, we fall back
13436                  * to the PHY_ID found in the eeprom area, and failing
13437                  * that, to the hard-coded subsystem ID table.
13438                  */
13439                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13440                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13441
13442                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13443                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13444                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13445
13446                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13447         }
13448
13449         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13450                 tp->phy_id = hw_phy_id;
13451                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13452                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13453                 else
13454                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13455         } else {
13456                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13457                         /* Do nothing, phy ID already set up in
13458                          * tg3_get_eeprom_hw_cfg().
13459                          */
13460                 } else {
13461                         struct subsys_tbl_ent *p;
13462
13463                         /* No eeprom signature?  Try the hardcoded
13464                          * subsys device table.
13465                          */
13466                         p = tg3_lookup_by_subsys(tp);
13467                         if (!p)
13468                                 return -ENODEV;
13469
13470                         tp->phy_id = p->phy_id;
13471                         if (!tp->phy_id ||
13472                             tp->phy_id == TG3_PHY_ID_BCM8002)
13473                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13474                 }
13475         }
13476
13477         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13478             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13479              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13480              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13481               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13482              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13483               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13484                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13485
13486         tg3_phy_init_link_config(tp);
13487
13488         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13489             !tg3_flag(tp, ENABLE_APE) &&
13490             !tg3_flag(tp, ENABLE_ASF)) {
13491                 u32 bmsr, dummy;
13492
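                /* BMSR's link bit is latched-low: read it once to clear
                 * a stale latch, then trust the second read.
                 */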
13493                 tg3_readphy(tp, MII_BMSR, &bmsr);
13494                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13495                     (bmsr & BMSR_LSTATUS))
13496                         goto skip_phy_reset;
13497
13498                 err = tg3_phy_reset(tp);
13499                 if (err)
13500                         return err;
13501
13502                 tg3_phy_set_wirespeed(tp);
13503
13504                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13505                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13506                                             tp->link_config.flowctrl);
13507
13508                         tg3_writephy(tp, MII_BMCR,
13509                                      BMCR_ANENABLE | BMCR_ANRESTART);
13510                 }
13511         }
13512
13513 skip_phy_reset:
13514         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13515                 err = tg3_init_5401phy_dsp(tp);
13516                 if (err)
13517                         return err;
13518
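                /* The 5401 DSP setup is run a second time once the first
                 * pass succeeds (a 5401-specific quirk); only the second
                 * result is propagated to the caller.
                 */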
13519                 err = tg3_init_5401phy_dsp(tp);
13520         }
13521
13522         return err;
13523 }
13524
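/* Parse the PCI VPD read-only section: on Dell boards (VPD MFR_ID
 * "1028", Dell's PCI vendor ID in ASCII) the V0 keyword supplies a
 * bootcode version for tp->fw_ver, and the PN keyword supplies the
 * board part number, with per-ASIC defaults used when VPD is absent.
 */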
13525 static void __devinit tg3_read_vpd(struct tg3 *tp)
13526 {
13527         u8 *vpd_data;
13528         unsigned int block_end, rosize, len;
13529         u32 vpdlen;
13530         int j, i = 0;
13531
13532         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13533         if (!vpd_data)
13534                 goto out_no_vpd;
13535
13536         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13537         if (i < 0)
13538                 goto out_not_found;
13539
13540         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13541         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13542         i += PCI_VPD_LRDT_TAG_SIZE;
13543
13544         if (block_end > vpdlen)
13545                 goto out_not_found;
13546
13547         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13548                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13549         if (j > 0) {
13550                 len = pci_vpd_info_field_size(&vpd_data[j]);
13551
13552                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13553                 if (j + len > block_end || len != 4 ||
13554                     memcmp(&vpd_data[j], "1028", 4))
13555                         goto partno;
13556
13557                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13558                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13559                 if (j < 0)
13560                         goto partno;
13561
13562                 len = pci_vpd_info_field_size(&vpd_data[j]);
13563
13564                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13565                 if (j + len > block_end)
13566                         goto partno;
13567
13568                 memcpy(tp->fw_ver, &vpd_data[j], min_t(u32, len, TG3_VER_SIZE - 1));
13569                 strncat(tp->fw_ver, " bc ", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13570         }
13571
13572 partno:
13573         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13574                                       PCI_VPD_RO_KEYWORD_PARTNO);
13575         if (i < 0)
13576                 goto out_not_found;
13577
13578         len = pci_vpd_info_field_size(&vpd_data[i]);
13579
13580         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13581         if (len > TG3_BPN_SIZE ||
13582             (len + i) > vpdlen)
13583                 goto out_not_found;
13584
13585         memcpy(tp->board_part_number, &vpd_data[i], len);
13586
13587 out_not_found:
13588         kfree(vpd_data);
13589         if (tp->board_part_number[0])
13590                 return;
13591
13592 out_no_vpd:
13593         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13594                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13595                         strcpy(tp->board_part_number, "BCM5717");
13596                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13597                         strcpy(tp->board_part_number, "BCM5718");
13598                 else
13599                         goto nomatch;
13600         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13601                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13602                         strcpy(tp->board_part_number, "BCM57780");
13603                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13604                         strcpy(tp->board_part_number, "BCM57760");
13605                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13606                         strcpy(tp->board_part_number, "BCM57790");
13607                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13608                         strcpy(tp->board_part_number, "BCM57788");
13609                 else
13610                         goto nomatch;
13611         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13612                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13613                         strcpy(tp->board_part_number, "BCM57761");
13614                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13615                         strcpy(tp->board_part_number, "BCM57765");
13616                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13617                         strcpy(tp->board_part_number, "BCM57781");
13618                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13619                         strcpy(tp->board_part_number, "BCM57785");
13620                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13621                         strcpy(tp->board_part_number, "BCM57791");
13622                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13623                         strcpy(tp->board_part_number, "BCM57795");
13624                 else
13625                         goto nomatch;
13626         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13627                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13628                         strcpy(tp->board_part_number, "BCM57762");
13629                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13630                         strcpy(tp->board_part_number, "BCM57766");
13631                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13632                         strcpy(tp->board_part_number, "BCM57782");
13633                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13634                         strcpy(tp->board_part_number, "BCM57786");
13635                 else
13636                         goto nomatch;
13637         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13638                 strcpy(tp->board_part_number, "BCM95906");
13639         } else {
13640 nomatch:
13641                 strcpy(tp->board_part_number, "none");
13642         }
13643 }
13644
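/* A firmware image is considered valid when the first word at @offset
 * carries the 0x0c000000 signature in its top six bits and the second
 * word is zero.
 */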
13645 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13646 {
13647         u32 val;
13648
13649         if (tg3_nvram_read(tp, offset, &val) ||
13650             (val & 0xfc000000) != 0x0c000000 ||
13651             tg3_nvram_read(tp, offset + 4, &val) ||
13652             val != 0)
13653                 return 0;
13654
13655         return 1;
13656 }
13657
13658 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13659 {
13660         u32 val, offset, start, ver_offset;
13661         int i, dst_off;
13662         bool newver = false;
13663
13664         if (tg3_nvram_read(tp, 0xc, &offset) ||
13665             tg3_nvram_read(tp, 0x4, &start))
13666                 return;
13667
13668         offset = tg3_nvram_logical_addr(tp, offset);
13669
13670         if (tg3_nvram_read(tp, offset, &val))
13671                 return;
13672
13673         if ((val & 0xfc000000) == 0x0c000000) {
13674                 if (tg3_nvram_read(tp, offset + 4, &val))
13675                         return;
13676
13677                 if (val == 0)
13678                         newver = true;
13679         }
13680
13681         dst_off = strlen(tp->fw_ver);
13682
13683         if (newver) {
13684                 if (TG3_VER_SIZE - dst_off < 16 ||
13685                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13686                         return;
13687
13688                 offset = offset + ver_offset - start;
13689                 for (i = 0; i < 16; i += 4) {
13690                         __be32 v;
13691                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13692                                 return;
13693
13694                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13695                 }
13696         } else {
13697                 u32 major, minor;
13698
13699                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13700                         return;
13701
13702                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13703                         TG3_NVM_BCVER_MAJSFT;
13704                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13705                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13706                          "v%d.%02d", major, minor);
13707         }
13708 }
13709
13710 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13711 {
13712         u32 val, major, minor;
13713
13714         /* Use native endian representation */
13715         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13716                 return;
13717
13718         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13719                 TG3_NVM_HWSB_CFG1_MAJSFT;
13720         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13721                 TG3_NVM_HWSB_CFG1_MINSFT;
13722
13723         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13724 }
13725
13726 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13727 {
13728         u32 offset, major, minor, build;
13729
13730         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13731
13732         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13733                 return;
13734
13735         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13736         case TG3_EEPROM_SB_REVISION_0:
13737                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13738                 break;
13739         case TG3_EEPROM_SB_REVISION_2:
13740                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13741                 break;
13742         case TG3_EEPROM_SB_REVISION_3:
13743                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13744                 break;
13745         case TG3_EEPROM_SB_REVISION_4:
13746                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13747                 break;
13748         case TG3_EEPROM_SB_REVISION_5:
13749                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13750                 break;
13751         case TG3_EEPROM_SB_REVISION_6:
13752                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13753                 break;
13754         default:
13755                 return;
13756         }
13757
13758         if (tg3_nvram_read(tp, offset, &val))
13759                 return;
13760
13761         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13762                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13763         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13764                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13765         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13766
13767         if (minor > 99 || build > 26)
13768                 return;
13769
13770         offset = strlen(tp->fw_ver);
13771         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13772                  " v%d.%02d", major, minor);
13773
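        /* Builds 1-26 are encoded as a single 'a'-'z' suffix; the
         * build > 26 check above guarantees the encoding fits.
         */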
13774         if (build > 0) {
13775                 offset = strlen(tp->fw_ver);
13776                 if (offset < TG3_VER_SIZE - 1)
13777                         tp->fw_ver[offset] = 'a' + build - 1;
13778         }
13779 }
13780
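/* Walk the NVRAM directory for the ASF init-code entry, validate the
 * image it points to with tg3_fw_img_is_valid(), and append the version
 * string stored inside that image to tp->fw_ver.
 */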
13781 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13782 {
13783         u32 val, offset, start;
13784         int i, vlen;
13785
13786         for (offset = TG3_NVM_DIR_START;
13787              offset < TG3_NVM_DIR_END;
13788              offset += TG3_NVM_DIRENT_SIZE) {
13789                 if (tg3_nvram_read(tp, offset, &val))
13790                         return;
13791
13792                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13793                         break;
13794         }
13795
13796         if (offset == TG3_NVM_DIR_END)
13797                 return;
13798
13799         if (!tg3_flag(tp, 5705_PLUS))
13800                 start = 0x08000000;
13801         else if (tg3_nvram_read(tp, offset - 4, &start))
13802                 return;
13803
13804         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13805             !tg3_fw_img_is_valid(tp, offset) ||
13806             tg3_nvram_read(tp, offset + 8, &val))
13807                 return;
13808
13809         offset += val - start;
13810
13811         vlen = strlen(tp->fw_ver);
13812
13813         tp->fw_ver[vlen++] = ',';
13814         tp->fw_ver[vlen++] = ' ';
13815
13816         for (i = 0; i < 4; i++) {
13817                 __be32 v;
13818                 if (tg3_nvram_read_be32(tp, offset, &v))
13819                         return;
13820
13821                 offset += sizeof(v);
13822
13823                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13824                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13825                         break;
13826                 }
13827
13828                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13829                 vlen += sizeof(v);
13830         }
13831 }
13832
13833 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13834 {
13835         int vlen;
13836         u32 apedata;
13837         char *fwtype;
13838
13839         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13840                 return;
13841
13842         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13843         if (apedata != APE_SEG_SIG_MAGIC)
13844                 return;
13845
13846         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13847         if (!(apedata & APE_FW_STATUS_READY))
13848                 return;
13849
13850         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13851
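        /* The 32-bit APE firmware version word packs the major, minor,
         * revision and build numbers into separate bit fields; the
         * mask/shift pairs below unpack them for display.
         */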
13852         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13853                 tg3_flag_set(tp, APE_HAS_NCSI);
13854                 fwtype = "NCSI";
13855         } else {
13856                 fwtype = "DASH";
13857         }
13858
13859         vlen = strlen(tp->fw_ver);
13860
13861         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13862                  fwtype,
13863                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13864                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13865                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13866                  (apedata & APE_FW_VERSION_BLDMSK));
13867 }
13868
13869 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13870 {
13871         u32 val;
13872         bool vpd_vers = false;
13873
13874         if (tp->fw_ver[0] != 0)
13875                 vpd_vers = true;
13876
13877         if (tg3_flag(tp, NO_NVRAM)) {
13878                 strcat(tp->fw_ver, "sb");
13879                 return;
13880         }
13881
13882         if (tg3_nvram_read(tp, 0, &val))
13883                 return;
13884
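        /* Word 0 of NVRAM identifies the image type: a full EEPROM image
         * carries a bootcode version, while the FW and HW magic values
         * indicate self-boot images with their own version formats.
         */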
13885         if (val == TG3_EEPROM_MAGIC)
13886                 tg3_read_bc_ver(tp);
13887         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13888                 tg3_read_sb_ver(tp, val);
13889         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13890                 tg3_read_hwsb_ver(tp);
13891         else
13892                 return;
13893
13894         if (vpd_vers)
13895                 goto done;
13896
13897         if (tg3_flag(tp, ENABLE_APE)) {
13898                 if (tg3_flag(tp, ENABLE_ASF))
13899                         tg3_read_dash_ver(tp);
13900         } else if (tg3_flag(tp, ENABLE_ASF)) {
13901                 tg3_read_mgmtfw_ver(tp);
13902         }
13903
13904 done:
13905         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13906 }
13907
13908 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13909 {
13910         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13911                 return TG3_RX_RET_MAX_SIZE_5717;
13912         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13913                 return TG3_RX_RET_MAX_SIZE_5700;
13914         else
13915                 return TG3_RX_RET_MAX_SIZE_5705;
13916 }
13917
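/* Host bridges known to reorder posted writes to the mailbox registers;
 * when one of these is present (and we are not on PCI Express), every
 * mailbox write is read back to force ordering.  See tg3_get_invariants().
 */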
13918 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13919         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13920         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13921         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13922         { },
13923 };
13924
13925 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13926 {
13927         struct pci_dev *peer;
13928         unsigned int func, devnr = tp->pdev->devfn & ~7;
13929
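        /* devfn packs the PCI slot in the upper five bits and the function
         * in the lower three, so masking off the low three bits yields
         * function 0 of our own slot; e.g. devfn 0x0a (slot 1, function 2)
         * gives devnr 0x08.  Scan all eight functions for the other port
         * of a dual-port device.
         */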
13930         for (func = 0; func < 8; func++) {
13931                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13932                 if (peer && peer != tp->pdev)
13933                         break;
13934                 pci_dev_put(peer);
13935         }
13936         /* 5704 can be configured in single-port mode; set peer to
13937          * tp->pdev in that case.
13938          */
13939         if (!peer) {
13940                 peer = tp->pdev;
13941                 return peer;
13942         }
13943
13944         /*
13945          * We don't need to keep the refcount elevated; there's no way
13946          * to remove one half of this device without removing the other
13947          */
13948         pci_dev_put(peer);
13949
13950         return peer;
13951 }
13952
13953 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
13954 {
13955         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
13956         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13957                 u32 reg;
13958
13959                 /* All devices that use the alternate
13960                  * ASIC REV location have a CPMU.
13961                  */
13962                 tg3_flag_set(tp, CPMU_PRESENT);
13963
13964                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13965                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13966                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13967                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13968                         reg = TG3PCI_GEN2_PRODID_ASICREV;
13969                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13970                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13971                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13972                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13973                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13974                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13975                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13976                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13977                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13978                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13979                         reg = TG3PCI_GEN15_PRODID_ASICREV;
13980                 else
13981                         reg = TG3PCI_PRODID_ASICREV;
13982
13983                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
13984         }
13985
13986         /* Wrong chip ID in 5752 A0. This code can be removed later
13987          * as A0 is not in production.
13988          */
13989         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13990                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13991
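        /* Establish the chip-family flags.  Each newer family implies the
         * older ones: 5717_PLUS and 57765_CLASS feed 57765_PLUS, which
         * feeds 5755_PLUS, then 5750_PLUS, then 5705_PLUS.
         */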
13992         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13993             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13994             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13995                 tg3_flag_set(tp, 5717_PLUS);
13996
13997         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13998             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
13999                 tg3_flag_set(tp, 57765_CLASS);
14000
14001         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14002                 tg3_flag_set(tp, 57765_PLUS);
14003
14004         /* Intentionally exclude ASIC_REV_5906 */
14005         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14006             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14007             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14008             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14009             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14010             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14011             tg3_flag(tp, 57765_PLUS))
14012                 tg3_flag_set(tp, 5755_PLUS);
14013
14014         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14015             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14016                 tg3_flag_set(tp, 5780_CLASS);
14017
14018         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14019             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14020             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14021             tg3_flag(tp, 5755_PLUS) ||
14022             tg3_flag(tp, 5780_CLASS))
14023                 tg3_flag_set(tp, 5750_PLUS);
14024
14025         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14026             tg3_flag(tp, 5750_PLUS))
14027                 tg3_flag_set(tp, 5705_PLUS);
14028 }
14029
14030 static int __devinit tg3_get_invariants(struct tg3 *tp)
14031 {
14032         u32 misc_ctrl_reg;
14033         u32 pci_state_reg, grc_misc_cfg;
14034         u32 val;
14035         u16 pci_cmd;
14036         int err;
14037
14038         /* Force memory write invalidate off.  If we leave it on,
14039          * then on 5700_BX chips we have to enable a workaround.
14040          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14041          * to match the cacheline size.  The Broadcom driver has this
14042          * workaround but turns MWI off all the time so it is never
14043          * used.  This seems to suggest that the workaround is insufficient.
14044          */
14045         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14046         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14047         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14048
14049         /* Important! -- Make sure register accesses are byteswapped
14050          * correctly.  Also, for those chips that require it, make
14051          * sure that indirect register accesses are enabled before
14052          * the first operation.
14053          */
14054         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14055                               &misc_ctrl_reg);
14056         tp->misc_host_ctrl |= (misc_ctrl_reg &
14057                                MISC_HOST_CTRL_CHIPREV);
14058         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14059                                tp->misc_host_ctrl);
14060
14061         tg3_detect_asic_rev(tp, misc_ctrl_reg);
14062
14063         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14064          * we need to disable memory and use config. cycles
14065          * only to access all registers. The 5702/03 chips
14066          * can mistakenly decode the special cycles from the
14067          * ICH chipsets as memory write cycles, causing corruption
14068          * of register and memory space. Only certain ICH bridges
14069          * will drive special cycles with non-zero data during the
14070          * address phase which can fall within the 5703's address
14071          * range. This is not an ICH bug as the PCI spec allows
14072          * non-zero address during special cycles. However, only
14073          * these ICH bridges are known to drive non-zero addresses
14074          * during special cycles.
14075          *
14076          * Since special cycles do not cross PCI bridges, we only
14077          * enable this workaround if the 5703 is on the secondary
14078          * bus of these ICH bridges.
14079          */
14080         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14081             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14082                 static struct tg3_dev_id {
14083                         u32     vendor;
14084                         u32     device;
14085                         u32     rev;
14086                 } ich_chipsets[] = {
14087                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14088                           PCI_ANY_ID },
14089                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14090                           PCI_ANY_ID },
14091                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14092                           0xa },
14093                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14094                           PCI_ANY_ID },
14095                         { },
14096                 };
14097                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14098                 struct pci_dev *bridge = NULL;
14099
14100                 while (pci_id->vendor != 0) {
14101                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14102                                                 bridge);
14103                         if (!bridge) {
14104                                 pci_id++;
14105                                 continue;
14106                         }
14107                         if (pci_id->rev != PCI_ANY_ID) {
14108                                 if (bridge->revision > pci_id->rev)
14109                                         continue;
14110                         }
14111                         if (bridge->subordinate &&
14112                             (bridge->subordinate->number ==
14113                              tp->pdev->bus->number)) {
14114                                 tg3_flag_set(tp, ICH_WORKAROUND);
14115                                 pci_dev_put(bridge);
14116                                 break;
14117                         }
14118                 }
14119         }
14120
14121         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14122                 static struct tg3_dev_id {
14123                         u32     vendor;
14124                         u32     device;
14125                 } bridge_chipsets[] = {
14126                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14127                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14128                         { },
14129                 };
14130                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14131                 struct pci_dev *bridge = NULL;
14132
14133                 while (pci_id->vendor != 0) {
14134                         bridge = pci_get_device(pci_id->vendor,
14135                                                 pci_id->device,
14136                                                 bridge);
14137                         if (!bridge) {
14138                                 pci_id++;
14139                                 continue;
14140                         }
14141                         if (bridge->subordinate &&
14142                             (bridge->subordinate->number <=
14143                              tp->pdev->bus->number) &&
14144                             (bridge->subordinate->subordinate >=
14145                              tp->pdev->bus->number)) {
14146                                 tg3_flag_set(tp, 5701_DMA_BUG);
14147                                 pci_dev_put(bridge);
14148                                 break;
14149                         }
14150                 }
14151         }
14152
14153         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14154          * DMA addresses > 40-bit. This bridge may have additional
14155          * 57xx devices behind it in some 4-port NIC designs, for example.
14156          * Any tg3 device found behind the bridge will also need the 40-bit
14157          * DMA workaround.
14158          */
14159         if (tg3_flag(tp, 5780_CLASS)) {
14160                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14161                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14162         } else {
14163                 struct pci_dev *bridge = NULL;
14164
14165                 do {
14166                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14167                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14168                                                 bridge);
14169                         if (bridge && bridge->subordinate &&
14170                             (bridge->subordinate->number <=
14171                              tp->pdev->bus->number) &&
14172                             (bridge->subordinate->subordinate >=
14173                              tp->pdev->bus->number)) {
14174                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14175                                 pci_dev_put(bridge);
14176                                 break;
14177                         }
14178                 } while (bridge);
14179         }
14180
14181         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14182             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14183                 tp->pdev_peer = tg3_find_peer(tp);
14184
14185         /* Determine TSO capabilities */
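        /* Roughly: 57765_PLUS chips use the third-generation hardware TSO
         * engine, 5755_PLUS and 5906 the second, and the remaining
         * 5750_PLUS chips the first (with an errata flag that newer 5750
         * revisions clear).  Older chips fall back to firmware TSO and
         * need a firmware image loaded; 5719 A0 gets no TSO at all due
         * to a hardware bug.
         */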
14186         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14187                 ; /* Do nothing. HW bug. */
14188         else if (tg3_flag(tp, 57765_PLUS))
14189                 tg3_flag_set(tp, HW_TSO_3);
14190         else if (tg3_flag(tp, 5755_PLUS) ||
14191                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14192                 tg3_flag_set(tp, HW_TSO_2);
14193         else if (tg3_flag(tp, 5750_PLUS)) {
14194                 tg3_flag_set(tp, HW_TSO_1);
14195                 tg3_flag_set(tp, TSO_BUG);
14196                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14197                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14198                         tg3_flag_clear(tp, TSO_BUG);
14199         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14200                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14201                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14202                 tg3_flag_set(tp, TSO_BUG);
14203                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14204                         tp->fw_needed = FIRMWARE_TG3TSO5;
14205                 else
14206                         tp->fw_needed = FIRMWARE_TG3TSO;
14207         }
14208
14209         /* Selectively allow TSO based on operating conditions */
14210         if (tg3_flag(tp, HW_TSO_1) ||
14211             tg3_flag(tp, HW_TSO_2) ||
14212             tg3_flag(tp, HW_TSO_3) ||
14213             tp->fw_needed) {
14214                 /* For firmware TSO, assume ASF is disabled.
14215                  * We'll disable TSO later if we discover ASF
14216                  * is enabled in tg3_get_eeprom_hw_cfg().
14217                  */
14218                 tg3_flag_set(tp, TSO_CAPABLE);
14219         } else {
14220                 tg3_flag_clear(tp, TSO_CAPABLE);
14221                 tg3_flag_clear(tp, TSO_BUG);
14222                 tp->fw_needed = NULL;
14223         }
14224
14225         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14226                 tp->fw_needed = FIRMWARE_TG3;
14227
14228         tp->irq_max = 1;
14229
14230         if (tg3_flag(tp, 5750_PLUS)) {
14231                 tg3_flag_set(tp, SUPPORT_MSI);
14232                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14233                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14234                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14235                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14236                      tp->pdev_peer == tp->pdev))
14237                         tg3_flag_clear(tp, SUPPORT_MSI);
14238
14239                 if (tg3_flag(tp, 5755_PLUS) ||
14240                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14241                         tg3_flag_set(tp, 1SHOT_MSI);
14242                 }
14243
14244                 if (tg3_flag(tp, 57765_PLUS)) {
14245                         tg3_flag_set(tp, SUPPORT_MSIX);
14246                         tp->irq_max = TG3_IRQ_MAX_VECS;
14247                         tg3_rss_init_dflt_indir_tbl(tp);
14248                 }
14249         }
14250
14251         if (tg3_flag(tp, 5755_PLUS) ||
14252             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14253                 tg3_flag_set(tp, SHORT_DMA_BUG);
14254
14255         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14256                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14257
14258         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14259             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14260             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14261                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14262
14263         if (tg3_flag(tp, 57765_PLUS) &&
14264             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14265                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14266
14267         if (!tg3_flag(tp, 5705_PLUS) ||
14268             tg3_flag(tp, 5780_CLASS) ||
14269             tg3_flag(tp, USE_JUMBO_BDFLAG))
14270                 tg3_flag_set(tp, JUMBO_CAPABLE);
14271
14272         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14273                               &pci_state_reg);
14274
14275         if (pci_is_pcie(tp->pdev)) {
14276                 u16 lnkctl;
14277
14278                 tg3_flag_set(tp, PCI_EXPRESS);
14279
14280                 pci_read_config_word(tp->pdev,
14281                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14282                                      &lnkctl);
14283                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14284                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14285                             ASIC_REV_5906) {
14286                                 tg3_flag_clear(tp, HW_TSO_2);
14287                                 tg3_flag_clear(tp, TSO_CAPABLE);
14288                         }
14289                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14290                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14291                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14292                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14293                                 tg3_flag_set(tp, CLKREQ_BUG);
14294                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14295                         tg3_flag_set(tp, L1PLLPD_EN);
14296                 }
14297         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14298                 /* BCM5785 devices are effectively PCIe devices, and should
14299                  * follow PCIe codepaths, but do not have a PCIe capabilities
14300                  * section.
14301                  */
14302                 tg3_flag_set(tp, PCI_EXPRESS);
14303         } else if (!tg3_flag(tp, 5705_PLUS) ||
14304                    tg3_flag(tp, 5780_CLASS)) {
14305                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14306                 if (!tp->pcix_cap) {
14307                         dev_err(&tp->pdev->dev,
14308                                 "Cannot find PCI-X capability, aborting\n");
14309                         return -EIO;
14310                 }
14311
14312                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14313                         tg3_flag_set(tp, PCIX_MODE);
14314         }
14315
14316         /* If we have an AMD 762 or VIA K8T800 chipset, write
14317          * reordering to the mailbox registers done by the host
14318          * controller can cause major trouble.  We read back from
14319          * every mailbox register write to force the writes to be
14320          * posted to the chip in order.
14321          */
14322         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14323             !tg3_flag(tp, PCI_EXPRESS))
14324                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14325
14326         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14327                              &tp->pci_cacheline_sz);
14328         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14329                              &tp->pci_lat_timer);
14330         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14331             tp->pci_lat_timer < 64) {
14332                 tp->pci_lat_timer = 64;
14333                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14334                                       tp->pci_lat_timer);
14335         }
14336
14337         /* Important! -- It is critical that the PCI-X hw workaround
14338          * situation is decided before the first MMIO register access.
14339          */
14340         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14341                 /* 5700 BX chips need to have their TX producer index
14342                  * mailboxes written twice to work around a bug.
14343                  */
14344                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14345
14346                 /* If we are in PCI-X mode, enable register write workaround.
14347                  *
14348                  * The workaround is to use indirect register accesses
14349                  * for all chip writes not to mailbox registers.
14350                  */
14351                 if (tg3_flag(tp, PCIX_MODE)) {
14352                         u32 pm_reg;
14353
14354                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14355
14356                         /* The chip can have its power management PCI config
14357                          * space registers clobbered due to this bug.
14358                          * So explicitly force the chip into D0 here.
14359                          */
14360                         pci_read_config_dword(tp->pdev,
14361                                               tp->pm_cap + PCI_PM_CTRL,
14362                                               &pm_reg);
14363                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14364                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14365                         pci_write_config_dword(tp->pdev,
14366                                                tp->pm_cap + PCI_PM_CTRL,
14367                                                pm_reg);
14368
14369                         /* Also, force SERR#/PERR# in PCI command. */
14370                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14371                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14372                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14373                 }
14374         }
14375
14376         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14377                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14378         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14379                 tg3_flag_set(tp, PCI_32BIT);
14380
14381         /* Chip-specific fixup from Broadcom driver */
14382         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14383             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14384                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14385                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14386         }
14387
14388         /* Default fast path register access methods */
14389         tp->read32 = tg3_read32;
14390         tp->write32 = tg3_write32;
14391         tp->read32_mbox = tg3_read32;
14392         tp->write32_mbox = tg3_write32;
14393         tp->write32_tx_mbox = tg3_write32;
14394         tp->write32_rx_mbox = tg3_write32;
14395
14396         /* Various workaround register access methods */
14397         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14398                 tp->write32 = tg3_write_indirect_reg32;
14399         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14400                  (tg3_flag(tp, PCI_EXPRESS) &&
14401                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14402                 /*
14403                  * Back-to-back register writes can cause problems on these
14404                  * chips; the workaround is to read back all reg writes
14405                  * except those to mailbox regs.
14406                  *
14407                  * See tg3_write_indirect_reg32().
14408                  */
14409                 tp->write32 = tg3_write_flush_reg32;
14410         }
14411
14412         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14413                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14414                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14415                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14416         }
14417
14418         if (tg3_flag(tp, ICH_WORKAROUND)) {
14419                 tp->read32 = tg3_read_indirect_reg32;
14420                 tp->write32 = tg3_write_indirect_reg32;
14421                 tp->read32_mbox = tg3_read_indirect_mbox;
14422                 tp->write32_mbox = tg3_write_indirect_mbox;
14423                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14424                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14425
14426                 iounmap(tp->regs);
14427                 tp->regs = NULL;
14428
14429                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14430                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14431                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14432         }
14433         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14434                 tp->read32_mbox = tg3_read32_mbox_5906;
14435                 tp->write32_mbox = tg3_write32_mbox_5906;
14436                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14437                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14438         }
14439
14440         if (tp->write32 == tg3_write_indirect_reg32 ||
14441             (tg3_flag(tp, PCIX_MODE) &&
14442              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14443               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14444                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14445
14446         /* The memory arbiter has to be enabled in order for SRAM accesses
14447          * to succeed.  Normally on powerup the tg3 chip firmware will make
14448          * sure it is enabled, but other entities such as system netboot
14449          * code might disable it.
14450          */
14451         val = tr32(MEMARB_MODE);
14452         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14453
14454         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14455         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14456             tg3_flag(tp, 5780_CLASS)) {
14457                 if (tg3_flag(tp, PCIX_MODE)) {
14458                         pci_read_config_dword(tp->pdev,
14459                                               tp->pcix_cap + PCI_X_STATUS,
14460                                               &val);
14461                         tp->pci_fn = val & 0x7;
14462                 }
14463         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14464                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14465                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14466                     NIC_SRAM_CPMUSTAT_SIG) {
14467                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14468                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14469                 }
14470         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14471                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14472                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14473                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14474                     NIC_SRAM_CPMUSTAT_SIG) {
14475                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14476                                      TG3_CPMU_STATUS_FSHFT_5719;
14477                 }
14478         }
14479
14480         /* Get eeprom hw config before calling tg3_set_power_state().
14481          * In particular, the TG3_FLAG_IS_NIC flag must be
14482          * determined before calling tg3_set_power_state() so that
14483          * we know whether or not to switch out of Vaux power.
14484          * When the flag is set, it means that GPIO1 is used for eeprom
14485          * write protect and also implies that it is a LOM where GPIOs
14486          * are not used to switch power.
14487          */
14488         tg3_get_eeprom_hw_cfg(tp);
14489
14490         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14491                 tg3_flag_clear(tp, TSO_CAPABLE);
14492                 tg3_flag_clear(tp, TSO_BUG);
14493                 tp->fw_needed = NULL;
14494         }
14495
14496         if (tg3_flag(tp, ENABLE_APE)) {
14497                 /* Allow reads and writes to the
14498                  * APE register and memory space.
14499                  */
14500                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14501                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14502                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14503                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14504                                        pci_state_reg);
14505
14506                 tg3_ape_lock_init(tp);
14507         }
14508
14509         /* Set up tp->grc_local_ctrl before calling
14510          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14511          * will bring 5700's external PHY out of reset.
14512          * It is also used as eeprom write protect on LOMs.
14513          */
14514         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14515         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14516             tg3_flag(tp, EEPROM_WRITE_PROT))
14517                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14518                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14519         /* Unused GPIO3 must be driven as output on 5752 because there
14520          * are no pull-up resistors on unused GPIO pins.
14521          */
14522         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14523                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14524
14525         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14526             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14527             tg3_flag(tp, 57765_CLASS))
14528                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14529
14530         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14531             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14532                 /* Turn off the debug UART. */
14533                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14534                 if (tg3_flag(tp, IS_NIC))
14535                         /* Keep VMain power. */
14536                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14537                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14538         }
14539
14540         /* Switch out of Vaux if it is a NIC */
14541         tg3_pwrsrc_switch_to_vmain(tp);
14542
14543         /* Derive initial jumbo mode from MTU assigned in
14544          * ether_setup() via the alloc_etherdev() call
14545          */
14546         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14547                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14548
14549         /* Determine WakeOnLan speed to use. */
14550         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14551             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14552             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14553             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14554                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14555         } else {
14556                 tg3_flag_set(tp, WOL_SPEED_100MB);
14557         }
14558
14559         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14560                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14561
14562         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
14563         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14564             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14565              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14566              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14567             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14568             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14569                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14570
14571         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14572             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14573                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14574         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14575                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14576
14577         if (tg3_flag(tp, 5705_PLUS) &&
14578             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14579             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14580             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14581             !tg3_flag(tp, 57765_PLUS)) {
14582                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14583                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14584                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14585                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14586                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14587                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14588                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14589                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14590                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14591                 } else
14592                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14593         }
14594
14595         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14596             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14597                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14598                 if (tp->phy_otp == 0)
14599                         tp->phy_otp = TG3_OTP_DEFAULT;
14600         }
14601
14602         if (tg3_flag(tp, CPMU_PRESENT))
14603                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14604         else
14605                 tp->mi_mode = MAC_MI_MODE_BASE;
14606
14607         tp->coalesce_mode = 0;
14608         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14609             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14610                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14611
14612         /* Set these bits to enable the statistics workaround. */
14613         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14614             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14615             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14616                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14617                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14618         }
14619
14620         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14621             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14622                 tg3_flag_set(tp, USE_PHYLIB);
14623
14624         err = tg3_mdio_init(tp);
14625         if (err)
14626                 return err;
14627
14628         /* Initialize data/descriptor byte/word swapping. */
14629         val = tr32(GRC_MODE);
14630         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14631                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14632                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14633                         GRC_MODE_B2HRX_ENABLE |
14634                         GRC_MODE_HTX2B_ENABLE |
14635                         GRC_MODE_HOST_STACKUP);
14636         else
14637                 val &= GRC_MODE_HOST_STACKUP;
14638
14639         tw32(GRC_MODE, val | tp->grc_mode);
14640
14641         tg3_switch_clocks(tp);
14642
14643         /* Clear this out for sanity. */
14644         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14645
14646         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14647                               &pci_state_reg);
14648         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14649             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14650                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14651
14652                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14653                     chiprevid == CHIPREV_ID_5701_B0 ||
14654                     chiprevid == CHIPREV_ID_5701_B2 ||
14655                     chiprevid == CHIPREV_ID_5701_B5) {
14656                         void __iomem *sram_base;
14657
14658                         /* Write some dummy words into the SRAM status block
14659                          * area and see if they read back correctly.  If the
14660                          * readback is bad, force-enable the PCI-X workaround.
14661                          */
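                        /* The pattern below appears designed to catch writes
                         * landing at the wrong address: word 0 is written as
                         * zero, then word 1 is written twice (zero, then
                         * all-ones).  If the final 0xffffffff write corrupts
                         * word 0, the chip is misdecoding addresses and
                         * needs the workaround.
                         */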
14662                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14663
14664                         writel(0x00000000, sram_base);
14665                         writel(0x00000000, sram_base + 4);
14666                         writel(0xffffffff, sram_base + 4);
14667                         if (readl(sram_base) != 0x00000000)
14668                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14669                 }
14670         }
14671
14672         udelay(50);
14673         tg3_nvram_init(tp);
14674
14675         grc_misc_cfg = tr32(GRC_MISC_CFG);
14676         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14677
14678         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14679             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14680              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14681                 tg3_flag_set(tp, IS_5788);
14682
14683         if (!tg3_flag(tp, IS_5788) &&
14684             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14685                 tg3_flag_set(tp, TAGGED_STATUS);
14686         if (tg3_flag(tp, TAGGED_STATUS)) {
14687                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14688                                       HOSTCC_MODE_CLRTICK_TXBD);
14689
14690                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14691                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14692                                        tp->misc_host_ctrl);
14693         }
14694
14695         /* Preserve the APE MAC_MODE bits */
14696         if (tg3_flag(tp, ENABLE_APE))
14697                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14698         else
14699                 tp->mac_mode = 0;
14700
14701         /* These devices are limited to 10/100 only */
14702         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14703              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14704             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14705              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14706              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14707               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14708               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14709             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14710              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14711               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14712               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14713             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14714             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14715             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14716             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14717                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14718
14719         err = tg3_phy_probe(tp);
14720         if (err) {
14721                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14722                 /* ... but do not return immediately ... */
14723                 tg3_mdio_fini(tp);
14724         }
14725
14726         tg3_read_vpd(tp);
14727         tg3_read_fw_ver(tp);
14728
14729         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14730                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14731         } else {
14732                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14733                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14734                 else
14735                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14736         }
14737
14738         /* 5700 {AX,BX} chips have a broken status block link
14739          * change bit implementation, so we must use the
14740          * status register in those cases.
14741          */
14742         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14743                 tg3_flag_set(tp, USE_LINKCHG_REG);
14744         else
14745                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14746
14747         /* The led_ctrl is set during tg3_phy_probe; here we might
14748          * have to force the link status polling mechanism based
14749          * upon subsystem IDs.
14750          */
14751         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14752             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14753             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14754                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14755                 tg3_flag_set(tp, USE_LINKCHG_REG);
14756         }
14757
14758         /* For all SERDES we poll the MAC status register. */
14759         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14760                 tg3_flag_set(tp, POLL_SERDES);
14761         else
14762                 tg3_flag_clear(tp, POLL_SERDES);
14763
14764         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14765         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14766         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14767             tg3_flag(tp, PCIX_MODE)) {
14768                 tp->rx_offset = NET_SKB_PAD;
14769 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14770                 tp->rx_copy_thresh = ~(u16)0;
14771 #endif
14772         }
14773
14774         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14775         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14776         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14777
14778         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14779
14780         /* Increment the rx prod index on the rx std ring by at most
14781          * 8 for these chips to work around hw errata.
14782          */
14783         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14784             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14785             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14786                 tp->rx_std_max_post = 8;
14787
14788         if (tg3_flag(tp, ASPM_WORKAROUND))
14789                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14790                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14791
14792         return err;
14793 }
14794
14795 #ifdef CONFIG_SPARC
14796 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14797 {
14798         struct net_device *dev = tp->dev;
14799         struct pci_dev *pdev = tp->pdev;
14800         struct device_node *dp = pci_device_to_OF_node(pdev);
14801         const unsigned char *addr;
14802         int len;
14803
14804         addr = of_get_property(dp, "local-mac-address", &len);
14805         if (addr && len == 6) {
14806                 memcpy(dev->dev_addr, addr, 6);
14807                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14808                 return 0;
14809         }
14810         return -ENODEV;
14811 }
14812
14813 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14814 {
14815         struct net_device *dev = tp->dev;
14816
14817         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14818         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14819         return 0;
14820 }
14821 #endif
14822
14823 static int __devinit tg3_get_device_address(struct tg3 *tp)
14824 {
14825         struct net_device *dev = tp->dev;
14826         u32 hi, lo, mac_offset;
14827         int addr_ok = 0;
14828
14829 #ifdef CONFIG_SPARC
14830         if (!tg3_get_macaddr_sparc(tp))
14831                 return 0;
14832 #endif
14833
14834         mac_offset = 0x7c;
14835         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14836             tg3_flag(tp, 5780_CLASS)) {
14837                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14838                         mac_offset = 0xcc;
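                /* If the NVRAM lock can be obtained, drop it again
                 * immediately; otherwise reset the NVRAM command register.
                 * This appears intended to leave NVRAM arbitration in a
                 * sane state before the address reads below.
                 */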
14839                 if (tg3_nvram_lock(tp))
14840                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14841                 else
14842                         tg3_nvram_unlock(tp);
14843         } else if (tg3_flag(tp, 5717_PLUS)) {
14844                 if (tp->pci_fn & 1)
14845                         mac_offset = 0xcc;
14846                 if (tp->pci_fn > 1)
14847                         mac_offset += 0x18c;
14848         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14849                 mac_offset = 0x10;
14850
14851         /* First try to get it from MAC address mailbox. */
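        /* 0x484b is ASCII "HK", apparently the bootcode's signature that
         * a valid MAC address has been deposited in the mailbox.
         */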
14852         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14853         if ((hi >> 16) == 0x484b) {
14854                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14855                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14856
14857                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14858                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14859                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14860                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14861                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14862
14863                 /* Some old bootcode may report a 0 MAC address in SRAM */
14864                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14865         }
14866         if (!addr_ok) {
14867                 /* Next, try NVRAM. */
14868                 if (!tg3_flag(tp, NO_NVRAM) &&
14869                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14870                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14871                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14872                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14873                 }
14874                 /* Finally just fetch it out of the MAC control regs. */
14875                 else {
14876                         hi = tr32(MAC_ADDR_0_HIGH);
14877                         lo = tr32(MAC_ADDR_0_LOW);
14878
14879                         dev->dev_addr[5] = lo & 0xff;
14880                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14881                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14882                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14883                         dev->dev_addr[1] = hi & 0xff;
14884                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14885                 }
14886         }
14887
14888         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14889 #ifdef CONFIG_SPARC
14890                 if (!tg3_get_default_macaddr_sparc(tp))
14891                         return 0;
14892 #endif
14893                 return -EINVAL;
14894         }
14895         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14896         return 0;
14897 }
14898
14899 #define BOUNDARY_SINGLE_CACHELINE       1
14900 #define BOUNDARY_MULTI_CACHELINE        2
14901
14902 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14903 {
14904         int cacheline_size;
14905         u8 byte;
14906         int goal;
14907
14908         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14909         if (byte == 0)
14910                 cacheline_size = 1024;
14911         else
14912                 cacheline_size = (int) byte * 4;
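        /* PCI_CACHE_LINE_SIZE is in 32-bit words, so e.g. a register
         * value of 0x10 means 64-byte cache lines; an unset value of 0
         * is treated as the 1024-byte worst case.
         */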
14913
14914         /* On 5703 and later chips, the boundary bits have no
14915          * effect.
14916          */
14917         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14918             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14919             !tg3_flag(tp, PCI_EXPRESS))
14920                 goto out;
14921
14922 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14923         goal = BOUNDARY_MULTI_CACHELINE;
14924 #else
14925 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14926         goal = BOUNDARY_SINGLE_CACHELINE;
14927 #else
14928         goal = 0;
14929 #endif
14930 #endif
14931
14932         if (tg3_flag(tp, 57765_PLUS)) {
14933                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14934                 goto out;
14935         }
14936
14937         if (!goal)
14938                 goto out;
14939
14940         /* PCI controllers on most RISC systems tend to disconnect
14941          * when a device tries to burst across a cache-line boundary.
14942          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14943          *
14944          * Unfortunately, for PCI-E there are only limited
14945          * write-side controls for this, and thus for reads
14946          * we will still get the disconnects.  We'll also waste
14947          * these PCI cycles for both read and write for chips
14948          * other than 5700 and 5701 which do not implement the
14949          * boundary bits.
14950          */
14951         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14952                 switch (cacheline_size) {
14953                 case 16:
14954                 case 32:
14955                 case 64:
14956                 case 128:
14957                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14958                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14959                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14960                         } else {
14961                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14962                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14963                         }
14964                         break;
14965
14966                 case 256:
14967                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14968                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14969                         break;
14970
14971                 default:
14972                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14973                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14974                         break;
14975                 }
14976         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14977                 switch (cacheline_size) {
14978                 case 16:
14979                 case 32:
14980                 case 64:
14981                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14982                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14983                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14984                                 break;
14985                         }
14986                         /* fallthrough */
14987                 case 128:
14988                 default:
14989                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14990                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14991                         break;
14992                 }
14993         } else {
14994                 switch (cacheline_size) {
14995                 case 16:
14996                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14997                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14998                                         DMA_RWCTRL_WRITE_BNDRY_16);
14999                                 break;
15000                         }
15001                         /* fallthrough */
15002                 case 32:
15003                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15004                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15005                                         DMA_RWCTRL_WRITE_BNDRY_32);
15006                                 break;
15007                         }
15008                         /* fallthrough */
15009                 case 64:
15010                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15011                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15012                                         DMA_RWCTRL_WRITE_BNDRY_64);
15013                                 break;
15014                         }
15015                         /* fallthrough */
15016                 case 128:
15017                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15018                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15019                                         DMA_RWCTRL_WRITE_BNDRY_128);
15020                                 break;
15021                         }
15022                         /* fallthrough */
15023                 case 256:
15024                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
15025                                 DMA_RWCTRL_WRITE_BNDRY_256);
15026                         break;
15027                 case 512:
15028                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
15029                                 DMA_RWCTRL_WRITE_BNDRY_512);
15030                         break;
15031                 case 1024:
15032                 default:
15033                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15034                                 DMA_RWCTRL_WRITE_BNDRY_1024);
15035                         break;
15036                 }
15037         }
15038
15039 out:
15040         return val;
15041 }
15042
15043 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15044 {
15045         struct tg3_internal_buffer_desc test_desc;
15046         u32 sram_dma_descs;
15047         int i, ret;
15048
15049         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15050
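              /* Zero the completion FIFO pointers, DMA engine status,
               * buffer manager mode, and FTQ reset register before
               * staging the test descriptor in SRAM.
               */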
15051         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15052         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15053         tw32(RDMAC_STATUS, 0);
15054         tw32(WDMAC_STATUS, 0);
15055
15056         tw32(BUFMGR_MODE, 0);
15057         tw32(FTQ_RESET, 0);
15058
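              /* Describe the host test buffer with a single internal buffer
               * descriptor; nic_mbuf 0x00002100 matches the SRAM offset that
               * the (disabled) validation block below reads back.
               */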
15059         test_desc.addr_hi = ((u64) buf_dma) >> 32;
15060         test_desc.addr_lo = buf_dma & 0xffffffff;
15061         test_desc.nic_mbuf = 0x00002100;
15062         test_desc.len = size;
15063
15064         /*
15065          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
15066          * the *second* time the tg3 driver was loaded after an
15067          * initial scan.
15068          *
15069          * Broadcom tells me:
15070          *   ...the DMA engine is connected to the GRC block and a DMA
15071          *   reset may affect the GRC block in some unpredictable way...
15072          *   The behavior of resets to individual blocks has not been tested.
15073          *
15074          * Broadcom noted the GRC reset will also reset all sub-components.
15075          */
15076         if (to_device) {
15077                 test_desc.cqid_sqid = (13 << 8) | 2;
15078
15079                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15080                 udelay(40);
15081         } else {
15082                 test_desc.cqid_sqid = (16 << 8) | 7;
15083
15084                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15085                 udelay(40);
15086         }
15087         test_desc.flags = 0x00000005;
15088
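              /* Copy the descriptor into NIC SRAM one dword at a time
               * through the PCI config-space memory window, then restore
               * the window base.
               */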
15089         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15090                 u32 val;
15091
15092                 val = *(((u32 *)&test_desc) + i);
15093                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15094                                        sram_dma_descs + (i * sizeof(u32)));
15095                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15096         }
15097         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15098
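              /* Kick off the transfer by enqueueing the descriptor's SRAM
               * address on the read (host-to-device) or write
               * (device-to-host) DMA FTQ.
               */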
15099         if (to_device)
15100                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15101         else
15102                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15103
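              /* Poll for completion: wait up to 40 * 100us = 4ms for the
               * descriptor address to appear in the completion FIFO.
               */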
15104         ret = -ENODEV;
15105         for (i = 0; i < 40; i++) {
15106                 u32 val;
15107
15108                 if (to_device)
15109                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15110                 else
15111                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15112                 if ((val & 0xffff) == sram_dma_descs) {
15113                         ret = 0;
15114                         break;
15115                 }
15116
15117                 udelay(100);
15118         }
15119
15120         return ret;
15121 }
15122
15123 #define TEST_BUFFER_SIZE        0x2000
15124
15125 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15126         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15127         { },
15128 };
15129
15130 static int __devinit tg3_test_dma(struct tg3 *tp)
15131 {
15132         dma_addr_t buf_dma;
15133         u32 *buf, saved_dma_rwctrl;
15134         int ret = 0;
15135
15136         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15137                                  &buf_dma, GFP_KERNEL);
15138         if (!buf) {
15139                 ret = -ENOMEM;
15140                 goto out_nofree;
15141         }
15142
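              /* Default DMA bus commands; 0x7 and 0x6 are the PCI Memory
               * Write and Memory Read command encodings.
               */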
15143         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15144                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15145
15146         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15147
15148         if (tg3_flag(tp, 57765_PLUS))
15149                 goto out;
15150
15151         if (tg3_flag(tp, PCI_EXPRESS)) {
15152                 /* DMA read watermark not used on PCIE */
15153                 tp->dma_rwctrl |= 0x00180000;
15154         } else if (!tg3_flag(tp, PCIX_MODE)) {
15155                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15156                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15157                         tp->dma_rwctrl |= 0x003f0000;
15158                 else
15159                         tp->dma_rwctrl |= 0x003f000f;
15160         } else {
15161                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15162                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15163                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15164                         u32 read_water = 0x7;
15165
15166                         /* If the 5704 is behind the EPB bridge, we can
15167                          * do the less restrictive ONE_DMA workaround for
15168                          * better performance.
15169                          */
15170                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15171                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15172                                 tp->dma_rwctrl |= 0x8000;
15173                         else if (ccval == 0x6 || ccval == 0x7)
15174                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15175
15176                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15177                                 read_water = 4;
15178                         /* Set bit 23 to enable PCIX hw bug fix */
15179                         tp->dma_rwctrl |=
15180                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15181                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15182                                 (1 << 23);
15183                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15184                         /* 5780 always in PCIX mode */
15185                         tp->dma_rwctrl |= 0x00144000;
15186                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15187                         /* 5714 always in PCIX mode */
15188                         tp->dma_rwctrl |= 0x00148000;
15189                 } else {
15190                         tp->dma_rwctrl |= 0x001b000f;
15191                 }
15192         }
15193
15194         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15195             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15196                 tp->dma_rwctrl &= 0xfffffff0;
15197
15198         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15199             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15200                 /* Remove this if it causes problems for some boards. */
15201                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15202
15203                 /* On 5700/5701 chips, we need to set this bit.
15204                  * Otherwise the chip will issue cacheline transactions
15205                  * to streamable DMA memory without all of the byte
15206                  * enables turned on.  This is an error on several
15207                  * RISC PCI controllers, in particular sparc64.
15208                  *
15209                  * On 5703/5704 chips, this bit has been reassigned
15210                  * a different meaning.  In particular, it is used
15211                  * on those chips to enable a PCI-X workaround.
15212                  */
15213                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15214         }
15215
15216         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15217
15218 #if 0
15219         /* Unneeded, already done by tg3_get_invariants.  */
15220         tg3_switch_clocks(tp);
15221 #endif
15222
15223         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15224             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15225                 goto out;
15226
15227         /* It is best to perform the DMA test with the maximum write
15228          * burst size to expose the 5700/5701 write DMA bug.
15229          */
15230         saved_dma_rwctrl = tp->dma_rwctrl;
15231         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15232         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15233
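              /* Round-trip test: fill the buffer with a known pattern, DMA
               * it to the chip, DMA it back, and verify.  On corruption,
               * retry once with the write boundary clamped to 16 bytes.
               */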
15234         while (1) {
15235                 u32 *p = buf, i;
15236
15237                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15238                         p[i] = i;
15239
15240                 /* Send the buffer to the chip. */
15241                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15242                 if (ret) {
15243                         dev_err(&tp->pdev->dev,
15244                                 "%s: Buffer write failed. err = %d\n",
15245                                 __func__, ret);
15246                         break;
15247                 }
15248
15249 #if 0
15250                 /* validate data reached card RAM correctly. */
15251                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15252                         u32 val;
15253                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15254                         if (le32_to_cpu(val) != p[i]) {
15255                                 dev_err(&tp->pdev->dev,
15256                                         "%s: Buffer corrupted on device! "
15257                                         "(%d != %d)\n", __func__, le32_to_cpu(val), i);
15258                                 /* ret = -ENODEV here? */
15259                         }
15260                         p[i] = 0;
15261                 }
15262 #endif
15263                 /* Now read it back. */
15264                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15265                 if (ret) {
15266                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15267                                 "err = %d\n", __func__, ret);
15268                         break;
15269                 }
15270
15271                 /* Verify it. */
15272                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15273                         if (p[i] == i)
15274                                 continue;
15275
15276                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15277                             DMA_RWCTRL_WRITE_BNDRY_16) {
15278                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15279                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15280                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15281                                 break;
15282                         } else {
15283                                 dev_err(&tp->pdev->dev,
15284                                         "%s: Buffer corrupted on read back! "
15285                                         "(%d != %d)\n", __func__, p[i], i);
15286                                 ret = -ENODEV;
15287                                 goto out;
15288                         }
15289                 }
15290
15291                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15292                         /* Success. */
15293                         ret = 0;
15294                         break;
15295                 }
15296         }
15297         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15298             DMA_RWCTRL_WRITE_BNDRY_16) {
15299                 /* DMA test passed without adjusting the DMA boundary;
15300                  * now look for chipsets that are known to expose the
15301                  * DMA bug without failing the test.
15302                  */
15303                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15304                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15305                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15306                 } else {
15307                         /* Safe to use the calculated DMA boundary. */
15308                         tp->dma_rwctrl = saved_dma_rwctrl;
15309                 }
15310
15311                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15312         }
15313
15314 out:
15315         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15316 out_nofree:
15317         return ret;
15318 }
15319
15320 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15321 {
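              /* Choose MAC buffer manager watermarks appropriate for this
               * chip family, for both standard and jumbo MTU setups.
               */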
15322         if (tg3_flag(tp, 57765_PLUS)) {
15323                 tp->bufmgr_config.mbuf_read_dma_low_water =
15324                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15325                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15326                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15327                 tp->bufmgr_config.mbuf_high_water =
15328                         DEFAULT_MB_HIGH_WATER_57765;
15329
15330                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15331                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15332                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15333                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15334                 tp->bufmgr_config.mbuf_high_water_jumbo =
15335                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15336         } else if (tg3_flag(tp, 5705_PLUS)) {
15337                 tp->bufmgr_config.mbuf_read_dma_low_water =
15338                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15339                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15340                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15341                 tp->bufmgr_config.mbuf_high_water =
15342                         DEFAULT_MB_HIGH_WATER_5705;
15343                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15344                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15345                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15346                         tp->bufmgr_config.mbuf_high_water =
15347                                 DEFAULT_MB_HIGH_WATER_5906;
15348                 }
15349
15350                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15351                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15352                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15353                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15354                 tp->bufmgr_config.mbuf_high_water_jumbo =
15355                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15356         } else {
15357                 tp->bufmgr_config.mbuf_read_dma_low_water =
15358                         DEFAULT_MB_RDMA_LOW_WATER;
15359                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15360                         DEFAULT_MB_MACRX_LOW_WATER;
15361                 tp->bufmgr_config.mbuf_high_water =
15362                         DEFAULT_MB_HIGH_WATER;
15363
15364                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15365                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15366                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15367                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15368                 tp->bufmgr_config.mbuf_high_water_jumbo =
15369                         DEFAULT_MB_HIGH_WATER_JUMBO;
15370         }
15371
15372         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15373         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15374 }
15375
15376 static char * __devinit tg3_phy_string(struct tg3 *tp)
15377 {
15378         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15379         case TG3_PHY_ID_BCM5400:        return "5400";
15380         case TG3_PHY_ID_BCM5401:        return "5401";
15381         case TG3_PHY_ID_BCM5411:        return "5411";
15382         case TG3_PHY_ID_BCM5701:        return "5701";
15383         case TG3_PHY_ID_BCM5703:        return "5703";
15384         case TG3_PHY_ID_BCM5704:        return "5704";
15385         case TG3_PHY_ID_BCM5705:        return "5705";
15386         case TG3_PHY_ID_BCM5750:        return "5750";
15387         case TG3_PHY_ID_BCM5752:        return "5752";
15388         case TG3_PHY_ID_BCM5714:        return "5714";
15389         case TG3_PHY_ID_BCM5780:        return "5780";
15390         case TG3_PHY_ID_BCM5755:        return "5755";
15391         case TG3_PHY_ID_BCM5787:        return "5787";
15392         case TG3_PHY_ID_BCM5784:        return "5784";
15393         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15394         case TG3_PHY_ID_BCM5906:        return "5906";
15395         case TG3_PHY_ID_BCM5761:        return "5761";
15396         case TG3_PHY_ID_BCM5718C:       return "5718C";
15397         case TG3_PHY_ID_BCM5718S:       return "5718S";
15398         case TG3_PHY_ID_BCM57765:       return "57765";
15399         case TG3_PHY_ID_BCM5719C:       return "5719C";
15400         case TG3_PHY_ID_BCM5720C:       return "5720C";
15401         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15402         case 0:                 return "serdes";
15403         default:                return "unknown";
15404         }
15405 }
15406
15407 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15408 {
15409         if (tg3_flag(tp, PCI_EXPRESS)) {
15410                 strcpy(str, "PCI Express");
15411                 return str;
15412         } else if (tg3_flag(tp, PCIX_MODE)) {
15413                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15414
15415                 strcpy(str, "PCIX:");
15416
15417                 if ((clock_ctrl == 7) ||
15418                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15419                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15420                         strcat(str, "133MHz");
15421                 else if (clock_ctrl == 0)
15422                         strcat(str, "33MHz");
15423                 else if (clock_ctrl == 2)
15424                         strcat(str, "50MHz");
15425                 else if (clock_ctrl == 4)
15426                         strcat(str, "66MHz");
15427                 else if (clock_ctrl == 6)
15428                         strcat(str, "100MHz");
15429         } else {
15430                 strcpy(str, "PCI:");
15431                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15432                         strcat(str, "66MHz");
15433                 else
15434                         strcat(str, "33MHz");
15435         }
15436         if (tg3_flag(tp, PCI_32BIT))
15437                 strcat(str, ":32-bit");
15438         else
15439                 strcat(str, ":64-bit");
15440         return str;
15441 }
15442
15443 static void __devinit tg3_init_coal(struct tg3 *tp)
15444 {
15445         struct ethtool_coalesce *ec = &tp->coal;
15446
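              /* Publish the driver's default interrupt coalescing
               * parameters through the ethtool interface.
               */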
15447         memset(ec, 0, sizeof(*ec));
15448         ec->cmd = ETHTOOL_GCOALESCE;
15449         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15450         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15451         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15452         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15453         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15454         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15455         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15456         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15457         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15458
15459         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15460                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15461                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15462                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15463                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15464                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15465         }
15466
15467         if (tg3_flag(tp, 5705_PLUS)) {
15468                 ec->rx_coalesce_usecs_irq = 0;
15469                 ec->tx_coalesce_usecs_irq = 0;
15470                 ec->stats_block_coalesce_usecs = 0;
15471         }
15472 }
15473
15474 static int __devinit tg3_init_one(struct pci_dev *pdev,
15475                                   const struct pci_device_id *ent)
15476 {
15477         struct net_device *dev;
15478         struct tg3 *tp;
15479         int i, err, pm_cap;
15480         u32 sndmbx, rcvmbx, intmbx;
15481         char str[40];
15482         u64 dma_mask, persist_dma_mask;
15483         netdev_features_t features = 0;
15484
15485         printk_once(KERN_INFO "%s\n", version);
15486
15487         err = pci_enable_device(pdev);
15488         if (err) {
15489                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15490                 return err;
15491         }
15492
15493         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15494         if (err) {
15495                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15496                 goto err_out_disable_pdev;
15497         }
15498
15499         pci_set_master(pdev);
15500
15501         /* Find power-management capability. */
15502         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15503         if (pm_cap == 0) {
15504                 dev_err(&pdev->dev,
15505                         "Cannot find Power Management capability, aborting\n");
15506                 err = -EIO;
15507                 goto err_out_free_res;
15508         }
15509
15510         err = pci_set_power_state(pdev, PCI_D0);
15511         if (err) {
15512                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15513                 goto err_out_free_res;
15514         }
15515
15516         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15517         if (!dev) {
15518                 err = -ENOMEM;
15519                 goto err_out_power_down;
15520         }
15521
15522         SET_NETDEV_DEV(dev, &pdev->dev);
15523
15524         tp = netdev_priv(dev);
15525         tp->pdev = pdev;
15526         tp->dev = dev;
15527         tp->pm_cap = pm_cap;
15528         tp->rx_mode = TG3_DEF_RX_MODE;
15529         tp->tx_mode = TG3_DEF_TX_MODE;
15530
15531         if (tg3_debug > 0)
15532                 tp->msg_enable = tg3_debug;
15533         else
15534                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15535
15536         /* The word/byte swap controls here govern register access byte
15537          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15538          * setting below.
15539          */
15540         tp->misc_host_ctrl =
15541                 MISC_HOST_CTRL_MASK_PCI_INT |
15542                 MISC_HOST_CTRL_WORD_SWAP |
15543                 MISC_HOST_CTRL_INDIR_ACCESS |
15544                 MISC_HOST_CTRL_PCISTATE_RW;
15545
15546         /* The NONFRM (non-frame) byte/word swap controls take effect
15547          * on descriptor entries, i.e. anything that isn't packet data.
15548          *
15549          * The StrongARM chips on the board (one for tx, one for rx)
15550          * are running in big-endian mode.
15551          */
15552         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15553                         GRC_MODE_WSWAP_NONFRM_DATA);
15554 #ifdef __BIG_ENDIAN
15555         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15556 #endif
15557         spin_lock_init(&tp->lock);
15558         spin_lock_init(&tp->indirect_lock);
15559         INIT_WORK(&tp->reset_task, tg3_reset_task);
15560
15561         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15562         if (!tp->regs) {
15563                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15564                 err = -ENOMEM;
15565                 goto err_out_free_dev;
15566         }
15567
15568         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15569             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15570             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15571             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15572             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15573             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15574             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15575             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15576                 tg3_flag_set(tp, ENABLE_APE);
15577                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15578                 if (!tp->aperegs) {
15579                         dev_err(&pdev->dev,
15580                                 "Cannot map APE registers, aborting\n");
15581                         err = -ENOMEM;
15582                         goto err_out_iounmap;
15583                 }
15584         }
15585
15586         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15587         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15588
15589         dev->ethtool_ops = &tg3_ethtool_ops;
15590         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15591         dev->netdev_ops = &tg3_netdev_ops;
15592         dev->irq = pdev->irq;
15593
15594         err = tg3_get_invariants(tp);
15595         if (err) {
15596                 dev_err(&pdev->dev,
15597                         "Problem fetching invariants of chip, aborting\n");
15598                 goto err_out_apeunmap;
15599         }
15600
15601         /* The EPB bridge inside 5714, 5715, and 5780 and any
15602          * device behind the EPB cannot support DMA addresses > 40-bit.
15603          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15604          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15605          * do DMA address check in tg3_start_xmit().
15606          */
15607         if (tg3_flag(tp, IS_5788))
15608                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15609         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15610                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15611 #ifdef CONFIG_HIGHMEM
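                      /* Highmem pages may sit above 40 bits; a 64-bit
                       * streaming mask is usable here because
                       * tg3_start_xmit() checks DMA addresses, per the
                       * comment above.
                       */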
15612                 dma_mask = DMA_BIT_MASK(64);
15613 #endif
15614         } else
15615                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15616
15617         /* Configure DMA attributes. */
15618         if (dma_mask > DMA_BIT_MASK(32)) {
15619                 err = pci_set_dma_mask(pdev, dma_mask);
15620                 if (!err) {
15621                         features |= NETIF_F_HIGHDMA;
15622                         err = pci_set_consistent_dma_mask(pdev,
15623                                                           persist_dma_mask);
15624                         if (err < 0) {
15625                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15626                                         "DMA for consistent allocations\n");
15627                                 goto err_out_apeunmap;
15628                         }
15629                 }
15630         }
15631         if (err || dma_mask == DMA_BIT_MASK(32)) {
15632                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15633                 if (err) {
15634                         dev_err(&pdev->dev,
15635                                 "No usable DMA configuration, aborting\n");
15636                         goto err_out_apeunmap;
15637                 }
15638         }
15639
15640         tg3_init_bufmgr_config(tp);
15641
15642         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15643
15644         /* 5700 B0 chips do not support checksumming correctly due
15645          * to hardware bugs.
15646          */
15647         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15648                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15649
15650                 if (tg3_flag(tp, 5755_PLUS))
15651                         features |= NETIF_F_IPV6_CSUM;
15652         }
15653
15654         /* TSO is on by default on chips that support hardware TSO.
15655          * Firmware TSO on older chips gives lower performance, so it
15656          * is off by default, but can be enabled using ethtool.
15657          */
15658         if ((tg3_flag(tp, HW_TSO_1) ||
15659              tg3_flag(tp, HW_TSO_2) ||
15660              tg3_flag(tp, HW_TSO_3)) &&
15661             (features & NETIF_F_IP_CSUM))
15662                 features |= NETIF_F_TSO;
15663         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15664                 if (features & NETIF_F_IPV6_CSUM)
15665                         features |= NETIF_F_TSO6;
15666                 if (tg3_flag(tp, HW_TSO_3) ||
15667                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15668                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15669                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15670                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15671                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15672                         features |= NETIF_F_TSO_ECN;
15673         }
15674
15675         dev->features |= features;
15676         dev->vlan_features |= features;
15677
15678         /*
15679          * Add loopback capability only for a subset of devices that support
15680          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15681          * loopback for the remaining devices.
15682          */
15683         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15684             !tg3_flag(tp, CPMU_PRESENT))
15685                 /* Add the loopback capability */
15686                 features |= NETIF_F_LOOPBACK;
15687
15688         dev->hw_features |= features;
15689
15690         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15691             !tg3_flag(tp, TSO_CAPABLE) &&
15692             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15693                 tg3_flag_set(tp, MAX_RXPEND_64);
15694                 tp->rx_pending = 63;
15695         }
15696
15697         err = tg3_get_device_address(tp);
15698         if (err) {
15699                 dev_err(&pdev->dev,
15700                         "Could not obtain valid ethernet address, aborting\n");
15701                 goto err_out_apeunmap;
15702         }
15703
15704         /*
15705          * Reset the chip in case the UNDI or EFI driver did not shut it
15706          * down cleanly; otherwise the DMA self test will enable WDMAC and
15707          * we'll see (spurious) pending DMA on the PCI bus at that point.
15708          */
15709         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15710             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15711                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15712                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15713         }
15714
15715         err = tg3_test_dma(tp);
15716         if (err) {
15717                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15718                 goto err_out_apeunmap;
15719         }
15720
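              /* Walk the per-vector NAPI contexts, assigning each one its
               * interrupt, RX return ring, and TX producer mailboxes.
               */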
15721         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15722         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15723         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15724         for (i = 0; i < tp->irq_max; i++) {
15725                 struct tg3_napi *tnapi = &tp->napi[i];
15726
15727                 tnapi->tp = tp;
15728                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15729
15730                 tnapi->int_mbox = intmbx;
15731                 if (i <= 4)
15732                         intmbx += 0x8;
15733                 else
15734                         intmbx += 0x4;
15735
15736                 tnapi->consmbox = rcvmbx;
15737                 tnapi->prodmbox = sndmbx;
15738
15739                 if (i)
15740                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15741                 else
15742                         tnapi->coal_now = HOSTCC_MODE_NOW;
15743
15744                 if (!tg3_flag(tp, SUPPORT_MSIX))
15745                         break;
15746
15747                 /*
15748                  * If we support MSIX, we'll be using RSS.  If we're using
15749                  * RSS, the first vector only handles link interrupts and the
15750                  * remaining vectors handle rx and tx interrupts.  Reuse the
15751          * mailbox values for the next iteration.  The values we set up
15752                  * above are still useful for the single vectored mode.
15753                  */
15754                 if (!i)
15755                         continue;
15756
15757                 rcvmbx += 0x8;
15758
15759                 if (sndmbx & 0x4)
15760                         sndmbx -= 0x4;
15761                 else
15762                         sndmbx += 0xc;
15763         }
15764
15765         tg3_init_coal(tp);
15766
15767         pci_set_drvdata(pdev, dev);
15768
15769         if (tg3_flag(tp, 5717_PLUS)) {
15770                 /* Resume a low-power mode */
15771                 tg3_frob_aux_power(tp, false);
15772         }
15773
15774         tg3_timer_init(tp);
15775
15776         err = register_netdev(dev);
15777         if (err) {
15778                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15779                 goto err_out_apeunmap;
15780         }
15781
15782         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15783                     tp->board_part_number,
15784                     tp->pci_chip_rev_id,
15785                     tg3_bus_string(tp, str),
15786                     dev->dev_addr);
15787
15788         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15789                 struct phy_device *phydev;
15790                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15791                 netdev_info(dev,
15792                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15793                             phydev->drv->name, dev_name(&phydev->dev));
15794         } else {
15795                 char *ethtype;
15796
15797                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15798                         ethtype = "10/100Base-TX";
15799                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15800                         ethtype = "1000Base-SX";
15801                 else
15802                         ethtype = "10/100/1000Base-T";
15803
15804                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15805                             "(WireSpeed[%d], EEE[%d])\n",
15806                             tg3_phy_string(tp), ethtype,
15807                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15808                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15809         }
15810
15811         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15812                     (dev->features & NETIF_F_RXCSUM) != 0,
15813                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15814                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15815                     tg3_flag(tp, ENABLE_ASF) != 0,
15816                     tg3_flag(tp, TSO_CAPABLE) != 0);
15817         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15818                     tp->dma_rwctrl,
15819                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15820                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15821
15822         pci_save_state(pdev);
15823
15824         return 0;
15825
15826 err_out_apeunmap:
15827         if (tp->aperegs) {
15828                 iounmap(tp->aperegs);
15829                 tp->aperegs = NULL;
15830         }
15831
15832 err_out_iounmap:
15833         if (tp->regs) {
15834                 iounmap(tp->regs);
15835                 tp->regs = NULL;
15836         }
15837
15838 err_out_free_dev:
15839         free_netdev(dev);
15840
15841 err_out_power_down:
15842         pci_set_power_state(pdev, PCI_D3hot);
15843
15844 err_out_free_res:
15845         pci_release_regions(pdev);
15846
15847 err_out_disable_pdev:
15848         pci_disable_device(pdev);
15849         pci_set_drvdata(pdev, NULL);
15850         return err;
15851 }
15852
15853 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15854 {
15855         struct net_device *dev = pci_get_drvdata(pdev);
15856
15857         if (dev) {
15858                 struct tg3 *tp = netdev_priv(dev);
15859
15860                 if (tp->fw)
15861                         release_firmware(tp->fw);
15862
15863                 tg3_reset_task_cancel(tp);
15864
15865                 if (tg3_flag(tp, USE_PHYLIB)) {
15866                         tg3_phy_fini(tp);
15867                         tg3_mdio_fini(tp);
15868                 }
15869
15870                 unregister_netdev(dev);
15871                 if (tp->aperegs) {
15872                         iounmap(tp->aperegs);
15873                         tp->aperegs = NULL;
15874                 }
15875                 if (tp->regs) {
15876                         iounmap(tp->regs);
15877                         tp->regs = NULL;
15878                 }
15879                 free_netdev(dev);
15880                 pci_release_regions(pdev);
15881                 pci_disable_device(pdev);
15882                 pci_set_drvdata(pdev, NULL);
15883         }
15884 }
15885
15886 #ifdef CONFIG_PM_SLEEP
15887 static int tg3_suspend(struct device *device)
15888 {
15889         struct pci_dev *pdev = to_pci_dev(device);
15890         struct net_device *dev = pci_get_drvdata(pdev);
15891         struct tg3 *tp = netdev_priv(dev);
15892         int err;
15893
15894         if (!netif_running(dev))
15895                 return 0;
15896
15897         tg3_reset_task_cancel(tp);
15898         tg3_phy_stop(tp);
15899         tg3_netif_stop(tp);
15900
15901         tg3_timer_stop(tp);
15902
15903         tg3_full_lock(tp, 1);
15904         tg3_disable_ints(tp);
15905         tg3_full_unlock(tp);
15906
15907         netif_device_detach(dev);
15908
15909         tg3_full_lock(tp, 0);
15910         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15911         tg3_flag_clear(tp, INIT_COMPLETE);
15912         tg3_full_unlock(tp);
15913
15914         err = tg3_power_down_prepare(tp);
15915         if (err) {
15916                 int err2;
15917
15918                 tg3_full_lock(tp, 0);
15919
15920                 tg3_flag_set(tp, INIT_COMPLETE);
15921                 err2 = tg3_restart_hw(tp, 1);
15922                 if (err2)
15923                         goto out;
15924
15925                 tg3_timer_start(tp);
15926
15927                 netif_device_attach(dev);
15928                 tg3_netif_start(tp);
15929
15930 out:
15931                 tg3_full_unlock(tp);
15932
15933                 if (!err2)
15934                         tg3_phy_start(tp);
15935         }
15936
15937         return err;
15938 }
15939
15940 static int tg3_resume(struct device *device)
15941 {
15942         struct pci_dev *pdev = to_pci_dev(device);
15943         struct net_device *dev = pci_get_drvdata(pdev);
15944         struct tg3 *tp = netdev_priv(dev);
15945         int err;
15946
15947         if (!netif_running(dev))
15948                 return 0;
15949
15950         netif_device_attach(dev);
15951
15952         tg3_full_lock(tp, 0);
15953
15954         tg3_flag_set(tp, INIT_COMPLETE);
15955         err = tg3_restart_hw(tp, 1);
15956         if (err)
15957                 goto out;
15958
15959         tg3_timer_start(tp);
15960
15961         tg3_netif_start(tp);
15962
15963 out:
15964         tg3_full_unlock(tp);
15965
15966         if (!err)
15967                 tg3_phy_start(tp);
15968
15969         return err;
15970 }
15971
15972 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15973 #define TG3_PM_OPS (&tg3_pm_ops)
15974
15975 #else
15976
15977 #define TG3_PM_OPS NULL
15978
15979 #endif /* CONFIG_PM_SLEEP */
15980
15981 /**
15982  * tg3_io_error_detected - called when PCI error is detected
15983  * @pdev: Pointer to PCI device
15984  * @state: The current PCI connection state
15985  *
15986  * This function is called after a PCI bus error affecting
15987  * this device has been detected.
15988  */
15989 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15990                                               pci_channel_state_t state)
15991 {
15992         struct net_device *netdev = pci_get_drvdata(pdev);
15993         struct tg3 *tp = netdev_priv(netdev);
15994         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15995
15996         netdev_info(netdev, "PCI I/O error detected\n");
15997
15998         rtnl_lock();
15999
16000         if (!netif_running(netdev))
16001                 goto done;
16002
16003         tg3_phy_stop(tp);
16004
16005         tg3_netif_stop(tp);
16006
16007         tg3_timer_stop(tp);
16008
16009         /* Want to make sure that the reset task doesn't run */
16010         tg3_reset_task_cancel(tp);
16011
16012         netif_device_detach(netdev);
16013
16014         /* Clean up software state, even if MMIO is blocked */
16015         tg3_full_lock(tp, 0);
16016         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16017         tg3_full_unlock(tp);
16018
16019 done:
16020         if (state == pci_channel_io_perm_failure)
16021                 err = PCI_ERS_RESULT_DISCONNECT;
16022         else
16023                 pci_disable_device(pdev);
16024
16025         rtnl_unlock();
16026
16027         return err;
16028 }
16029
16030 /**
16031  * tg3_io_slot_reset - called after the PCI bus has been reset.
16032  * @pdev: Pointer to PCI device
16033  *
16034  * Restart the card from scratch, as if from a cold-boot.
16035  * At this point, the card has experienced a hard reset,
16036  * followed by fixups by BIOS, and has its config space
16037  * set up identically to what it was at cold boot.
16038  */
16039 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16040 {
16041         struct net_device *netdev = pci_get_drvdata(pdev);
16042         struct tg3 *tp = netdev_priv(netdev);
16043         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16044         int err;
16045
16046         rtnl_lock();
16047
16048         if (pci_enable_device(pdev)) {
16049                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16050                 goto done;
16051         }
16052
16053         pci_set_master(pdev);
16054         pci_restore_state(pdev);
16055         pci_save_state(pdev);
16056
16057         if (!netif_running(netdev)) {
16058                 rc = PCI_ERS_RESULT_RECOVERED;
16059                 goto done;
16060         }
16061
16062         err = tg3_power_up(tp);
16063         if (err)
16064                 goto done;
16065
16066         rc = PCI_ERS_RESULT_RECOVERED;
16067
16068 done:
16069         rtnl_unlock();
16070
16071         return rc;
16072 }
16073
16074 /**
16075  * tg3_io_resume - called when traffic can start flowing again.
16076  * @pdev: Pointer to PCI device
16077  *
16078  * This callback is called when the error recovery driver tells
16079  * us that it's OK to resume normal operation.
16080  */
16081 static void tg3_io_resume(struct pci_dev *pdev)
16082 {
16083         struct net_device *netdev = pci_get_drvdata(pdev);
16084         struct tg3 *tp = netdev_priv(netdev);
16085         int err;
16086
16087         rtnl_lock();
16088
16089         if (!netif_running(netdev))
16090                 goto done;
16091
16092         tg3_full_lock(tp, 0);
16093         tg3_flag_set(tp, INIT_COMPLETE);
16094         err = tg3_restart_hw(tp, 1);
16095         tg3_full_unlock(tp);
16096         if (err) {
16097                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16098                 goto done;
16099         }
16100
16101         netif_device_attach(netdev);
16102
16103         tg3_timer_start(tp);
16104
16105         tg3_netif_start(tp);
16106
16107         tg3_phy_start(tp);
16108
16109 done:
16110         rtnl_unlock();
16111 }
16112
16113 static struct pci_error_handlers tg3_err_handler = {
16114         .error_detected = tg3_io_error_detected,
16115         .slot_reset     = tg3_io_slot_reset,
16116         .resume         = tg3_io_resume
16117 };
16118
16119 static struct pci_driver tg3_driver = {
16120         .name           = DRV_MODULE_NAME,
16121         .id_table       = tg3_pci_tbl,
16122         .probe          = tg3_init_one,
16123         .remove         = __devexit_p(tg3_remove_one),
16124         .err_handler    = &tg3_err_handler,
16125         .driver.pm      = TG3_PM_OPS,
16126 };
16127
16128 static int __init tg3_init(void)
16129 {
16130         return pci_register_driver(&tg3_driver);
16131 }
16132
16133 static void __exit tg3_cleanup(void)
16134 {
16135         pci_unregister_driver(&tg3_driver);
16136 }
16137
16138 module_init(tg3_init);
16139 module_exit(tg3_cleanup);