/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
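
/* Illustrative usage, not part of the original file: these helpers give
 * atomic, type-checked access to the tp->tg3_flags bitmap, e.g.
 *
 *      if (tg3_flag(tp, TAGGED_STATUS))
 *              tg3_flag_set(tp, USE_LINKCHG_REG);
 *
 * The token pasting means only names from enum TG3_FLAGS (minus the
 * TG3_FLAG_ prefix) will compile, so typos fail at build time.
 */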

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     118
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "April 22, 2011"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
#define TG3_RSS_INDIR_TBL_SIZE          128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
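
/* Worked example (illustrative): because TG3_TX_RING_SIZE is a power of
 * two (512), NEXT_TX can mask instead of dividing: NEXT_TX(510) == 511
 * and NEXT_TX(511) == (512 & 511) == 0, wrapping the producer index back
 * to the start of the ring.  This is exactly the '& (foo - 1)' rewrite
 * of '% foo' mentioned above.
 */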

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
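
/* Sketch of the assumed caller pattern (the rx path later in the driver):
 * the threshold picks between copying into a fresh skb and handing the
 * DMA buffer up directly:
 *
 *      if (len > TG3_RX_COPY_THRESH(tp))
 *              ...pass the mapped buffer up and post a new one...
 *      else
 *              ...memcpy into a small skb and recycle the buffer...
 */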

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "mbuf_lwm_thresh_hit" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
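
/* Note (illustrative): both indirect accessors use the same two-step
 * window protocol under tp->indirect_lock: write the target offset to
 * TG3PCI_REG_BASE_ADDR, then move the data through TG3PCI_REG_DATA.
 * Holding the lock across both config cycles keeps a concurrent user
 * from retargeting the window between the address and data steps.
 */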

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
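
/* Usage sketch (illustrative): callers choose a variant by how strongly
 * the write must be ordered against what follows:
 *
 *      tw32(GRC_MODE, val);                     // posted write
 *      tw32_f(MAC_MODE, val);                   // write, read back to flush
 *      tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40); // flush, then wait 40 usec
 */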

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver isn't holding any stale locks. */
        for (i = 0; i < 8; i++)
                tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == APE_LOCK_GRANT_DRIVER)
                        break;
                udelay(10);
        }

        if (status != APE_LOCK_GRANT_DRIVER) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off,
                                APE_LOCK_GRANT_DRIVER);

                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}
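
/* Illustrative pairing (assumed caller pattern): APE locks bracket
 * accesses to resources shared with the APE firmware and must be
 * released on every path:
 *
 *      if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *              return -EBUSY;
 *      ...touch the shared resource...
 *      tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */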

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  Similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  that reenables interrupts.
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}
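
/* Note (editorial): the two helpers above tunnel IEEE Clause 45 accesses
 * (a device address plus a 16-bit register) through the PHY's Clause 22
 * MMD registers: write devad to MMD_CTRL, the register to MMD_ADDRESS,
 * switch MMD_CTRL to no-increment data mode, then move the value through
 * MMD_ADDRESS.  This is the standard MMD indirect sequence rather than
 * anything tg3-specific.
 */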

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)
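
/* Illustrative pattern (assumed from callers elsewhere in the driver):
 * DSP accesses are bracketed by the SMDSP macros, e.g.
 *
 *      if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 *              tg3_phydsp_write(tp, reg, val);
 *              TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 *      }
 */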

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}
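
/* Worked example (illustrative): with TG3_FW_EVENT_TIMEOUT_USEC == 2500,
 * if 1000 usec of the window remain then delay_cnt = (1000 >> 3) + 1 =
 * 126, giving up to 126 * udelay(8) ~= 1008 usec of polling, slightly
 * overshooting the remaining window rather than undershooting it.
 */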

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                netif_info(tp, link, tp->dev, "Link is down\n");
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
                            (tp->link_config.active_speed == SPEED_1000 ?
                             1000 :
                             (tp->link_config.active_speed == SPEED_100 ?
                              100 : 10)),
                            (tp->link_config.active_duplex == DUPLEX_FULL ?
                             "full" : "half"));

                netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
                            "on" : "off",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                            "on" : "off");

                if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
                        netdev_info(tp->dev, "EEE is %s\n",
                                    tp->setlpicnt ? "enabled" : "disabled");

                tg3_ump_link_report(tp);
        }
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_PAUSE_CAP;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_PAUSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_1000XPAUSE;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_1000XPSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
        u8 cap = 0;

        if (lcladv & ADVERTISE_1000XPAUSE) {
                if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        else if (rmtadv & LPA_1000XPAUSE_ASYM)
                                cap = FLOW_CTRL_RX;
                } else {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                }
        } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
                        cap = FLOW_CTRL_TX;
        }

        return cap;
}
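
/* Worked example (illustrative): if we advertise ADVERTISE_1000XPAUSE |
 * ADVERTISE_1000XPSE_ASYM and the partner advertises only
 * LPA_1000XPAUSE_ASYM, the asymmetric branch yields cap = FLOW_CTRL_RX,
 * i.e. pause resolves to a single direction instead of full symmetric
 * flow control.
 */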
1442
1443 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1444 {
1445         u8 autoneg;
1446         u8 flowctrl = 0;
1447         u32 old_rx_mode = tp->rx_mode;
1448         u32 old_tx_mode = tp->tx_mode;
1449
1450         if (tg3_flag(tp, USE_PHYLIB))
1451                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1452         else
1453                 autoneg = tp->link_config.autoneg;
1454
1455         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1456                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1457                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1458                 else
1459                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1460         } else
1461                 flowctrl = tp->link_config.flowctrl;
1462
1463         tp->link_config.active_flowctrl = flowctrl;
1464
1465         if (flowctrl & FLOW_CTRL_RX)
1466                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1467         else
1468                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1469
1470         if (old_rx_mode != tp->rx_mode)
1471                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1472
1473         if (flowctrl & FLOW_CTRL_TX)
1474                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1475         else
1476                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1477
1478         if (old_tx_mode != tp->tx_mode)
1479                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1480 }
1481
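/* Link-change callback registered with phylib via phy_connect().
 * Synchronizes the MAC port mode, duplex and flow control with the
 * current PHY state and emits a link report when anything changed.
 */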
1482 static void tg3_adjust_link(struct net_device *dev)
1483 {
1484         u8 oldflowctrl, linkmesg = 0;
1485         u32 mac_mode, lcl_adv, rmt_adv;
1486         struct tg3 *tp = netdev_priv(dev);
1487         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1488
1489         spin_lock_bh(&tp->lock);
1490
1491         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1492                                     MAC_MODE_HALF_DUPLEX);
1493
1494         oldflowctrl = tp->link_config.active_flowctrl;
1495
1496         if (phydev->link) {
1497                 lcl_adv = 0;
1498                 rmt_adv = 0;
1499
1500                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1501                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1502                 else if (phydev->speed == SPEED_1000 ||
1503                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1504                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1505                 else
1506                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1507
1508                 if (phydev->duplex == DUPLEX_HALF)
1509                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1510                 else {
1511                         lcl_adv = tg3_advert_flowctrl_1000T(
1512                                   tp->link_config.flowctrl);
1513
1514                         if (phydev->pause)
1515                                 rmt_adv = LPA_PAUSE_CAP;
1516                         if (phydev->asym_pause)
1517                                 rmt_adv |= LPA_PAUSE_ASYM;
1518                 }
1519
1520                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1521         } else
1522                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1523
1524         if (mac_mode != tp->mac_mode) {
1525                 tp->mac_mode = mac_mode;
1526                 tw32_f(MAC_MODE, tp->mac_mode);
1527                 udelay(40);
1528         }
1529
1530         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1531                 if (phydev->speed == SPEED_10)
1532                         tw32(MAC_MI_STAT,
1533                              MAC_MI_STAT_10MBPS_MODE |
1534                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1535                 else
1536                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1537         }
1538
1539         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1540                 tw32(MAC_TX_LENGTHS,
1541                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1542                       (6 << TX_LENGTHS_IPG_SHIFT) |
1543                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1544         else
1545                 tw32(MAC_TX_LENGTHS,
1546                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1547                       (6 << TX_LENGTHS_IPG_SHIFT) |
1548                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1549
1550         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1551             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1552             phydev->speed != tp->link_config.active_speed ||
1553             phydev->duplex != tp->link_config.active_duplex ||
1554             oldflowctrl != tp->link_config.active_flowctrl)
1555                 linkmesg = 1;
1556
1557         tp->link_config.active_speed = phydev->speed;
1558         tp->link_config.active_duplex = phydev->duplex;
1559
1560         spin_unlock_bh(&tp->lock);
1561
1562         if (linkmesg)
1563                 tg3_link_report(tp);
1564 }
1565
1566 static int tg3_phy_init(struct tg3 *tp)
1567 {
1568         struct phy_device *phydev;
1569
1570         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1571                 return 0;
1572
1573         /* Bring the PHY back to a known state. */
1574         tg3_bmcr_reset(tp);
1575
1576         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1577
1578         /* Attach the MAC to the PHY. */
1579         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1580                              phydev->dev_flags, phydev->interface);
1581         if (IS_ERR(phydev)) {
1582                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1583                 return PTR_ERR(phydev);
1584         }
1585
1586         /* Mask with MAC supported features. */
1587         switch (phydev->interface) {
1588         case PHY_INTERFACE_MODE_GMII:
1589         case PHY_INTERFACE_MODE_RGMII:
1590                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1591                         phydev->supported &= (PHY_GBIT_FEATURES |
1592                                               SUPPORTED_Pause |
1593                                               SUPPORTED_Asym_Pause);
1594                         break;
1595                 }
1596                 /* fallthru */
1597         case PHY_INTERFACE_MODE_MII:
1598                 phydev->supported &= (PHY_BASIC_FEATURES |
1599                                       SUPPORTED_Pause |
1600                                       SUPPORTED_Asym_Pause);
1601                 break;
1602         default:
1603                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1604                 return -EINVAL;
1605         }
1606
1607         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1608
1609         phydev->advertising = phydev->supported;
1610
1611         return 0;
1612 }
1613
1614 static void tg3_phy_start(struct tg3 *tp)
1615 {
1616         struct phy_device *phydev;
1617
1618         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1619                 return;
1620
1621         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1622
1623         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1624                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1625                 phydev->speed = tp->link_config.orig_speed;
1626                 phydev->duplex = tp->link_config.orig_duplex;
1627                 phydev->autoneg = tp->link_config.orig_autoneg;
1628                 phydev->advertising = tp->link_config.orig_advertising;
1629         }
1630
1631         phy_start(phydev);
1632
1633         phy_start_aneg(phydev);
1634 }
1635
1636 static void tg3_phy_stop(struct tg3 *tp)
1637 {
1638         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1639                 return;
1640
1641         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1642 }
1643
1644 static void tg3_phy_fini(struct tg3 *tp)
1645 {
1646         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1647                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1648                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1649         }
1650 }
1651
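/* Toggle auto power-down (APD) on FET-style PHYs: open shadow register
 * access through MII_TG3_FET_TEST, read-modify-write the APD bit in
 * AUXSTAT2, then restore the original test register value.
 */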
1652 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1653 {
1654         u32 phytest;
1655
1656         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1657                 u32 phy;
1658
1659                 tg3_writephy(tp, MII_TG3_FET_TEST,
1660                              phytest | MII_TG3_FET_SHADOW_EN);
1661                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1662                         if (enable)
1663                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1664                         else
1665                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1666                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1667                 }
1668                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1669         }
1670 }
1671
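/* Enable or disable auto power-down (APD) on the PHY.  FET PHYs go
 * through tg3_phy_fet_toggle_apd(); others are programmed via the MISC
 * shadow registers, including an 84ms wake timer.
 */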
1672 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1673 {
1674         u32 reg;
1675
1676         if (!tg3_flag(tp, 5705_PLUS) ||
1677             (tg3_flag(tp, 5717_PLUS) &&
1678              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1679                 return;
1680
1681         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1682                 tg3_phy_fet_toggle_apd(tp, enable);
1683                 return;
1684         }
1685
1686         reg = MII_TG3_MISC_SHDW_WREN |
1687               MII_TG3_MISC_SHDW_SCR5_SEL |
1688               MII_TG3_MISC_SHDW_SCR5_LPED |
1689               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1690               MII_TG3_MISC_SHDW_SCR5_SDTL |
1691               MII_TG3_MISC_SHDW_SCR5_C125OE;
1692         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1693                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1694
1695         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1696
1697
1698         reg = MII_TG3_MISC_SHDW_WREN |
1699               MII_TG3_MISC_SHDW_APD_SEL |
1700               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1701         if (enable)
1702                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1703
1704         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1705 }
1706
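/* Enable or disable automatic MDI/MDI-X crossover.  FET PHYs use the
 * shadowed MISCCTRL register; other PHYs use the FORCE_AMDIX bit in
 * the auxiliary control MISC shadow register.
 */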
1707 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1708 {
1709         u32 phy;
1710
1711         if (!tg3_flag(tp, 5705_PLUS) ||
1712             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1713                 return;
1714
1715         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1716                 u32 ephy;
1717
1718                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1719                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1720
1721                         tg3_writephy(tp, MII_TG3_FET_TEST,
1722                                      ephy | MII_TG3_FET_SHADOW_EN);
1723                         if (!tg3_readphy(tp, reg, &phy)) {
1724                                 if (enable)
1725                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1726                                 else
1727                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1728                                 tg3_writephy(tp, reg, phy);
1729                         }
1730                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1731                 }
1732         } else {
1733                 int ret;
1734
1735                 ret = tg3_phy_auxctl_read(tp,
1736                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1737                 if (!ret) {
1738                         if (enable)
1739                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1740                         else
1741                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1742                         tg3_phy_auxctl_write(tp,
1743                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1744                 }
1745         }
1746 }
1747
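/* Enable the PHY's Ethernet@Wirespeed feature (automatic link
 * downshift) unless TG3_PHYFLG_NO_ETH_WIRE_SPEED is set.
 */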
1748 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1749 {
1750         int ret;
1751         u32 val;
1752
1753         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1754                 return;
1755
1756         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1757         if (!ret)
1758                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1759                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1760 }
1761
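/* Unpack the PHY calibration fields stored in the chip's OTP
 * (one-time programmable) word and program them into the
 * corresponding PHY DSP registers.
 */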
1762 static void tg3_phy_apply_otp(struct tg3 *tp)
1763 {
1764         u32 otp, phy;
1765
1766         if (!tp->phy_otp)
1767                 return;
1768
1769         otp = tp->phy_otp;
1770
1771         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1772                 return;
1773
1774         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1775         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1776         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1777
1778         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1779               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1780         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1781
1782         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1783         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1784         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1785
1786         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1787         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1788
1789         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1790         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1791
1792         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1793               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1794         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1795
1796         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1797 }
1798
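/* Adjust the Energy Efficient Ethernet (EEE) state after a link
 * change: on an autonegotiated full-duplex 100/1000 link, set the LPI
 * exit timer and check the link partner's EEE resolution; otherwise
 * make sure LPI generation is disabled.
 */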
1799 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1800 {
1801         u32 val;
1802
1803         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1804                 return;
1805
1806         tp->setlpicnt = 0;
1807
1808         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1809             current_link_up == 1 &&
1810             tp->link_config.active_duplex == DUPLEX_FULL &&
1811             (tp->link_config.active_speed == SPEED_100 ||
1812              tp->link_config.active_speed == SPEED_1000)) {
1813                 u32 eeectl;
1814
1815                 if (tp->link_config.active_speed == SPEED_1000)
1816                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1817                 else
1818                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1819
1820                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1821
1822                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1823                                   TG3_CL45_D7_EEERES_STAT, &val);
1824
1825                 switch (val) {
1826                 case TG3_CL45_D7_EEERES_STAT_LP_1000T:
1827                         switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
1828                         case ASIC_REV_5717:
1829                         case ASIC_REV_5719:
1830                         case ASIC_REV_57765:
1831                                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1832                                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26,
1833                                                          0x0000);
1834                                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1835                                 }
1836                         }
1837                         /* Fallthrough */
1838                 case TG3_CL45_D7_EEERES_STAT_LP_100TX:
1839                         tp->setlpicnt = 2;
1840                 }
1841         }
1842
1843         if (!tp->setlpicnt) {
1844                 val = tr32(TG3_CPMU_EEE_MODE);
1845                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1846         }
1847 }
1848
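/* Poll MII_TG3_DSP_CONTROL until bit 0x1000 clears, returning -EBUSY
 * if the bit never clears.
 */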
1849 static int tg3_wait_macro_done(struct tg3 *tp)
1850 {
1851         int limit = 100;
1852
1853         while (limit--) {
1854                 u32 tmp32;
1855
1856                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1857                         if ((tmp32 & 0x1000) == 0)
1858                                 break;
1859                 }
1860         }
1861         if (limit < 0)
1862                 return -EBUSY;
1863
1864         return 0;
1865 }
1866
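/* Write a known test pattern into each of the four PHY DSP channels
 * and read it back to verify.  On any mismatch or macro timeout,
 * request a fresh PHY reset via *resetp and return -EBUSY.
 */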
1867 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1868 {
1869         static const u32 test_pat[4][6] = {
1870         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1871         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1872         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1873         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1874         };
1875         int chan;
1876
1877         for (chan = 0; chan < 4; chan++) {
1878                 int i;
1879
1880                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1881                              (chan * 0x2000) | 0x0200);
1882                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1883
1884                 for (i = 0; i < 6; i++)
1885                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1886                                      test_pat[chan][i]);
1887
1888                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1889                 if (tg3_wait_macro_done(tp)) {
1890                         *resetp = 1;
1891                         return -EBUSY;
1892                 }
1893
1894                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1895                              (chan * 0x2000) | 0x0200);
1896                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1897                 if (tg3_wait_macro_done(tp)) {
1898                         *resetp = 1;
1899                         return -EBUSY;
1900                 }
1901
1902                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1903                 if (tg3_wait_macro_done(tp)) {
1904                         *resetp = 1;
1905                         return -EBUSY;
1906                 }
1907
1908                 for (i = 0; i < 6; i += 2) {
1909                         u32 low, high;
1910
1911                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1912                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1913                             tg3_wait_macro_done(tp)) {
1914                                 *resetp = 1;
1915                                 return -EBUSY;
1916                         }
1917                         low &= 0x7fff;
1918                         high &= 0x000f;
1919                         if (low != test_pat[chan][i] ||
1920                             high != test_pat[chan][i+1]) {
1921                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1922                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1923                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1924
1925                                 return -EBUSY;
1926                         }
1927                 }
1928         }
1929
1930         return 0;
1931 }
1932
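/* Write zeros to the test pattern of all four PHY DSP channels. */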
1933 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1934 {
1935         int chan;
1936
1937         for (chan = 0; chan < 4; chan++) {
1938                 int i;
1939
1940                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1941                              (chan * 0x2000) | 0x0200);
1942                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1943                 for (i = 0; i < 6; i++)
1944                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1945                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1946                 if (tg3_wait_macro_done(tp))
1947                         return -EBUSY;
1948         }
1949
1950         return 0;
1951 }
1952
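/* PHY reset workaround for the 5703, 5704 and 5705: repeatedly reset
 * the PHY, force 1000 Mbps full-duplex master mode and rewrite the
 * DSP test pattern until it reads back correctly.
 */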
1953 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1954 {
1955         u32 reg32, phy9_orig;
1956         int retries, do_phy_reset, err;
1957
1958         retries = 10;
1959         do_phy_reset = 1;
1960         do {
1961                 if (do_phy_reset) {
1962                         err = tg3_bmcr_reset(tp);
1963                         if (err)
1964                                 return err;
1965                         do_phy_reset = 0;
1966                 }
1967
1968                 /* Disable transmitter and interrupt.  */
1969                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1970                         continue;
1971
1972                 reg32 |= 0x3000;
1973                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1974
1975                 /* Set full-duplex, 1000 Mbps.  */
1976                 tg3_writephy(tp, MII_BMCR,
1977                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1978
1979                 /* Set to master mode.  */
1980                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1981                         continue;
1982
1983                 tg3_writephy(tp, MII_TG3_CTRL,
1984                              (MII_TG3_CTRL_AS_MASTER |
1985                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1986
1987                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1988                 if (err)
1989                         return err;
1990
1991                 /* Block the PHY control access.  */
1992                 tg3_phydsp_write(tp, 0x8005, 0x0800);
1993
1994                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1995                 if (!err)
1996                         break;
1997         } while (--retries);
1998
1999         err = tg3_phy_reset_chanpat(tp);
2000         if (err)
2001                 return err;
2002
2003         tg3_phydsp_write(tp, 0x8005, 0x0000);
2004
2005         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2006         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2007
2008         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2009
2010         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
2011
2012         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2013                 reg32 &= ~0x3000;
2014                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2015         } else if (!err)
2016                 err = -EBUSY;
2017
2018         return err;
2019 }
2020
2021 /* Reset the tigon3 PHY and reapply the chip-specific
2022  * workarounds and DSP fixups that a reset clears.
2023  */
2024 static int tg3_phy_reset(struct tg3 *tp)
2025 {
2026         u32 val, cpmuctrl;
2027         int err;
2028
2029         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2030                 val = tr32(GRC_MISC_CFG);
2031                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2032                 udelay(40);
2033         }
2034         err  = tg3_readphy(tp, MII_BMSR, &val);
2035         err |= tg3_readphy(tp, MII_BMSR, &val);
2036         if (err != 0)
2037                 return -EBUSY;
2038
2039         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2040                 netif_carrier_off(tp->dev);
2041                 tg3_link_report(tp);
2042         }
2043
2044         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2045             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2046             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2047                 err = tg3_phy_reset_5703_4_5(tp);
2048                 if (err)
2049                         return err;
2050                 goto out;
2051         }
2052
2053         cpmuctrl = 0;
2054         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2055             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2056                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2057                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2058                         tw32(TG3_CPMU_CTRL,
2059                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2060         }
2061
2062         err = tg3_bmcr_reset(tp);
2063         if (err)
2064                 return err;
2065
2066         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2067                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2068                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2069
2070                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2071         }
2072
2073         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2074             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2075                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2076                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2077                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2078                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2079                         udelay(40);
2080                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2081                 }
2082         }
2083
2084         if (tg3_flag(tp, 5717_PLUS) &&
2085             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2086                 return 0;
2087
2088         tg3_phy_apply_otp(tp);
2089
2090         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2091                 tg3_phy_toggle_apd(tp, true);
2092         else
2093                 tg3_phy_toggle_apd(tp, false);
2094
2095 out:
2096         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2097             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2098                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2099                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2100                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2101         }
2102
2103         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2104                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2105                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2106         }
2107
2108         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2109                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2110                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2111                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2112                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2113                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2114                 }
2115         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2116                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2117                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2118                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2119                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2120                                 tg3_writephy(tp, MII_TG3_TEST1,
2121                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2122                         } else
2123                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2124
2125                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2126                 }
2127         }
2128
2129         /* Set Extended packet length bit (bit 14) on all chips
2130          * that support jumbo frames.  */
2131         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2132                 /* Cannot do read-modify-write on 5401 */
2133                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2134         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2135                 /* Set bit 14 with read-modify-write to preserve other bits */
2136                 err = tg3_phy_auxctl_read(tp,
2137                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2138                 if (!err)
2139                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2140                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2141         }
2142
2143         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2144          * jumbo frame transmission.
2145          */
2146         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2147                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2148                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2149                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2150         }
2151
2152         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2153                 /* adjust output voltage */
2154                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2155         }
2156
2157         tg3_phy_toggle_automdix(tp, 1);
2158         tg3_phy_set_wirespeed(tp);
2159         return 0;
2160 }
2161
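/* Decide whether this device (or the peer function on two-port chips)
 * still needs auxiliary power for WOL or ASF, and sequence the GRC
 * local-control GPIOs to switch Vaux on or off accordingly.
 */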
2162 static void tg3_frob_aux_power(struct tg3 *tp)
2163 {
2164         bool need_vaux = false;
2165
2166         /* The GPIOs do something completely different on 57765. */
2167         if (!tg3_flag(tp, IS_NIC) ||
2168             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2169             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2170                 return;
2171
2172         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2173              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2174              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2175              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2176             tp->pdev_peer != tp->pdev) {
2177                 struct net_device *dev_peer;
2178
2179                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2180
2181                 /* remove_one() may have been run on the peer. */
2182                 if (dev_peer) {
2183                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2184
2185                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2186                                 return;
2187
2188                         if (tg3_flag(tp_peer, WOL_ENABLE) ||
2189                             tg3_flag(tp_peer, ENABLE_ASF))
2190                                 need_vaux = true;
2191                 }
2192         }
2193
2194         if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2195                 need_vaux = true;
2196
2197         if (need_vaux) {
2198                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2199                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2200                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2201                                     (GRC_LCLCTRL_GPIO_OE0 |
2202                                      GRC_LCLCTRL_GPIO_OE1 |
2203                                      GRC_LCLCTRL_GPIO_OE2 |
2204                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2205                                      GRC_LCLCTRL_GPIO_OUTPUT1),
2206                                     100);
2207                 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2208                            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2209                         /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2210                         u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2211                                              GRC_LCLCTRL_GPIO_OE1 |
2212                                              GRC_LCLCTRL_GPIO_OE2 |
2213                                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2214                                              GRC_LCLCTRL_GPIO_OUTPUT1 |
2215                                              tp->grc_local_ctrl;
2216                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2217
2218                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2219                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2220
2221                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2222                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2223                 } else {
2224                         u32 no_gpio2;
2225                         u32 grc_local_ctrl = 0;
2226
2227                         /* Workaround to prevent drawing too much current. */
2228                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2229                             ASIC_REV_5714) {
2230                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2231                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2232                                             grc_local_ctrl, 100);
2233                         }
2234
2235                         /* On 5753 and variants, GPIO2 cannot be used. */
2236                         no_gpio2 = tp->nic_sram_data_cfg &
2237                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
2238
2239                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2240                                          GRC_LCLCTRL_GPIO_OE1 |
2241                                          GRC_LCLCTRL_GPIO_OE2 |
2242                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
2243                                          GRC_LCLCTRL_GPIO_OUTPUT2;
2244                         if (no_gpio2) {
2245                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2246                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
2247                         }
2248                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2249                                                     grc_local_ctrl, 100);
2250
2251                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2252
2253                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2254                                                     grc_local_ctrl, 100);
2255
2256                         if (!no_gpio2) {
2257                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2258                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2259                                             grc_local_ctrl, 100);
2260                         }
2261                 }
2262         } else {
2263                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2264                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2265                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2266                                     (GRC_LCLCTRL_GPIO_OE1 |
2267                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2268
2269                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2270                                     GRC_LCLCTRL_GPIO_OE1, 100);
2271
2272                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2273                                     (GRC_LCLCTRL_GPIO_OE1 |
2274                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2275                 }
2276         }
2277 }
2278
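/* Return nonzero if MAC_MODE_LINK_POLARITY should be set for the
 * given link speed; the answer depends on the LED mode and on whether
 * a BCM5411 PHY is installed.
 */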
2279 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2280 {
2281         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2282                 return 1;
2283         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2284                 if (speed != SPEED_10)
2285                         return 1;
2286         } else if (speed == SPEED_10)
2287                 return 1;
2288
2289         return 0;
2290 }
2291
2292 static int tg3_setup_phy(struct tg3 *, int);
2293
2294 #define RESET_KIND_SHUTDOWN     0
2295 #define RESET_KIND_INIT         1
2296 #define RESET_KIND_SUSPEND      2
2297
2298 static void tg3_write_sig_post_reset(struct tg3 *, int);
2299 static int tg3_halt_cpu(struct tg3 *, u32);
2300
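/* Power the PHY down as far as this chip allows: serdes, 5906 and
 * FET-style devices get special handling, and a few chips must keep
 * the PHY powered because of hardware bugs.
 */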
2301 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2302 {
2303         u32 val;
2304
2305         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2306                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2307                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2308                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2309
2310                         sg_dig_ctrl |=
2311                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2312                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2313                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2314                 }
2315                 return;
2316         }
2317
2318         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2319                 tg3_bmcr_reset(tp);
2320                 val = tr32(GRC_MISC_CFG);
2321                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2322                 udelay(40);
2323                 return;
2324         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2325                 u32 phytest;
2326                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2327                         u32 phy;
2328
2329                         tg3_writephy(tp, MII_ADVERTISE, 0);
2330                         tg3_writephy(tp, MII_BMCR,
2331                                      BMCR_ANENABLE | BMCR_ANRESTART);
2332
2333                         tg3_writephy(tp, MII_TG3_FET_TEST,
2334                                      phytest | MII_TG3_FET_SHADOW_EN);
2335                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2336                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2337                                 tg3_writephy(tp,
2338                                              MII_TG3_FET_SHDW_AUXMODE4,
2339                                              phy);
2340                         }
2341                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2342                 }
2343                 return;
2344         } else if (do_low_power) {
2345                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2346                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2347
2348                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2349                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2350                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2351                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2352         }
2353
2354         /* The PHY should not be powered down on some chips because
2355          * of bugs.
2356          */
2357         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2358             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2359             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2360              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2361                 return;
2362
2363         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2364             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2365                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2366                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2367                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2368                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2369         }
2370
2371         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2372 }
2373
2374 /* tp->lock is held. */
2375 static int tg3_nvram_lock(struct tg3 *tp)
2376 {
2377         if (tg3_flag(tp, NVRAM)) {
2378                 int i;
2379
2380                 if (tp->nvram_lock_cnt == 0) {
2381                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2382                         for (i = 0; i < 8000; i++) {
2383                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2384                                         break;
2385                                 udelay(20);
2386                         }
2387                         if (i == 8000) {
2388                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2389                                 return -ENODEV;
2390                         }
2391                 }
2392                 tp->nvram_lock_cnt++;
2393         }
2394         return 0;
2395 }
2396
2397 /* tp->lock is held. */
2398 static void tg3_nvram_unlock(struct tg3 *tp)
2399 {
2400         if (tg3_flag(tp, NVRAM)) {
2401                 if (tp->nvram_lock_cnt > 0)
2402                         tp->nvram_lock_cnt--;
2403                 if (tp->nvram_lock_cnt == 0)
2404                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2405         }
2406 }
2407
2408 /* tp->lock is held. */
2409 static void tg3_enable_nvram_access(struct tg3 *tp)
2410 {
2411         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2412                 u32 nvaccess = tr32(NVRAM_ACCESS);
2413
2414                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2415         }
2416 }
2417
2418 /* tp->lock is held. */
2419 static void tg3_disable_nvram_access(struct tg3 *tp)
2420 {
2421         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2422                 u32 nvaccess = tr32(NVRAM_ACCESS);
2423
2424                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2425         }
2426 }
2427
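/* Read one 32-bit word through the legacy EEPROM interface: program
 * GRC_EEPROM_ADDR, poll for EEPROM_ADDR_COMPLETE, then blindly
 * byteswap the data to compensate for the controller's endianness.
 */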
2428 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2429                                         u32 offset, u32 *val)
2430 {
2431         u32 tmp;
2432         int i;
2433
2434         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2435                 return -EINVAL;
2436
2437         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2438                                         EEPROM_ADDR_DEVID_MASK |
2439                                         EEPROM_ADDR_READ);
2440         tw32(GRC_EEPROM_ADDR,
2441              tmp |
2442              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2443              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2444               EEPROM_ADDR_ADDR_MASK) |
2445              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2446
2447         for (i = 0; i < 1000; i++) {
2448                 tmp = tr32(GRC_EEPROM_ADDR);
2449
2450                 if (tmp & EEPROM_ADDR_COMPLETE)
2451                         break;
2452                 msleep(1);
2453         }
2454         if (!(tmp & EEPROM_ADDR_COMPLETE))
2455                 return -EBUSY;
2456
2457         tmp = tr32(GRC_EEPROM_DATA);
2458
2459         /*
2460          * The data will always be opposite the native endian
2461          * format.  Perform a blind byteswap to compensate.
2462          */
2463         *val = swab32(tmp);
2464
2465         return 0;
2466 }
2467
2468 #define NVRAM_CMD_TIMEOUT 10000
2469
2470 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2471 {
2472         int i;
2473
2474         tw32(NVRAM_CMD, nvram_cmd);
2475         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2476                 udelay(10);
2477                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2478                         udelay(10);
2479                         break;
2480                 }
2481         }
2482
2483         if (i == NVRAM_CMD_TIMEOUT)
2484                 return -EBUSY;
2485
2486         return 0;
2487 }
2488
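/* Convert a linear NVRAM offset into the page/byte address form used
 * by buffered Atmel AT45DB0x1B flash parts; other NVRAM types use the
 * offset unchanged.
 */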
2489 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2490 {
2491         if (tg3_flag(tp, NVRAM) &&
2492             tg3_flag(tp, NVRAM_BUFFERED) &&
2493             tg3_flag(tp, FLASH) &&
2494             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2495             (tp->nvram_jedecnum == JEDEC_ATMEL))
2496
2497                 addr = ((addr / tp->nvram_pagesize) <<
2498                         ATMEL_AT45DB0X1B_PAGE_POS) +
2499                        (addr % tp->nvram_pagesize);
2500
2501         return addr;
2502 }
2503
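/* The inverse of tg3_nvram_phys_addr(): convert a page/byte address
 * back into a linear NVRAM offset.
 */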
2504 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2505 {
2506         if (tg3_flag(tp, NVRAM) &&
2507             tg3_flag(tp, NVRAM_BUFFERED) &&
2508             tg3_flag(tp, FLASH) &&
2509             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2510             (tp->nvram_jedecnum == JEDEC_ATMEL))
2511
2512                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2513                         tp->nvram_pagesize) +
2514                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2515
2516         return addr;
2517 }
2518
2519 /* NOTE: Data read in from NVRAM is byteswapped according to
2520  * the byteswapping settings for all other register accesses.
2521  * tg3 devices are BE devices, so on a BE machine, the data
2522  * returned will be exactly as it is seen in NVRAM.  On a LE
2523  * machine, the 32-bit value will be byteswapped.
2524  */
2525 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2526 {
2527         int ret;
2528
2529         if (!tg3_flag(tp, NVRAM))
2530                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2531
2532         offset = tg3_nvram_phys_addr(tp, offset);
2533
2534         if (offset > NVRAM_ADDR_MSK)
2535                 return -EINVAL;
2536
2537         ret = tg3_nvram_lock(tp);
2538         if (ret)
2539                 return ret;
2540
2541         tg3_enable_nvram_access(tp);
2542
2543         tw32(NVRAM_ADDR, offset);
2544         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2545                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2546
2547         if (ret == 0)
2548                 *val = tr32(NVRAM_RDDATA);
2549
2550         tg3_disable_nvram_access(tp);
2551
2552         tg3_nvram_unlock(tp);
2553
2554         return ret;
2555 }
2556
2557 /* Ensures NVRAM data is in bytestream format. */
2558 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2559 {
2560         u32 v;
2561         int res = tg3_nvram_read(tp, offset, &v);
2562         if (!res)
2563                 *val = cpu_to_be32(v);
2564         return res;
2565 }
2566
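/* Program the station address into all four MAC address slots (and
 * the extended slots on 5703/5704), then derive the TX backoff seed
 * from the address bytes.
 */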
2567 /* tp->lock is held. */
2568 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2569 {
2570         u32 addr_high, addr_low;
2571         int i;
2572
2573         addr_high = ((tp->dev->dev_addr[0] << 8) |
2574                      tp->dev->dev_addr[1]);
2575         addr_low = ((tp->dev->dev_addr[2] << 24) |
2576                     (tp->dev->dev_addr[3] << 16) |
2577                     (tp->dev->dev_addr[4] <<  8) |
2578                     (tp->dev->dev_addr[5] <<  0));
2579         for (i = 0; i < 4; i++) {
2580                 if (i == 1 && skip_mac_1)
2581                         continue;
2582                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2583                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2584         }
2585
2586         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2587             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2588                 for (i = 0; i < 12; i++) {
2589                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2590                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2591                 }
2592         }
2593
2594         addr_high = (tp->dev->dev_addr[0] +
2595                      tp->dev->dev_addr[1] +
2596                      tp->dev->dev_addr[2] +
2597                      tp->dev->dev_addr[3] +
2598                      tp->dev->dev_addr[4] +
2599                      tp->dev->dev_addr[5]) &
2600                 TX_BACKOFF_SEED_MASK;
2601         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2602 }
2603
2604 static void tg3_enable_register_access(struct tg3 *tp)
2605 {
2606         /*
2607          * Make sure register accesses (indirect or otherwise) will function
2608          * correctly.
2609          */
2610         pci_write_config_dword(tp->pdev,
2611                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2612 }
2613
2614 static int tg3_power_up(struct tg3 *tp)
2615 {
2616         tg3_enable_register_access(tp);
2617
2618         pci_set_power_state(tp->pdev, PCI_D0);
2619
2620         /* Switch out of Vaux if it is a NIC */
2621         if (tg3_flag(tp, IS_NIC))
2622                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2623
2624         return 0;
2625 }
2626
2627 static int tg3_power_down_prepare(struct tg3 *tp)
2628 {
2629         u32 misc_host_ctrl;
2630         bool device_should_wake, do_low_power;
2631
2632         tg3_enable_register_access(tp);
2633
2634         /* Restore the CLKREQ setting. */
2635         if (tg3_flag(tp, CLKREQ_BUG)) {
2636                 u16 lnkctl;
2637
2638                 pci_read_config_word(tp->pdev,
2639                                      tp->pcie_cap + PCI_EXP_LNKCTL,
2640                                      &lnkctl);
2641                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2642                 pci_write_config_word(tp->pdev,
2643                                       tp->pcie_cap + PCI_EXP_LNKCTL,
2644                                       lnkctl);
2645         }
2646
2647         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2648         tw32(TG3PCI_MISC_HOST_CTRL,
2649              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2650
2651         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2652                              tg3_flag(tp, WOL_ENABLE);
2653
2654         if (tg3_flag(tp, USE_PHYLIB)) {
2655                 do_low_power = false;
2656                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2657                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2658                         struct phy_device *phydev;
2659                         u32 phyid, advertising;
2660
2661                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2662
2663                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2664
2665                         tp->link_config.orig_speed = phydev->speed;
2666                         tp->link_config.orig_duplex = phydev->duplex;
2667                         tp->link_config.orig_autoneg = phydev->autoneg;
2668                         tp->link_config.orig_advertising = phydev->advertising;
2669
2670                         advertising = ADVERTISED_TP |
2671                                       ADVERTISED_Pause |
2672                                       ADVERTISED_Autoneg |
2673                                       ADVERTISED_10baseT_Half;
2674
2675                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2676                                 if (tg3_flag(tp, WOL_SPEED_100MB))
2677                                         advertising |=
2678                                                 ADVERTISED_100baseT_Half |
2679                                                 ADVERTISED_100baseT_Full |
2680                                                 ADVERTISED_10baseT_Full;
2681                                 else
2682                                         advertising |= ADVERTISED_10baseT_Full;
2683                         }
2684
2685                         phydev->advertising = advertising;
2686
2687                         phy_start_aneg(phydev);
2688
2689                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2690                         if (phyid != PHY_ID_BCMAC131) {
2691                                 phyid &= PHY_BCM_OUI_MASK;
2692                                 if (phyid == PHY_BCM_OUI_1 ||
2693                                     phyid == PHY_BCM_OUI_2 ||
2694                                     phyid == PHY_BCM_OUI_3)
2695                                         do_low_power = true;
2696                         }
2697                 }
2698         } else {
2699                 do_low_power = true;
2700
2701                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2702                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2703                         tp->link_config.orig_speed = tp->link_config.speed;
2704                         tp->link_config.orig_duplex = tp->link_config.duplex;
2705                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2706                 }
2707
2708                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2709                         tp->link_config.speed = SPEED_10;
2710                         tp->link_config.duplex = DUPLEX_HALF;
2711                         tp->link_config.autoneg = AUTONEG_ENABLE;
2712                         tg3_setup_phy(tp, 0);
2713                 }
2714         }
2715
2716         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2717                 u32 val;
2718
2719                 val = tr32(GRC_VCPU_EXT_CTRL);
2720                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2721         } else if (!tg3_flag(tp, ENABLE_ASF)) {
2722                 int i;
2723                 u32 val;
2724
2725                 for (i = 0; i < 200; i++) {
2726                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2727                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2728                                 break;
2729                         msleep(1);
2730                 }
2731         }
2732         if (tg3_flag(tp, WOL_CAP))
2733                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2734                                                      WOL_DRV_STATE_SHUTDOWN |
2735                                                      WOL_DRV_WOL |
2736                                                      WOL_SET_MAGIC_PKT);
2737
2738         if (device_should_wake) {
2739                 u32 mac_mode;
2740
2741                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2742                         if (do_low_power &&
2743                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2744                                 tg3_phy_auxctl_write(tp,
2745                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2746                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
2747                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2748                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2749                                 udelay(40);
2750                         }
2751
2752                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2753                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2754                         else
2755                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2756
2757                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2758                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2759                             ASIC_REV_5700) {
2760                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2761                                              SPEED_100 : SPEED_10;
2762                                 if (tg3_5700_link_polarity(tp, speed))
2763                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2764                                 else
2765                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2766                         }
2767                 } else {
2768                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2769                 }
2770
2771                 if (!tg3_flag(tp, 5750_PLUS))
2772                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2773
2774                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2775                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2776                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2777                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2778
2779                 if (tg3_flag(tp, ENABLE_APE))
2780                         mac_mode |= MAC_MODE_APE_TX_EN |
2781                                     MAC_MODE_APE_RX_EN |
2782                                     MAC_MODE_TDE_ENABLE;
2783
2784                 tw32_f(MAC_MODE, mac_mode);
2785                 udelay(100);
2786
2787                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2788                 udelay(10);
2789         }
2790
        if (!tg3_flag(tp, WOL_SPEED_100MB) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 base_val;

                base_val = tp->pci_clock_ctrl;
                base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
                             CLOCK_CTRL_TXCLK_DISABLE);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
                            CLOCK_CTRL_PWRDOWN_PLL133, 40);
        } else if (tg3_flag(tp, 5780_CLASS) ||
                   tg3_flag(tp, CPMU_PRESENT) ||
                   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
                /* do nothing */
        } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
                u32 newbits1, newbits2;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                } else if (tg3_flag(tp, 5705_PLUS)) {
                        newbits1 = CLOCK_CTRL_625_CORE;
                        newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
                } else {
                        newbits1 = CLOCK_CTRL_ALTCLK;
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                }

                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
                            40);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
                            40);

                if (!tg3_flag(tp, 5705_PLUS)) {
                        u32 newbits3;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                                newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
                                            CLOCK_CTRL_TXCLK_DISABLE |
                                            CLOCK_CTRL_44MHZ_CORE);
                        } else {
                                newbits3 = CLOCK_CTRL_44MHZ_CORE;
                        }

                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    tp->pci_clock_ctrl | newbits3, 40);
                }
        }

        if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
                tg3_power_down_phy(tp, do_low_power);

        tg3_frob_aux_power(tp);

        /* Workaround for unstable PLL clock */
        if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
            (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
                u32 val = tr32(0x7d00);

                val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
                tw32(0x7d00, val);
                if (!tg3_flag(tp, ENABLE_ASF)) {
                        int err;

                        err = tg3_nvram_lock(tp);
                        tg3_halt_cpu(tp, RX_CPU_BASE);
                        if (!err)
                                tg3_nvram_unlock(tp);
                }
        }

        tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

        return 0;
}

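/* Final power-down step: arm PCI wake-on-LAN (when WOL is enabled) and
 * drop the device into D3hot.
 */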
static void tg3_power_down(struct tg3 *tp)
{
        tg3_power_down_prepare(tp);

        pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
        pci_set_power_state(tp->pdev, PCI_D3hot);
}

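/* Decode the Broadcom AUX status register's speed/duplex field into
 * SPEED_* / DUPLEX_* values.  FET-class PHYs encode the link result
 * differently, hence the extra decoding in the default case.
 */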
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
        switch (val & MII_TG3_AUX_STAT_SPDMASK) {
        case MII_TG3_AUX_STAT_10HALF:
                *speed = SPEED_10;
                *duplex = DUPLEX_HALF;
                break;

        case MII_TG3_AUX_STAT_10FULL:
                *speed = SPEED_10;
                *duplex = DUPLEX_FULL;
                break;

        case MII_TG3_AUX_STAT_100HALF:
                *speed = SPEED_100;
                *duplex = DUPLEX_HALF;
                break;

        case MII_TG3_AUX_STAT_100FULL:
                *speed = SPEED_100;
                *duplex = DUPLEX_FULL;
                break;

        case MII_TG3_AUX_STAT_1000HALF:
                *speed = SPEED_1000;
                *duplex = DUPLEX_HALF;
                break;

        case MII_TG3_AUX_STAT_1000FULL:
                *speed = SPEED_1000;
                *duplex = DUPLEX_FULL;
                break;

        default:
                if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                        *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
                                 SPEED_10;
                        *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
                                  DUPLEX_HALF;
                        break;
                }
                *speed = SPEED_INVALID;
                *duplex = DUPLEX_INVALID;
                break;
        }
}

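/* Program the copper PHY advertisement registers from link_config and
 * either force the requested speed/duplex or (re)start autonegotiation.
 */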
static void tg3_phy_copper_begin(struct tg3 *tp)
{
        u32 new_adv;
        int i;

        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
                /* Entering low power mode.  Disable gigabit and
                 * 100baseT advertisements.
                 */
                tg3_writephy(tp, MII_TG3_CTRL, 0);

                new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
                           ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
                if (tg3_flag(tp, WOL_SPEED_100MB))
                        new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

                tg3_writephy(tp, MII_ADVERTISE, new_adv);
        } else if (tp->link_config.speed == SPEED_INVALID) {
                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        tp->link_config.advertising &=
                                ~(ADVERTISED_1000baseT_Half |
                                  ADVERTISED_1000baseT_Full);

                new_adv = ADVERTISE_CSMA;
                if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
                        new_adv |= ADVERTISE_10HALF;
                if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
                        new_adv |= ADVERTISE_10FULL;
                if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
                        new_adv |= ADVERTISE_100HALF;
                if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
                        new_adv |= ADVERTISE_100FULL;

                new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

                tg3_writephy(tp, MII_ADVERTISE, new_adv);

                if (tp->link_config.advertising &
                    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
                        new_adv = 0;
                        if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
                                new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
                        if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
                                new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
                        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
                            (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
                                new_adv |= (MII_TG3_CTRL_AS_MASTER |
                                            MII_TG3_CTRL_ENABLE_AS_MASTER);
                        tg3_writephy(tp, MII_TG3_CTRL, new_adv);
                } else {
                        tg3_writephy(tp, MII_TG3_CTRL, 0);
                }
        } else {
                new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
                new_adv |= ADVERTISE_CSMA;

                /* Asking for a specific link mode. */
                if (tp->link_config.speed == SPEED_1000) {
                        tg3_writephy(tp, MII_ADVERTISE, new_adv);

                        if (tp->link_config.duplex == DUPLEX_FULL)
                                new_adv = MII_TG3_CTRL_ADV_1000_FULL;
                        else
                                new_adv = MII_TG3_CTRL_ADV_1000_HALF;
                        if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                            tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
                                new_adv |= (MII_TG3_CTRL_AS_MASTER |
                                            MII_TG3_CTRL_ENABLE_AS_MASTER);
                } else {
                        if (tp->link_config.speed == SPEED_100) {
                                if (tp->link_config.duplex == DUPLEX_FULL)
                                        new_adv |= ADVERTISE_100FULL;
                                else
                                        new_adv |= ADVERTISE_100HALF;
                        } else {
                                if (tp->link_config.duplex == DUPLEX_FULL)
                                        new_adv |= ADVERTISE_10FULL;
                                else
                                        new_adv |= ADVERTISE_10HALF;
                        }
                        tg3_writephy(tp, MII_ADVERTISE, new_adv);

                        new_adv = 0;
                }

                tg3_writephy(tp, MII_TG3_CTRL, new_adv);
        }

        if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
                u32 val;

                tw32(TG3_CPMU_EEE_MODE,
                     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

                TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);

                switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
                case ASIC_REV_5717:
                case ASIC_REV_57765:
                        if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
                                tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
                                                 MII_TG3_DSP_CH34TP2_HIBW01);
                        /* Fall through */
                case ASIC_REV_5719:
                        val = MII_TG3_DSP_TAP26_ALNOKO |
                              MII_TG3_DSP_TAP26_RMRXSTO |
                              MII_TG3_DSP_TAP26_OPCSINPT;
                        tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
                }

                val = 0;
                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                        /* Advertise 100-BaseTX EEE ability */
                        if (tp->link_config.advertising &
                            ADVERTISED_100baseT_Full)
                                val |= MDIO_AN_EEE_ADV_100TX;
                        /* Advertise 1000-BaseT EEE ability */
                        if (tp->link_config.advertising &
                            ADVERTISED_1000baseT_Full)
                                val |= MDIO_AN_EEE_ADV_1000T;
                }
                tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);

                TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
        }

        if (tp->link_config.autoneg == AUTONEG_DISABLE &&
            tp->link_config.speed != SPEED_INVALID) {
                u32 bmcr, orig_bmcr;

                tp->link_config.active_speed = tp->link_config.speed;
                tp->link_config.active_duplex = tp->link_config.duplex;

                bmcr = 0;
                switch (tp->link_config.speed) {
                default:
                case SPEED_10:
                        break;

                case SPEED_100:
                        bmcr |= BMCR_SPEED100;
                        break;

                case SPEED_1000:
                        bmcr |= TG3_BMCR_SPEED1000;
                        break;
                }

                if (tp->link_config.duplex == DUPLEX_FULL)
                        bmcr |= BMCR_FULLDPLX;

                if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
                    (bmcr != orig_bmcr)) {
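                        /* Briefly force loopback, which drops the link
                         * (BMSR_LSTATUS), presumably so the forced mode
                         * below is applied from a clean link-down state;
                         * poll up to ~15 ms for the link to fall.
                         */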
                        tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
                        for (i = 0; i < 1500; i++) {
                                u32 tmp;

                                udelay(10);
                                if (tg3_readphy(tp, MII_BMSR, &tmp) ||
                                    tg3_readphy(tp, MII_BMSR, &tmp))
                                        continue;
                                if (!(tmp & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }
                        tg3_writephy(tp, MII_BMCR, bmcr);
                        udelay(40);
                }
        } else {
                tg3_writephy(tp, MII_BMCR,
                             BMCR_ANENABLE | BMCR_ANRESTART);
        }
}

static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
        int err;

        /* Turn off tap power management and set the
         * extended packet length bit.
         */
        err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

        err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
        err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
        err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
        err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
        err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

        udelay(40);

        return err;
}

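/* Return 1 only if the PHY currently advertises every mode requested
 * in @mask; callers use this to check whether the existing autoneg
 * result matches what was asked for.
 */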
static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
{
        u32 adv_reg, all_mask = 0;

        if (mask & ADVERTISED_10baseT_Half)
                all_mask |= ADVERTISE_10HALF;
        if (mask & ADVERTISED_10baseT_Full)
                all_mask |= ADVERTISE_10FULL;
        if (mask & ADVERTISED_100baseT_Half)
                all_mask |= ADVERTISE_100HALF;
        if (mask & ADVERTISED_100baseT_Full)
                all_mask |= ADVERTISE_100FULL;

        if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
                return 0;

        if ((adv_reg & all_mask) != all_mask)
                return 0;
        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                u32 tg3_ctrl;

                all_mask = 0;
                if (mask & ADVERTISED_1000baseT_Half)
                        all_mask |= ADVERTISE_1000HALF;
                if (mask & ADVERTISED_1000baseT_Full)
                        all_mask |= ADVERTISE_1000FULL;

                if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
                        return 0;

                if ((tg3_ctrl & all_mask) != all_mask)
                        return 0;
        }
        return 1;
}

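/* Check that the advertised PAUSE bits match what link_config requests;
 * on an active full-duplex link also fetch the partner's abilities so
 * flow control can be resolved.
 */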
static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
{
        u32 curadv, reqadv;

        if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
                return 1;

        curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
        reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

        if (tp->link_config.active_duplex == DUPLEX_FULL) {
                if (curadv != reqadv)
                        return 0;

                if (tg3_flag(tp, PAUSE_AUTONEG))
                        tg3_readphy(tp, MII_LPA, rmtadv);
        } else {
                /* Reprogram the advertisement register, even if it
                 * does not affect the current link.  If the link
                 * gets renegotiated in the future, we can save an
                 * additional renegotiation cycle by advertising
                 * it correctly in the first place.
                 */
                if (curadv != reqadv) {
                        *lcladv &= ~(ADVERTISE_PAUSE_CAP |
                                     ADVERTISE_PAUSE_ASYM);
                        tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
                }
        }

        return 1;
}

static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
        int current_link_up;
        u32 bmsr, val;
        u32 lcl_adv, rmt_adv;
        u16 current_speed;
        u8 current_duplex;
        int i, err;

        tw32(MAC_EVENT, 0);

        tw32_f(MAC_STATUS,
             (MAC_STATUS_SYNC_CHANGED |
              MAC_STATUS_CFG_CHANGED |
              MAC_STATUS_MI_COMPLETION |
              MAC_STATUS_LNKSTATE_CHANGED));
        udelay(40);

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

        /* Some third-party PHYs need to be reset on link going
         * down.
         */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
            netif_carrier_ok(tp->dev)) {
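                /* BMSR latches link-down events: the first read returns
                 * the latched value, the second the current link state.
                 */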
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    !(bmsr & BMSR_LSTATUS))
                        force_reset = 1;
        }
        if (force_reset)
                tg3_phy_reset(tp);

        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
                    !tg3_flag(tp, INIT_COMPLETE))
                        bmsr = 0;

                if (!(bmsr & BMSR_LSTATUS)) {
                        err = tg3_init_5401phy_dsp(tp);
                        if (err)
                                return err;

                        tg3_readphy(tp, MII_BMSR, &bmsr);
                        for (i = 0; i < 1000; i++) {
                                udelay(10);
                                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                                    (bmsr & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }

                        if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
                            TG3_PHY_REV_BCM5401_B0 &&
                            !(bmsr & BMSR_LSTATUS) &&
                            tp->link_config.active_speed == SPEED_1000) {
                                err = tg3_phy_reset(tp);
                                if (!err)
                                        err = tg3_init_5401phy_dsp(tp);
                                if (err)
                                        return err;
                        }
                }
        } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
                /* 5701 {A0,B0} CRC bug workaround */
                tg3_writephy(tp, 0x15, 0x0a75);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
        }

        /* Clear pending interrupts... */
        tg3_readphy(tp, MII_TG3_ISTAT, &val);
        tg3_readphy(tp, MII_TG3_ISTAT, &val);

        if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
                tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
        else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
                tg3_writephy(tp, MII_TG3_IMASK, ~0);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                else
                        tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
        }

        current_link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;

        if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
                err = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                          &val);
                if (!err && !(val & (1 << 10))) {
                        tg3_phy_auxctl_write(tp,
                                             MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                             val | (1 << 10));
                        goto relink;
                }
        }

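        /* Poll up to ~4 ms for link; BMSR is read twice per pass
         * because the link-status bit is latched.
         */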
        bmsr = 0;
        for (i = 0; i < 100; i++) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        break;
                udelay(40);
        }

        if (bmsr & BMSR_LSTATUS) {
                u32 aux_stat, bmcr;

                tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
                for (i = 0; i < 2000; i++) {
                        udelay(10);
                        if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
                            aux_stat)
                                break;
                }

                tg3_aux_stat_to_speed_duplex(tp, aux_stat,
                                             &current_speed,
                                             &current_duplex);

                bmcr = 0;
                for (i = 0; i < 200; i++) {
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        if (tg3_readphy(tp, MII_BMCR, &bmcr))
                                continue;
                        if (bmcr && bmcr != 0x7fff)
                                break;
                        udelay(10);
                }

                lcl_adv = 0;
                rmt_adv = 0;

                tp->link_config.active_speed = current_speed;
                tp->link_config.active_duplex = current_duplex;

                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                        if ((bmcr & BMCR_ANENABLE) &&
                            tg3_copper_is_advertising_all(tp,
                                                tp->link_config.advertising)) {
                                if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
                                                                  &rmt_adv))
                                        current_link_up = 1;
                        }
                } else {
                        if (!(bmcr & BMCR_ANENABLE) &&
                            tp->link_config.speed == current_speed &&
                            tp->link_config.duplex == current_duplex &&
                            tp->link_config.flowctrl ==
                            tp->link_config.active_flowctrl) {
                                current_link_up = 1;
                        }
                }

                if (current_link_up == 1 &&
                    tp->link_config.active_duplex == DUPLEX_FULL)
                        tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
        }

relink:
        if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                tg3_phy_copper_begin(tp);

                tg3_readphy(tp, MII_BMSR, &bmsr);
                if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
                    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
                        current_link_up = 1;
        }

        tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
        if (current_link_up == 1) {
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
        else
                tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                if (current_link_up == 1 &&
                    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;
                else
                        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
        }

        /* ??? Without this setting Netgear GA302T PHY does not
         * ??? send/receive packets...
         */
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
            tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
                tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tg3_phy_eee_adjust(tp, current_link_up);

        if (tg3_flag(tp, USE_LINKCHG_REG)) {
                /* Polled via timer. */
                tw32_f(MAC_EVENT, 0);
        } else {
                tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        }
        udelay(40);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
            current_link_up == 1 &&
            tp->link_config.active_speed == SPEED_1000 &&
            (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
                udelay(120);
                tw32_f(MAC_STATUS,
                     (MAC_STATUS_SYNC_CHANGED |
                      MAC_STATUS_CFG_CHANGED));
                udelay(40);
                tg3_write_mem(tp,
                              NIC_SRAM_FIRMWARE_MBOX,
                              NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
        }

        /* Prevent send BD corruption: on chips with the CLKREQ bug,
         * PCIe CLKREQ must be disabled while the link runs at 10 or
         * 100 Mbps.
         */
        if (tg3_flag(tp, CLKREQ_BUG)) {
                u16 oldlnkctl, newlnkctl;

                pci_read_config_word(tp->pdev,
                                     tp->pcie_cap + PCI_EXP_LNKCTL,
                                     &oldlnkctl);
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
                else
                        newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
                if (newlnkctl != oldlnkctl)
                        pci_write_config_word(tp->pdev,
                                              tp->pcie_cap + PCI_EXP_LNKCTL,
                                              newlnkctl);
        }

        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        return 0;
}

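/* Software 1000BASE-X autonegotiation state machine, modelled on the
 * arbitration state diagram of IEEE 802.3 clause 37; used when the
 * SERDES hardware autoneg block is not in play.
 */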
struct tg3_fiber_aneginfo {
        int state;
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

        u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        unsigned long link_time, cur_time;

        u32 ability_match_cfg;
        int ability_match_count;

        char ability_match, idle_match, ack_match;

        u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

#define ANEG_STATE_SETTLE_TIME  10000
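/* cur_time ticks once per state-machine call and the caller delays
 * 1 us per tick, so the settle time above is roughly 10 ms.
 */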

static int tg3_fiber_aneg_smachine(struct tg3 *tp,
                                   struct tg3_fiber_aneginfo *ap)
{
        u16 flowctrl;
        unsigned long delta;
        u32 rx_cfg_reg;
        int ret;

        if (ap->state == ANEG_STATE_UNKNOWN) {
                ap->rxconfig = 0;
                ap->link_time = 0;
                ap->cur_time = 0;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->idle_match = 0;
                ap->ack_match = 0;
        }
        ap->cur_time++;

        if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
                rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

                if (rx_cfg_reg != ap->ability_match_cfg) {
                        ap->ability_match_cfg = rx_cfg_reg;
                        ap->ability_match = 0;
                        ap->ability_match_count = 0;
                } else {
                        if (++ap->ability_match_count > 1) {
                                ap->ability_match = 1;
                                ap->ability_match_cfg = rx_cfg_reg;
                        }
                }
                if (rx_cfg_reg & ANEG_CFG_ACK)
                        ap->ack_match = 1;
                else
                        ap->ack_match = 0;

                ap->idle_match = 0;
        } else {
                ap->idle_match = 1;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->ack_match = 0;

                rx_cfg_reg = 0;
        }

        ap->rxconfig = rx_cfg_reg;
        ret = ANEG_OK;

        switch (ap->state) {
        case ANEG_STATE_UNKNOWN:
                if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
                        ap->state = ANEG_STATE_AN_ENABLE;

                /* fallthru */
        case ANEG_STATE_AN_ENABLE:
                ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
                if (ap->flags & MR_AN_ENABLE) {
                        ap->link_time = 0;
                        ap->cur_time = 0;
                        ap->ability_match_cfg = 0;
                        ap->ability_match_count = 0;
                        ap->ability_match = 0;
                        ap->idle_match = 0;
                        ap->ack_match = 0;

                        ap->state = ANEG_STATE_RESTART_INIT;
                } else {
                        ap->state = ANEG_STATE_DISABLE_LINK_OK;
                }
                break;

        case ANEG_STATE_RESTART_INIT:
                ap->link_time = ap->cur_time;
                ap->flags &= ~(MR_NP_LOADED);
                ap->txconfig = 0;
                tw32(MAC_TX_AUTO_NEG, 0);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ret = ANEG_TIMER_ENAB;
                ap->state = ANEG_STATE_RESTART;

                /* fallthru */
        case ANEG_STATE_RESTART:
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME)
                        ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
                else
                        ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_DISABLE_LINK_OK:
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_ABILITY_DETECT_INIT:
                ap->flags &= ~(MR_TOGGLE_TX);
                ap->txconfig = ANEG_CFG_FD;
                flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
                if (flowctrl & ADVERTISE_1000XPAUSE)
                        ap->txconfig |= ANEG_CFG_PS1;
                if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                        ap->txconfig |= ANEG_CFG_PS2;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ABILITY_DETECT;
                break;

        case ANEG_STATE_ABILITY_DETECT:
                if (ap->ability_match != 0 && ap->rxconfig != 0)
                        ap->state = ANEG_STATE_ACK_DETECT_INIT;
                break;

        case ANEG_STATE_ACK_DETECT_INIT:
                ap->txconfig |= ANEG_CFG_ACK;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ACK_DETECT;

                /* fallthru */
        case ANEG_STATE_ACK_DETECT:
                if (ap->ack_match != 0) {
                        if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
                            (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
                                ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
                        } else {
                                ap->state = ANEG_STATE_AN_ENABLE;
                        }
                } else if (ap->ability_match != 0 &&
                           ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                }
                break;

        case ANEG_STATE_COMPLETE_ACK_INIT:
                if (ap->rxconfig & ANEG_CFG_INVAL) {
                        ret = ANEG_FAILED;
                        break;
                }
                ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
                               MR_LP_ADV_HALF_DUPLEX |
                               MR_LP_ADV_SYM_PAUSE |
                               MR_LP_ADV_ASYM_PAUSE |
                               MR_LP_ADV_REMOTE_FAULT1 |
                               MR_LP_ADV_REMOTE_FAULT2 |
                               MR_LP_ADV_NEXT_PAGE |
                               MR_TOGGLE_RX |
                               MR_NP_RX);
                if (ap->rxconfig & ANEG_CFG_FD)
                        ap->flags |= MR_LP_ADV_FULL_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_HD)
                        ap->flags |= MR_LP_ADV_HALF_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_PS1)
                        ap->flags |= MR_LP_ADV_SYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_PS2)
                        ap->flags |= MR_LP_ADV_ASYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_RF1)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
                if (ap->rxconfig & ANEG_CFG_RF2)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_LP_ADV_NEXT_PAGE;

                ap->link_time = ap->cur_time;

                ap->flags ^= (MR_TOGGLE_TX);
                if (ap->rxconfig & 0x0008)
                        ap->flags |= MR_TOGGLE_RX;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_NP_RX;
                ap->flags |= MR_PAGE_RX;

                ap->state = ANEG_STATE_COMPLETE_ACK;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_COMPLETE_ACK:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
                                ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                        } else {
                                if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
                                    !(ap->flags & MR_NP_RX)) {
                                        ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                                } else {
                                        ret = ANEG_FAILED;
                                }
                        }
                }
                break;

        case ANEG_STATE_IDLE_DETECT_INIT:
                ap->link_time = ap->cur_time;
                tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_IDLE_DETECT;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_IDLE_DETECT:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        /* XXX another gem from the Broadcom driver :( */
                        ap->state = ANEG_STATE_LINK_OK;
                }
                break;

        case ANEG_STATE_LINK_OK:
                ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
                /* ??? unimplemented */
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT:
                /* ??? unimplemented */
                break;

        default:
                ret = ANEG_FAILED;
                break;
        }

        return ret;
}

static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
        int res = 0;
        struct tg3_fiber_aneginfo aninfo;
        int status = ANEG_FAILED;
        unsigned int tick;
        u32 tmp;

        tw32_f(MAC_TX_AUTO_NEG, 0);

        tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
        tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
        udelay(40);

        tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
        udelay(40);

        memset(&aninfo, 0, sizeof(aninfo));
        aninfo.flags |= MR_AN_ENABLE;
        aninfo.state = ANEG_STATE_UNKNOWN;
        aninfo.cur_time = 0;
        tick = 0;
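        /* Crank the state machine for at most ~195 ms (1 us per tick). */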
        while (++tick < 195000) {
                status = tg3_fiber_aneg_smachine(tp, &aninfo);
                if (status == ANEG_DONE || status == ANEG_FAILED)
                        break;

                udelay(1);
        }

        tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        *txflags = aninfo.txconfig;
        *rxflags = aninfo.flags;

        if (status == ANEG_DONE &&
            (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
                             MR_LP_ADV_FULL_DUPLEX)))
                res = 1;

        return res;
}

static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset when initting for the first time or when we have a link. */
        if (tg3_flag(tp, INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete. */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}

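/* Bring up the SERDES link via the SG_DIG hardware autonegotiation
 * block; returns 1 if the link came up.
 */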
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
        u16 flowctrl;
        u32 sg_dig_ctrl, sg_dig_status;
        u32 serdes_cfg, expected_sg_dig_ctrl;
        int workaround, port_a;
        int current_link_up;

        serdes_cfg = 0;
        expected_sg_dig_ctrl = 0;
        workaround = 0;
        port_a = 1;
        current_link_up = 0;

        if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
                workaround = 1;
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        port_a = 0;

                /* preserve bits 0-11,13,14 for signal pre-emphasis */
                /* preserve bits 20-23 for voltage regulator */
                serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
        }

        sg_dig_ctrl = tr32(SG_DIG_CTRL);

        if (tp->link_config.autoneg != AUTONEG_ENABLE) {
                if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
                        if (workaround) {
                                u32 val = serdes_cfg;

                                if (port_a)
                                        val |= 0xc010000;
                                else
                                        val |= 0x4010000;
                                tw32_f(MAC_SERDES_CFG, val);
                        }

                        tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                }
                if (mac_status & MAC_STATUS_PCS_SYNCED) {
                        tg3_setup_flow_control(tp, 0, 0);
                        current_link_up = 1;
                }
                goto out;
        }

        /* Want auto-negotiation.  */
        expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

        flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
        if (flowctrl & ADVERTISE_1000XPAUSE)
                expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
        if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

        if (sg_dig_ctrl != expected_sg_dig_ctrl) {
                if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
                    tp->serdes_counter &&
                    ((mac_status & (MAC_STATUS_PCS_SYNCED |
                                    MAC_STATUS_RCVD_CFG)) ==
                     MAC_STATUS_PCS_SYNCED)) {
                        tp->serdes_counter--;
                        current_link_up = 1;
                        goto out;
                }
restart_autoneg:
                if (workaround)
                        tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
                udelay(5);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
        } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
                                 MAC_STATUS_SIGNAL_DET)) {
                sg_dig_status = tr32(SG_DIG_STATUS);
                mac_status = tr32(MAC_STATUS);

                if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
                    (mac_status & MAC_STATUS_PCS_SYNCED)) {
                        u32 local_adv = 0, remote_adv = 0;

                        if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = 1;
                        tp->serdes_counter = 0;
                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
                } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
                        if (tp->serdes_counter)
                                tp->serdes_counter--;
                        else {
                                if (workaround) {
                                        u32 val = serdes_cfg;

                                        if (port_a)
                                                val |= 0xc010000;
                                        else
                                                val |= 0x4010000;

                                        tw32_f(MAC_SERDES_CFG, val);
                                }

                                tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                                udelay(40);

                                /* Link parallel detection - link is up
                                 * only if we have PCS_SYNC and are not
                                 * receiving config code words.
                                 */
                                mac_status = tr32(MAC_STATUS);
                                if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
                                    !(mac_status & MAC_STATUS_RCVD_CFG)) {
                                        tg3_setup_flow_control(tp, 0, 0);
                                        current_link_up = 1;
                                        tp->phy_flags |=
                                                TG3_PHYFLG_PARALLEL_DETECT;
                                        tp->serdes_counter =
                                                SERDES_PARALLEL_DET_TIMEOUT;
                                } else
                                        goto restart_autoneg;
                        }
                }
        } else {
                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
        }

out:
        return current_link_up;
}

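/* Software fallback used when the SG_DIG block is not used: run the
 * clause 37 state machine by hand, or force 1000FD when autoneg is off.
 */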
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
        int current_link_up = 0;

        if (!(mac_status & MAC_STATUS_PCS_SYNCED))
                goto out;

        if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 txflags, rxflags;
                int i;

                if (fiber_autoneg(tp, &txflags, &rxflags)) {
                        u32 local_adv = 0, remote_adv = 0;

                        if (txflags & ANEG_CFG_PS1)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (txflags & ANEG_CFG_PS2)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (rxflags & MR_LP_ADV_SYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (rxflags & MR_LP_ADV_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);

                        current_link_up = 1;
                }
                for (i = 0; i < 30; i++) {
                        udelay(20);
                        tw32_f(MAC_STATUS,
                               (MAC_STATUS_SYNC_CHANGED |
                                MAC_STATUS_CFG_CHANGED));
                        udelay(40);
                        if ((tr32(MAC_STATUS) &
                             (MAC_STATUS_SYNC_CHANGED |
                              MAC_STATUS_CFG_CHANGED)) == 0)
                                break;
                }

                mac_status = tr32(MAC_STATUS);
                if (current_link_up == 0 &&
                    (mac_status & MAC_STATUS_PCS_SYNCED) &&
                    !(mac_status & MAC_STATUS_RCVD_CFG))
                        current_link_up = 1;
        } else {
                tg3_setup_flow_control(tp, 0, 0);

                /* Forcing 1000FD link up. */
                current_link_up = 1;

                tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
                udelay(40);

                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

out:
        return current_link_up;
}

static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
        u32 orig_pause_cfg;
        u16 orig_active_speed;
        u8 orig_active_duplex;
        u32 mac_status;
        int current_link_up;
        int i;

        orig_pause_cfg = tp->link_config.active_flowctrl;
        orig_active_speed = tp->link_config.active_speed;
        orig_active_duplex = tp->link_config.active_duplex;

        if (!tg3_flag(tp, HW_AUTONEG) &&
            netif_carrier_ok(tp->dev) &&
            tg3_flag(tp, INIT_COMPLETE)) {
                mac_status = tr32(MAC_STATUS);
                mac_status &= (MAC_STATUS_PCS_SYNCED |
                               MAC_STATUS_SIGNAL_DET |
                               MAC_STATUS_CFG_CHANGED |
                               MAC_STATUS_RCVD_CFG);
                if (mac_status == (MAC_STATUS_PCS_SYNCED |
                                   MAC_STATUS_SIGNAL_DET)) {
                        tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                            MAC_STATUS_CFG_CHANGED));
                        return 0;
                }
        }

        tw32_f(MAC_TX_AUTO_NEG, 0);

        tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        if (tp->phy_id == TG3_PHY_ID_BCM8002)
                tg3_init_bcm8002(tp);

        /* Enable link change event even when serdes polling.  */
        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        udelay(40);

        current_link_up = 0;
        mac_status = tr32(MAC_STATUS);

        if (tg3_flag(tp, HW_AUTONEG))
                current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
        else
                current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

        tp->napi[0].hw_status->status =
                (SD_STATUS_UPDATED |
                 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

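        /* Flush latched link-status-change bits; give up after 100 tries. */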
4141         for (i = 0; i < 100; i++) {
4142                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4143                                     MAC_STATUS_CFG_CHANGED));
4144                 udelay(5);
4145                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4146                                          MAC_STATUS_CFG_CHANGED |
4147                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4148                         break;
4149         }
4150
4151         mac_status = tr32(MAC_STATUS);
4152         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4153                 current_link_up = 0;
4154                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4155                     tp->serdes_counter == 0) {
4156                         tw32_f(MAC_MODE, (tp->mac_mode |
4157                                           MAC_MODE_SEND_CONFIGS));
4158                         udelay(1);
4159                         tw32_f(MAC_MODE, tp->mac_mode);
4160                 }
4161         }
4162
4163         if (current_link_up == 1) {
4164                 tp->link_config.active_speed = SPEED_1000;
4165                 tp->link_config.active_duplex = DUPLEX_FULL;
4166                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4167                                     LED_CTRL_LNKLED_OVERRIDE |
4168                                     LED_CTRL_1000MBPS_ON));
4169         } else {
4170                 tp->link_config.active_speed = SPEED_INVALID;
4171                 tp->link_config.active_duplex = DUPLEX_INVALID;
4172                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4173                                     LED_CTRL_LNKLED_OVERRIDE |
4174                                     LED_CTRL_TRAFFIC_OVERRIDE));
4175         }
4176
4177         if (current_link_up != netif_carrier_ok(tp->dev)) {
4178                 if (current_link_up)
4179                         netif_carrier_on(tp->dev);
4180                 else
4181                         netif_carrier_off(tp->dev);
4182                 tg3_link_report(tp);
4183         } else {
4184                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4185                 if (orig_pause_cfg != now_pause_cfg ||
4186                     orig_active_speed != tp->link_config.active_speed ||
4187                     orig_active_duplex != tp->link_config.active_duplex)
4188                         tg3_link_report(tp);
4189         }
4190
4191         return 0;
4192 }
4193
4194 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4195 {
4196         int current_link_up, err = 0;
4197         u32 bmsr, bmcr;
4198         u16 current_speed;
4199         u8 current_duplex;
4200         u32 local_adv, remote_adv;
4201
4202         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4203         tw32_f(MAC_MODE, tp->mac_mode);
4204         udelay(40);
4205
4206         tw32(MAC_EVENT, 0);
4207
4208         tw32_f(MAC_STATUS,
4209              (MAC_STATUS_SYNC_CHANGED |
4210               MAC_STATUS_CFG_CHANGED |
4211               MAC_STATUS_MI_COMPLETION |
4212               MAC_STATUS_LNKSTATE_CHANGED));
4213         udelay(40);
4214
4215         if (force_reset)
4216                 tg3_phy_reset(tp);
4217
4218         current_link_up = 0;
4219         current_speed = SPEED_INVALID;
4220         current_duplex = DUPLEX_INVALID;
4221
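             /* The BMSR link-status bit is latched low, so read the
              * register twice to pick up the current link state.
              */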
4222         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4223         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4224         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4225                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4226                         bmsr |= BMSR_LSTATUS;
4227                 else
4228                         bmsr &= ~BMSR_LSTATUS;
4229         }
4230
4231         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4232
4233         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4234             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4235                 /* do nothing, just check for link up at the end */
4236         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4237                 u32 adv, new_adv;
4238
4239                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4240                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4241                                   ADVERTISE_1000XPAUSE |
4242                                   ADVERTISE_1000XPSE_ASYM |
4243                                   ADVERTISE_SLCT);
4244
4245                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4246
4247                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4248                         new_adv |= ADVERTISE_1000XHALF;
4249                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4250                         new_adv |= ADVERTISE_1000XFULL;
4251
4252                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4253                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4254                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4255                         tg3_writephy(tp, MII_BMCR, bmcr);
4256
4257                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4258                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4259                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4260
4261                         return err;
4262                 }
4263         } else {
4264                 u32 new_bmcr;
4265
4266                 bmcr &= ~BMCR_SPEED1000;
4267                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4268
4269                 if (tp->link_config.duplex == DUPLEX_FULL)
4270                         new_bmcr |= BMCR_FULLDPLX;
4271
4272                 if (new_bmcr != bmcr) {
4273                         /* BMCR_SPEED1000 is a reserved bit that needs
4274                          * to be set on write.
4275                          */
4276                         new_bmcr |= BMCR_SPEED1000;
4277
4278                         /* Force a linkdown */
4279                         if (netif_carrier_ok(tp->dev)) {
4280                                 u32 adv;
4281
4282                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4283                                 adv &= ~(ADVERTISE_1000XFULL |
4284                                          ADVERTISE_1000XHALF |
4285                                          ADVERTISE_SLCT);
4286                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4287                                 tg3_writephy(tp, MII_BMCR, bmcr |
4288                                                            BMCR_ANRESTART |
4289                                                            BMCR_ANENABLE);
4290                                 udelay(10);
4291                                 netif_carrier_off(tp->dev);
4292                         }
4293                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4294                         bmcr = new_bmcr;
4295                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4296                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4297                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4298                             ASIC_REV_5714) {
4299                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4300                                         bmsr |= BMSR_LSTATUS;
4301                                 else
4302                                         bmsr &= ~BMSR_LSTATUS;
4303                         }
4304                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4305                 }
4306         }
4307
4308         if (bmsr & BMSR_LSTATUS) {
4309                 current_speed = SPEED_1000;
4310                 current_link_up = 1;
4311                 if (bmcr & BMCR_FULLDPLX)
4312                         current_duplex = DUPLEX_FULL;
4313                 else
4314                         current_duplex = DUPLEX_HALF;
4315
4316                 local_adv = 0;
4317                 remote_adv = 0;
4318
4319                 if (bmcr & BMCR_ANENABLE) {
4320                         u32 common;
4321
4322                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4323                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4324                         common = local_adv & remote_adv;
4325                         if (common & (ADVERTISE_1000XHALF |
4326                                       ADVERTISE_1000XFULL)) {
4327                                 if (common & ADVERTISE_1000XFULL)
4328                                         current_duplex = DUPLEX_FULL;
4329                                 else
4330                                         current_duplex = DUPLEX_HALF;
4331                         } else if (!tg3_flag(tp, 5780_CLASS)) {
4332                                 /* Link is up via parallel detect */
4333                         } else {
4334                                 current_link_up = 0;
4335                         }
4336                 }
4337         }
4338
4339         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4340                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4341
4342         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4343         if (tp->link_config.active_duplex == DUPLEX_HALF)
4344                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4345
4346         tw32_f(MAC_MODE, tp->mac_mode);
4347         udelay(40);
4348
4349         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4350
4351         tp->link_config.active_speed = current_speed;
4352         tp->link_config.active_duplex = current_duplex;
4353
4354         if (current_link_up != netif_carrier_ok(tp->dev)) {
4355                 if (current_link_up)
4356                         netif_carrier_on(tp->dev);
4357                 else {
4358                         netif_carrier_off(tp->dev);
4359                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4360                 }
4361                 tg3_link_report(tp);
4362         }
4363         return err;
4364 }
4365
4366 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4367 {
4368         if (tp->serdes_counter) {
4369                 /* Give autoneg time to complete. */
4370                 tp->serdes_counter--;
4371                 return;
4372         }
4373
4374         if (!netif_carrier_ok(tp->dev) &&
4375             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4376                 u32 bmcr;
4377
4378                 tg3_readphy(tp, MII_BMCR, &bmcr);
4379                 if (bmcr & BMCR_ANENABLE) {
4380                         u32 phy1, phy2;
4381
4382                         /* Select shadow register 0x1f */
4383                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4384                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4385
4386                         /* Select expansion interrupt status register */
4387                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4388                                          MII_TG3_DSP_EXP1_INT_STAT);
4389                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4390                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4391
4392                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4393                                 /* We have signal detect and are not
4394                                  * receiving config code words; the link
4395                                  * is up via parallel detection.
4396                                  */
4397
4398                                 bmcr &= ~BMCR_ANENABLE;
4399                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4400                                 tg3_writephy(tp, MII_BMCR, bmcr);
4401                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4402                         }
4403                 }
4404         } else if (netif_carrier_ok(tp->dev) &&
4405                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4406                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4407                 u32 phy2;
4408
4409                 /* Select expansion interrupt status register */
4410                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4411                                  MII_TG3_DSP_EXP1_INT_STAT);
4412                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4413                 if (phy2 & 0x20) {
4414                         u32 bmcr;
4415
4416                         /* Config code words received, turn on autoneg. */
4417                         tg3_readphy(tp, MII_BMCR, &bmcr);
4418                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4419
4420                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4421
4422                 }
4423         }
4424 }
4425
4426 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4427 {
4428         u32 val;
4429         int err;
4430
4431         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4432                 err = tg3_setup_fiber_phy(tp, force_reset);
4433         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4434                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4435         else
4436                 err = tg3_setup_copper_phy(tp, force_reset);
4437
4438         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4439                 u32 scale;
4440
4441                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4442                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4443                         scale = 65;
4444                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4445                         scale = 6;
4446                 else
4447                         scale = 12;
4448
4449                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4450                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4451                 tw32(GRC_MISC_CFG, val);
4452         }
4453
4454         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4455               (6 << TX_LENGTHS_IPG_SHIFT);
4456         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4457                 val |= tr32(MAC_TX_LENGTHS) &
4458                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
4459                         TX_LENGTHS_CNT_DWN_VAL_MSK);
4460
4461         if (tp->link_config.active_speed == SPEED_1000 &&
4462             tp->link_config.active_duplex == DUPLEX_HALF)
4463                 tw32(MAC_TX_LENGTHS, val |
4464                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4465         else
4466                 tw32(MAC_TX_LENGTHS, val |
4467                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4468
4469         if (!tg3_flag(tp, 5705_PLUS)) {
4470                 if (netif_carrier_ok(tp->dev)) {
4471                         tw32(HOSTCC_STAT_COAL_TICKS,
4472                              tp->coal.stats_block_coalesce_usecs);
4473                 } else {
4474                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
4475                 }
4476         }
4477
4478         if (tg3_flag(tp, ASPM_WORKAROUND)) {
4479                 val = tr32(PCIE_PWR_MGMT_THRESH);
4480                 if (!netif_carrier_ok(tp->dev))
4481                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4482                               tp->pwrmgmt_thresh;
4483                 else
4484                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4485                 tw32(PCIE_PWR_MGMT_THRESH, val);
4486         }
4487
4488         return err;
4489 }
4490
4491 static inline int tg3_irq_sync(struct tg3 *tp)
4492 {
4493         return tp->irq_sync;
4494 }
4495
4496 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4497 {
4498         int i;
4499
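             /* Offset dst by the register offset so each value lands at
              * its own offset within the dump buffer, mirroring the
              * device's register map.
              */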
4500         dst = (u32 *)((u8 *)dst + off);
4501         for (i = 0; i < len; i += sizeof(u32))
4502                 *dst++ = tr32(off + i);
4503 }
4504
4505 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4506 {
4507         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4508         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4509         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4510         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4511         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4512         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4513         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4514         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4515         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4516         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4517         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4518         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4519         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4520         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4521         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4522         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4523         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4524         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4525         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4526
4527         if (tg3_flag(tp, SUPPORT_MSIX))
4528                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4529
4530         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4531         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4532         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4533         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4534         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4535         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4536         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4537         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4538
4539         if (!tg3_flag(tp, 5705_PLUS)) {
4540                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4541                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4542                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4543         }
4544
4545         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4546         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4547         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4548         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4549         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4550
4551         if (tg3_flag(tp, NVRAM))
4552                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4553 }
4554
4555 static void tg3_dump_state(struct tg3 *tp)
4556 {
4557         int i;
4558         u32 *regs;
4559
4560         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4561         if (!regs) {
4562                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4563                 return;
4564         }
4565
4566         if (tg3_flag(tp, PCI_EXPRESS)) {
4567                 /* Read up to but not including private PCI registers */
4568                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4569                         regs[i / sizeof(u32)] = tr32(i);
4570         } else
4571                 tg3_dump_legacy_regs(tp, regs);
4572
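             /* Print four words (16 bytes) per line, skipping lines that
              * are entirely zero.
              */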
4573         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4574                 if (!regs[i + 0] && !regs[i + 1] &&
4575                     !regs[i + 2] && !regs[i + 3])
4576                         continue;
4577
4578                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4579                            i * 4,
4580                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4581         }
4582
4583         kfree(regs);
4584
4585         for (i = 0; i < tp->irq_cnt; i++) {
4586                 struct tg3_napi *tnapi = &tp->napi[i];
4587
4588                 /* SW status block */
4589                 netdev_err(tp->dev,
4590                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4591                            i,
4592                            tnapi->hw_status->status,
4593                            tnapi->hw_status->status_tag,
4594                            tnapi->hw_status->rx_jumbo_consumer,
4595                            tnapi->hw_status->rx_consumer,
4596                            tnapi->hw_status->rx_mini_consumer,
4597                            tnapi->hw_status->idx[0].rx_producer,
4598                            tnapi->hw_status->idx[0].tx_consumer);
4599
4600                 netdev_err(tp->dev,
4601                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4602                            i,
4603                            tnapi->last_tag, tnapi->last_irq_tag,
4604                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4605                            tnapi->rx_rcb_ptr,
4606                            tnapi->prodring.rx_std_prod_idx,
4607                            tnapi->prodring.rx_std_cons_idx,
4608                            tnapi->prodring.rx_jmb_prod_idx,
4609                            tnapi->prodring.rx_jmb_cons_idx);
4610         }
4611 }
4612
4613 /* This is called whenever we suspect that the system chipset is re-
4614  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4615  * is bogus tx completions. We try to recover by setting the
4616  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4617  * in the workqueue.
4618  */
4619 static void tg3_tx_recover(struct tg3 *tp)
4620 {
4621         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4622                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4623
4624         netdev_warn(tp->dev,
4625                     "The system may be re-ordering memory-mapped I/O "
4626                     "cycles to the network device, attempting to recover. "
4627                     "Please report the problem to the driver maintainer "
4628                     "and include system chipset information.\n");
4629
4630         spin_lock(&tp->lock);
4631         tg3_flag_set(tp, TX_RECOVERY_PENDING);
4632         spin_unlock(&tp->lock);
4633 }
4634
4635 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4636 {
4637         /* Tell compiler to fetch tx indices from memory. */
4638         barrier();
4639         return tnapi->tx_pending -
4640                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4641 }
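
     /* Worked example of the arithmetic above (illustrative, assuming the
      * default TG3_TX_RING_SIZE of 512): with tx_cons = 510 and tx_prod = 5
      * the producer has wrapped, so (5 - 510) & 511 = 7 descriptors are
      * still in flight and tg3_tx_avail() returns tx_pending - 7.  Masking
      * with (ring size - 1) is what makes the unsigned subtraction
      * wrap-safe.
      */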
4642
4643 /* Tigon3 never reports partial packet sends.  So we do not
4644  * need special logic to handle SKBs that have not had all
4645  * of their frags sent yet, like SunGEM does.
4646  */
4647 static void tg3_tx(struct tg3_napi *tnapi)
4648 {
4649         struct tg3 *tp = tnapi->tp;
4650         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4651         u32 sw_idx = tnapi->tx_cons;
4652         struct netdev_queue *txq;
4653         int index = tnapi - tp->napi;
4654
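             /* With TSS, tx rings are serviced starting at napi[1], so
              * subtract one to recover the tx queue number for this vector.
              */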
4655         if (tg3_flag(tp, ENABLE_TSS))
4656                 index--;
4657
4658         txq = netdev_get_tx_queue(tp->dev, index);
4659
4660         while (sw_idx != hw_idx) {
4661                 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4662                 struct sk_buff *skb = ri->skb;
4663                 int i, tx_bug = 0;
4664
4665                 if (unlikely(skb == NULL)) {
4666                         tg3_tx_recover(tp);
4667                         return;
4668                 }
4669
4670                 pci_unmap_single(tp->pdev,
4671                                  dma_unmap_addr(ri, mapping),
4672                                  skb_headlen(skb),
4673                                  PCI_DMA_TODEVICE);
4674
4675                 ri->skb = NULL;
4676
4677                 sw_idx = NEXT_TX(sw_idx);
4678
4679                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4680                         ri = &tnapi->tx_buffers[sw_idx];
4681                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4682                                 tx_bug = 1;
4683
4684                         pci_unmap_page(tp->pdev,
4685                                        dma_unmap_addr(ri, mapping),
4686                                        skb_shinfo(skb)->frags[i].size,
4687                                        PCI_DMA_TODEVICE);
4688                         sw_idx = NEXT_TX(sw_idx);
4689                 }
4690
4691                 dev_kfree_skb(skb);
4692
4693                 if (unlikely(tx_bug)) {
4694                         tg3_tx_recover(tp);
4695                         return;
4696                 }
4697         }
4698
4699         tnapi->tx_cons = sw_idx;
4700
4701         /* Need to make the tx_cons update visible to tg3_start_xmit()
4702          * before checking for netif_queue_stopped().  Without the
4703          * memory barrier, there is a small possibility that tg3_start_xmit()
4704          * will miss it and cause the queue to be stopped forever.
4705          */
4706         smp_mb();
4707
4708         if (unlikely(netif_tx_queue_stopped(txq) &&
4709                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4710                 __netif_tx_lock(txq, smp_processor_id());
4711                 if (netif_tx_queue_stopped(txq) &&
4712                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4713                         netif_tx_wake_queue(txq);
4714                 __netif_tx_unlock(txq);
4715         }
4716 }
4717
4718 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4719 {
4720         if (!ri->skb)
4721                 return;
4722
4723         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4724                          map_sz, PCI_DMA_FROMDEVICE);
4725         dev_kfree_skb_any(ri->skb);
4726         ri->skb = NULL;
4727 }
4728
4729 /* Returns size of skb allocated or < 0 on error.
4730  *
4731  * We only need to fill in the address because the other members
4732  * of the RX descriptor are invariant, see tg3_init_rings.
4733  *
4734  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4735  * posting buffers we only dirty the first cache line of the RX
4736  * descriptor (containing the address).  Whereas for the RX status
4737  * buffers the cpu only reads the last cacheline of the RX descriptor
4738  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4739  */
4740 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4741                             u32 opaque_key, u32 dest_idx_unmasked)
4742 {
4743         struct tg3_rx_buffer_desc *desc;
4744         struct ring_info *map;
4745         struct sk_buff *skb;
4746         dma_addr_t mapping;
4747         int skb_size, dest_idx;
4748
4749         switch (opaque_key) {
4750         case RXD_OPAQUE_RING_STD:
4751                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4752                 desc = &tpr->rx_std[dest_idx];
4753                 map = &tpr->rx_std_buffers[dest_idx];
4754                 skb_size = tp->rx_pkt_map_sz;
4755                 break;
4756
4757         case RXD_OPAQUE_RING_JUMBO:
4758                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4759                 desc = &tpr->rx_jmb[dest_idx].std;
4760                 map = &tpr->rx_jmb_buffers[dest_idx];
4761                 skb_size = TG3_RX_JMB_MAP_SZ;
4762                 break;
4763
4764         default:
4765                 return -EINVAL;
4766         }
4767
4768         /* Do not overwrite any of the map or rp information
4769          * until we are sure we can commit to a new buffer.
4770          *
4771          * Callers depend upon this behavior and assume that
4772          * we leave everything unchanged if we fail.
4773          */
4774         skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4775         if (skb == NULL)
4776                 return -ENOMEM;
4777
4778         skb_reserve(skb, tp->rx_offset);
4779
4780         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4781                                  PCI_DMA_FROMDEVICE);
4782         if (pci_dma_mapping_error(tp->pdev, mapping)) {
4783                 dev_kfree_skb(skb);
4784                 return -EIO;
4785         }
4786
4787         map->skb = skb;
4788         dma_unmap_addr_set(map, mapping, mapping);
4789
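             /* Split the 64-bit DMA address across the two 32-bit
              * descriptor words; per the note above, this first cache line
              * is the only part of the descriptor the host dirties when
              * posting.
              */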
4790         desc->addr_hi = ((u64)mapping >> 32);
4791         desc->addr_lo = ((u64)mapping & 0xffffffff);
4792
4793         return skb_size;
4794 }
4795
4796 /* We only need to move over in the address because the other
4797  * members of the RX descriptor are invariant.  See notes above
4798  * tg3_alloc_rx_skb for full details.
4799  */
4800 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4801                            struct tg3_rx_prodring_set *dpr,
4802                            u32 opaque_key, int src_idx,
4803                            u32 dest_idx_unmasked)
4804 {
4805         struct tg3 *tp = tnapi->tp;
4806         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4807         struct ring_info *src_map, *dest_map;
4808         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4809         int dest_idx;
4810
4811         switch (opaque_key) {
4812         case RXD_OPAQUE_RING_STD:
4813                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4814                 dest_desc = &dpr->rx_std[dest_idx];
4815                 dest_map = &dpr->rx_std_buffers[dest_idx];
4816                 src_desc = &spr->rx_std[src_idx];
4817                 src_map = &spr->rx_std_buffers[src_idx];
4818                 break;
4819
4820         case RXD_OPAQUE_RING_JUMBO:
4821                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4822                 dest_desc = &dpr->rx_jmb[dest_idx].std;
4823                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4824                 src_desc = &spr->rx_jmb[src_idx].std;
4825                 src_map = &spr->rx_jmb_buffers[src_idx];
4826                 break;
4827
4828         default:
4829                 return;
4830         }
4831
4832         dest_map->skb = src_map->skb;
4833         dma_unmap_addr_set(dest_map, mapping,
4834                            dma_unmap_addr(src_map, mapping));
4835         dest_desc->addr_hi = src_desc->addr_hi;
4836         dest_desc->addr_lo = src_desc->addr_lo;
4837
4838         /* Ensure that the update to the skb happens after the physical
4839          * addresses have been transferred to the new BD location.
4840          */
4841         smp_wmb();
4842
4843         src_map->skb = NULL;
4844 }
4845
4846 /* The RX ring scheme is composed of multiple rings which post fresh
4847  * buffers to the chip, and one special ring the chip uses to report
4848  * status back to the host.
4849  *
4850  * The special ring reports the status of received packets to the
4851  * host.  The chip does not write into the original descriptor the
4852  * RX buffer was obtained from.  The chip simply takes the original
4853  * descriptor as provided by the host, updates the status and length
4854  * field, then writes this into the next status ring entry.
4855  *
4856  * Each ring the host uses to post buffers to the chip is described
4857  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet
4858  * arrives, it is first placed into on-chip RAM.  Once the packet's
4859  * length is known, the chip walks down the TG3_BDINFO entries to
4860  * select the ring: each TG3_BDINFO specifies a MAXLEN field, and the
4861  * first TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
4862  *
4863  * The "separate ring for rx status" scheme may sound queer, but it makes
4864  * sense from a cache coherency perspective.  If only the host writes
4865  * to the buffer post rings, and only the chip writes to the rx status
4866  * rings, then cache lines never move beyond shared-modified state.
4867  * If both the host and chip were to write into the same ring, cache line
4868  * eviction could occur since both entities want it in an exclusive state.
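      *
      * Schematically (an illustrative summary of the above):
      *
      *      host  --posts fresh buffers--->  std/jumbo producer rings
      *      chip  --status + length------->  rx return ring --> host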
4869  */
4870 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4871 {
4872         struct tg3 *tp = tnapi->tp;
4873         u32 work_mask, rx_std_posted = 0;
4874         u32 std_prod_idx, jmb_prod_idx;
4875         u32 sw_idx = tnapi->rx_rcb_ptr;
4876         u16 hw_idx;
4877         int received;
4878         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4879
4880         hw_idx = *(tnapi->rx_rcb_prod_idx);
4881         /*
4882          * We need to order the read of hw_idx and the read of
4883          * the opaque cookie.
4884          */
4885         rmb();
4886         work_mask = 0;
4887         received = 0;
4888         std_prod_idx = tpr->rx_std_prod_idx;
4889         jmb_prod_idx = tpr->rx_jmb_prod_idx;
4890         while (sw_idx != hw_idx && budget > 0) {
4891                 struct ring_info *ri;
4892                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4893                 unsigned int len;
4894                 struct sk_buff *skb;
4895                 dma_addr_t dma_addr;
4896                 u32 opaque_key, desc_idx, *post_ptr;
4897
4898                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4899                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4900                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4901                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4902                         dma_addr = dma_unmap_addr(ri, mapping);
4903                         skb = ri->skb;
4904                         post_ptr = &std_prod_idx;
4905                         rx_std_posted++;
4906                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4907                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4908                         dma_addr = dma_unmap_addr(ri, mapping);
4909                         skb = ri->skb;
4910                         post_ptr = &jmb_prod_idx;
4911                 } else
4912                         goto next_pkt_nopost;
4913
4914                 work_mask |= opaque_key;
4915
4916                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4917                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4918                 drop_it:
4919                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4920                                        desc_idx, *post_ptr);
4921                 drop_it_no_recycle:
4922                         /* Other statistics are tracked by the card. */
4923                         tp->rx_dropped++;
4924                         goto next_pkt;
4925                 }
4926
4927                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4928                       ETH_FCS_LEN;
4929
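                     /* Large packets: post a fresh buffer to the ring and
                      * pass the original buffer up the stack.  Small
                      * packets: copy into a new skb so the original buffer
                      * can be recycled in place.
                      */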
4930                 if (len > TG3_RX_COPY_THRESH(tp)) {
4931                         int skb_size;
4932
4933                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4934                                                     *post_ptr);
4935                         if (skb_size < 0)
4936                                 goto drop_it;
4937
4938                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
4939                                          PCI_DMA_FROMDEVICE);
4940
4941                         /* Ensure that the update to the skb happens
4942                          * after the usage of the old DMA mapping.
4943                          */
4944                         smp_wmb();
4945
4946                         ri->skb = NULL;
4947
4948                         skb_put(skb, len);
4949                 } else {
4950                         struct sk_buff *copy_skb;
4951
4952                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4953                                        desc_idx, *post_ptr);
4954
4955                         copy_skb = netdev_alloc_skb(tp->dev, len +
4956                                                     TG3_RAW_IP_ALIGN);
4957                         if (copy_skb == NULL)
4958                                 goto drop_it_no_recycle;
4959
4960                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4961                         skb_put(copy_skb, len);
4962                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4963                         skb_copy_from_linear_data(skb, copy_skb->data, len);
4964                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4965
4966                         /* We'll reuse the original ring buffer. */
4967                         skb = copy_skb;
4968                 }
4969
4970                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4971                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4972                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4973                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
4974                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4975                 else
4976                         skb_checksum_none_assert(skb);
4977
4978                 skb->protocol = eth_type_trans(skb, tp->dev);
4979
4980                 if (len > (tp->dev->mtu + ETH_HLEN) &&
4981                     skb->protocol != htons(ETH_P_8021Q)) {
4982                         dev_kfree_skb(skb);
4983                         goto drop_it_no_recycle;
4984                 }
4985
4986                 if (desc->type_flags & RXD_FLAG_VLAN &&
4987                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4988                         __vlan_hwaccel_put_tag(skb,
4989                                                desc->err_vlan & RXD_VLAN_MASK);
4990
4991                 napi_gro_receive(&tnapi->napi, skb);
4992
4993                 received++;
4994                 budget--;
4995
4996 next_pkt:
4997                 (*post_ptr)++;
4998
4999                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5000                         tpr->rx_std_prod_idx = std_prod_idx &
5001                                                tp->rx_std_ring_mask;
5002                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5003                                      tpr->rx_std_prod_idx);
5004                         work_mask &= ~RXD_OPAQUE_RING_STD;
5005                         rx_std_posted = 0;
5006                 }
5007 next_pkt_nopost:
5008                 sw_idx++;
5009                 sw_idx &= tp->rx_ret_ring_mask;
5010
5011                 /* Refresh hw_idx to see if there is new work */
5012                 if (sw_idx == hw_idx) {
5013                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5014                         rmb();
5015                 }
5016         }
5017
5018         /* ACK the status ring. */
5019         tnapi->rx_rcb_ptr = sw_idx;
5020         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5021
5022         /* Refill RX ring(s). */
5023         if (!tg3_flag(tp, ENABLE_RSS)) {
5024                 if (work_mask & RXD_OPAQUE_RING_STD) {
5025                         tpr->rx_std_prod_idx = std_prod_idx &
5026                                                tp->rx_std_ring_mask;
5027                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5028                                      tpr->rx_std_prod_idx);
5029                 }
5030                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5031                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5032                                                tp->rx_jmb_ring_mask;
5033                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5034                                      tpr->rx_jmb_prod_idx);
5035                 }
5036                 mmiowb();
5037         } else if (work_mask) {
5038                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5039                  * updated before the producer indices can be updated.
5040                  */
5041                 smp_wmb();
5042
5043                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5044                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5045
5046                 if (tnapi != &tp->napi[1])
5047                         napi_schedule(&tp->napi[1].napi);
5048         }
5049
5050         return received;
5051 }
5052
5053 static void tg3_poll_link(struct tg3 *tp)
5054 {
5055         /* handle link change and other phy events */
5056         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5057                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5058
5059                 if (sblk->status & SD_STATUS_LINK_CHG) {
5060                         sblk->status = SD_STATUS_UPDATED |
5061                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5062                         spin_lock(&tp->lock);
5063                         if (tg3_flag(tp, USE_PHYLIB)) {
5064                                 tw32_f(MAC_STATUS,
5065                                      (MAC_STATUS_SYNC_CHANGED |
5066                                       MAC_STATUS_CFG_CHANGED |
5067                                       MAC_STATUS_MI_COMPLETION |
5068                                       MAC_STATUS_LNKSTATE_CHANGED));
5069                                 udelay(40);
5070                         } else
5071                                 tg3_setup_phy(tp, 0);
5072                         spin_unlock(&tp->lock);
5073                 }
5074         }
5075 }
5076
5077 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5078                                 struct tg3_rx_prodring_set *dpr,
5079                                 struct tg3_rx_prodring_set *spr)
5080 {
5081         u32 si, di, cpycnt, src_prod_idx;
5082         int i, err = 0;
5083
5084         while (1) {
5085                 src_prod_idx = spr->rx_std_prod_idx;
5086
5087                 /* Make sure updates to the rx_std_buffers[] entries and the
5088                  * standard producer index are seen in the correct order.
5089                  */
5090                 smp_rmb();
5091
5092                 if (spr->rx_std_cons_idx == src_prod_idx)
5093                         break;
5094
5095                 if (spr->rx_std_cons_idx < src_prod_idx)
5096                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5097                 else
5098                         cpycnt = tp->rx_std_ring_mask + 1 -
5099                                  spr->rx_std_cons_idx;
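                     /* Example (illustrative, assuming a 512-entry ring,
                      * i.e. mask 511): with cons_idx = 510 and prod_idx = 5
                      * the producer has wrapped, so cpycnt = 512 - 510 = 2
                      * entries are copied up to the wrap point and the
                      * remainder is handled on the next loop pass.
                      */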
5100
5101                 cpycnt = min(cpycnt,
5102                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5103
5104                 si = spr->rx_std_cons_idx;
5105                 di = dpr->rx_std_prod_idx;
5106
5107                 for (i = di; i < di + cpycnt; i++) {
5108                         if (dpr->rx_std_buffers[i].skb) {
5109                                 cpycnt = i - di;
5110                                 err = -ENOSPC;
5111                                 break;
5112                         }
5113                 }
5114
5115                 if (!cpycnt)
5116                         break;
5117
5118                 /* Ensure that updates to the rx_std_buffers ring and the
5119                  * shadowed hardware producer ring from tg3_recycle_skb() are
5120                  * ordered correctly WRT the skb check above.
5121                  */
5122                 smp_rmb();
5123
5124                 memcpy(&dpr->rx_std_buffers[di],
5125                        &spr->rx_std_buffers[si],
5126                        cpycnt * sizeof(struct ring_info));
5127
5128                 for (i = 0; i < cpycnt; i++, di++, si++) {
5129                         struct tg3_rx_buffer_desc *sbd, *dbd;
5130                         sbd = &spr->rx_std[si];
5131                         dbd = &dpr->rx_std[di];
5132                         dbd->addr_hi = sbd->addr_hi;
5133                         dbd->addr_lo = sbd->addr_lo;
5134                 }
5135
5136                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5137                                        tp->rx_std_ring_mask;
5138                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5139                                        tp->rx_std_ring_mask;
5140         }
5141
5142         while (1) {
5143                 src_prod_idx = spr->rx_jmb_prod_idx;
5144
5145                 /* Make sure updates to the rx_jmb_buffers[] entries and
5146                  * the jumbo producer index are seen in the correct order.
5147                  */
5148                 smp_rmb();
5149
5150                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5151                         break;
5152
5153                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5154                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5155                 else
5156                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5157                                  spr->rx_jmb_cons_idx;
5158
5159                 cpycnt = min(cpycnt,
5160                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5161
5162                 si = spr->rx_jmb_cons_idx;
5163                 di = dpr->rx_jmb_prod_idx;
5164
5165                 for (i = di; i < di + cpycnt; i++) {
5166                         if (dpr->rx_jmb_buffers[i].skb) {
5167                                 cpycnt = i - di;
5168                                 err = -ENOSPC;
5169                                 break;
5170                         }
5171                 }
5172
5173                 if (!cpycnt)
5174                         break;
5175
5176                 /* Ensure that updates to the rx_jmb_buffers ring and the
5177                  * shadowed hardware producer ring from tg3_recycle_skb() are
5178                  * ordered correctly WRT the skb check above.
5179                  */
5180                 smp_rmb();
5181
5182                 memcpy(&dpr->rx_jmb_buffers[di],
5183                        &spr->rx_jmb_buffers[si],
5184                        cpycnt * sizeof(struct ring_info));
5185
5186                 for (i = 0; i < cpycnt; i++, di++, si++) {
5187                         struct tg3_rx_buffer_desc *sbd, *dbd;
5188                         sbd = &spr->rx_jmb[si].std;
5189                         dbd = &dpr->rx_jmb[di].std;
5190                         dbd->addr_hi = sbd->addr_hi;
5191                         dbd->addr_lo = sbd->addr_lo;
5192                 }
5193
5194                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5195                                        tp->rx_jmb_ring_mask;
5196                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5197                                        tp->rx_jmb_ring_mask;
5198         }
5199
5200         return err;
5201 }
5202
5203 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5204 {
5205         struct tg3 *tp = tnapi->tp;
5206
5207         /* run TX completion thread */
5208         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5209                 tg3_tx(tnapi);
5210                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5211                         return work_done;
5212         }
5213
5214         /* run RX thread, within the bounds set by NAPI.
5215          * All RX "locking" is done by ensuring outside
5216          * code synchronizes with tg3->napi.poll()
5217          */
5218         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5219                 work_done += tg3_rx(tnapi, budget - work_done);
5220
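             /* With RSS, each vector recycles rx buffers into its own
              * prodring shadow; vector 1 gathers them all back into
              * napi[0]'s ring, the only one the hardware consumes from.
              */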
5221         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5222                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5223                 int i, err = 0;
5224                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5225                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5226
5227                 for (i = 1; i < tp->irq_cnt; i++)
5228                         err |= tg3_rx_prodring_xfer(tp, dpr,
5229                                                     &tp->napi[i].prodring);
5230
5231                 wmb();
5232
5233                 if (std_prod_idx != dpr->rx_std_prod_idx)
5234                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5235                                      dpr->rx_std_prod_idx);
5236
5237                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5238                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5239                                      dpr->rx_jmb_prod_idx);
5240
5241                 mmiowb();
5242
5243                 if (err)
5244                         tw32_f(HOSTCC_MODE, tp->coal_now);
5245         }
5246
5247         return work_done;
5248 }
5249
5250 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5251 {
5252         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5253         struct tg3 *tp = tnapi->tp;
5254         int work_done = 0;
5255         struct tg3_hw_status *sblk = tnapi->hw_status;
5256
5257         while (1) {
5258                 work_done = tg3_poll_work(tnapi, work_done, budget);
5259
5260                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5261                         goto tx_recovery;
5262
5263                 if (unlikely(work_done >= budget))
5264                         break;
5265
5266                 /* tnapi->last_tag is used below when reenabling interrupts
5267                  * to tell the hw how much work has been processed,
5268                  * so we must read it before checking for more work.
5269                  */
5270                 tnapi->last_tag = sblk->status_tag;
5271                 tnapi->last_irq_tag = tnapi->last_tag;
5272                 rmb();
5273
5274                 /* check for RX/TX work to do */
5275                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5276                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5277                         napi_complete(napi);
5278                         /* Reenable interrupts, acking work up through last_tag. */
5279                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5280                         mmiowb();
5281                         break;
5282                 }
5283         }
5284
5285         return work_done;
5286
5287 tx_recovery:
5288         /* work_done is guaranteed to be less than budget. */
5289         napi_complete(napi);
5290         schedule_work(&tp->reset_task);
5291         return work_done;
5292 }
5293
5294 static void tg3_process_error(struct tg3 *tp)
5295 {
5296         u32 val;
5297         bool real_error = false;
5298
5299         if (tg3_flag(tp, ERROR_PROCESSED))
5300                 return;
5301
5302         /* Check Flow Attention register */
5303         val = tr32(HOSTCC_FLOW_ATTN);
5304         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5305                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5306                 real_error = true;
5307         }
5308
5309         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5310                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5311                 real_error = true;
5312         }
5313
5314         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5315                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5316                 real_error = true;
5317         }
5318
5319         if (!real_error)
5320                 return;
5321
5322         tg3_dump_state(tp);
5323
5324         tg3_flag_set(tp, ERROR_PROCESSED);
5325         schedule_work(&tp->reset_task);
5326 }
5327
5328 static int tg3_poll(struct napi_struct *napi, int budget)
5329 {
5330         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5331         struct tg3 *tp = tnapi->tp;
5332         int work_done = 0;
5333         struct tg3_hw_status *sblk = tnapi->hw_status;
5334
5335         while (1) {
5336                 if (sblk->status & SD_STATUS_ERROR)
5337                         tg3_process_error(tp);
5338
5339                 tg3_poll_link(tp);
5340
5341                 work_done = tg3_poll_work(tnapi, work_done, budget);
5342
5343                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5344                         goto tx_recovery;
5345
5346                 if (unlikely(work_done >= budget))
5347                         break;
5348
5349                 if (tg3_flag(tp, TAGGED_STATUS)) {
5350                         /* tnapi->last_tag is used in tg3_int_reenable() below
5351                          * to tell the hw how much work has been processed,
5352                          * so we must read it before checking for more work.
5353                          */
5354                         tnapi->last_tag = sblk->status_tag;
5355                         tnapi->last_irq_tag = tnapi->last_tag;
5356                         rmb();
5357                 } else
5358                         sblk->status &= ~SD_STATUS_UPDATED;
5359
5360                 if (likely(!tg3_has_work(tnapi))) {
5361                         napi_complete(napi);
5362                         tg3_int_reenable(tnapi);
5363                         break;
5364                 }
5365         }
5366
5367         return work_done;
5368
5369 tx_recovery:
5370         /* work_done is guaranteed to be less than budget. */
5371         napi_complete(napi);
5372         schedule_work(&tp->reset_task);
5373         return work_done;
5374 }
5375
5376 static void tg3_napi_disable(struct tg3 *tp)
5377 {
5378         int i;
5379
5380         for (i = tp->irq_cnt - 1; i >= 0; i--)
5381                 napi_disable(&tp->napi[i].napi);
5382 }
5383
5384 static void tg3_napi_enable(struct tg3 *tp)
5385 {
5386         int i;
5387
5388         for (i = 0; i < tp->irq_cnt; i++)
5389                 napi_enable(&tp->napi[i].napi);
5390 }
5391
5392 static void tg3_napi_init(struct tg3 *tp)
5393 {
5394         int i;
5395
5396         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5397         for (i = 1; i < tp->irq_cnt; i++)
5398                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5399 }
5400
5401 static void tg3_napi_fini(struct tg3 *tp)
5402 {
5403         int i;
5404
5405         for (i = 0; i < tp->irq_cnt; i++)
5406                 netif_napi_del(&tp->napi[i].napi);
5407 }
5408
5409 static inline void tg3_netif_stop(struct tg3 *tp)
5410 {
5411         tp->dev->trans_start = jiffies; /* prevent tx timeout */
5412         tg3_napi_disable(tp);
5413         netif_tx_disable(tp->dev);
5414 }
5415
5416 static inline void tg3_netif_start(struct tg3 *tp)
5417 {
5418         /* NOTE: unconditional netif_tx_wake_all_queues is only
5419          * appropriate so long as all callers are assured to
5420          * have free tx slots (such as after tg3_init_hw)
5421          */
5422         netif_tx_wake_all_queues(tp->dev);
5423
5424         tg3_napi_enable(tp);
5425         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5426         tg3_enable_ints(tp);
5427 }
5428
5429 static void tg3_irq_quiesce(struct tg3 *tp)
5430 {
5431         int i;
5432
5433         BUG_ON(tp->irq_sync);
5434
5435         tp->irq_sync = 1;
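             /* Make irq_sync visible to the IRQ handlers before waiting
              * for any in-flight handlers to finish.
              */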
5436         smp_mb();
5437
5438         for (i = 0; i < tp->irq_cnt; i++)
5439                 synchronize_irq(tp->napi[i].irq_vec);
5440 }
5441
5442 /* Fully shut down all tg3 driver activity elsewhere in the system.
5443  * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
5444  * Usually this is only necessary when shutting down
5445  * the device.
5446  */
5447 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5448 {
5449         spin_lock_bh(&tp->lock);
5450         if (irq_sync)
5451                 tg3_irq_quiesce(tp);
5452 }
5453
5454 static inline void tg3_full_unlock(struct tg3 *tp)
5455 {
5456         spin_unlock_bh(&tp->lock);
5457 }
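
     /* A minimal usage sketch (illustrative, not a real call site):
      *
      *      tg3_full_lock(tp, 1);      (irq_sync != 0 also quiesces IRQs)
      *      ... reconfigure the chip ...
      *      tg3_full_unlock(tp);
      *
      * Passing irq_sync == 0 takes just the lock, which is sufficient
      * whenever the IRQ handlers may safely keep running.
      */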
5458
5459 /* One-shot MSI handler - the chip automatically disables the interrupt
5460  * after sending the MSI, so the driver doesn't have to do it.
5461  */
5462 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5463 {
5464         struct tg3_napi *tnapi = dev_id;
5465         struct tg3 *tp = tnapi->tp;
5466
5467         prefetch(tnapi->hw_status);
5468         if (tnapi->rx_rcb)
5469                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5470
5471         if (likely(!tg3_irq_sync(tp)))
5472                 napi_schedule(&tnapi->napi);
5473
5474         return IRQ_HANDLED;
5475 }
5476
5477 /* MSI ISR - No need to check for interrupt sharing and no need to
5478  * flush the status block and interrupt mailbox.  PCI ordering rules
5479  * guarantee that the MSI will arrive after the status block.
5480  */
5481 static irqreturn_t tg3_msi(int irq, void *dev_id)
5482 {
5483         struct tg3_napi *tnapi = dev_id;
5484         struct tg3 *tp = tnapi->tp;
5485
5486         prefetch(tnapi->hw_status);
5487         if (tnapi->rx_rcb)
5488                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5489         /*
5490          * Writing any value to intr-mbox-0 clears PCI INTA# and
5491          * chip-internal interrupt pending events.
5492          * Writing non-zero to intr-mbox-0 additionally tells the
5493          * NIC to stop sending us irqs, engaging "in-intr-handler"
5494          * event coalescing.
5495          */
5496         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5497         if (likely(!tg3_irq_sync(tp)))
5498                 napi_schedule(&tnapi->napi);
5499
5500         return IRQ_RETVAL(1);
5501 }
5502
5503 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5504 {
5505         struct tg3_napi *tnapi = dev_id;
5506         struct tg3 *tp = tnapi->tp;
5507         struct tg3_hw_status *sblk = tnapi->hw_status;
5508         unsigned int handled = 1;
5509
5510         /* In INTx mode, it is possible for the interrupt to arrive at
5511          * the CPU before the status block posted prior to it is visible.
5512          * Reading the PCI State register will confirm whether the
5513          * interrupt is ours and will flush the status block.
5514          */
5515         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5516                 if (tg3_flag(tp, CHIP_RESETTING) ||
5517                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5518                         handled = 0;
5519                         goto out;
5520                 }
5521         }
5522
5523         /*
5524          * Writing any value to intr-mbox-0 clears PCI INTA# and
5525          * chip-internal interrupt pending events.
5526          * Writing non-zero to intr-mbox-0 additionally tells the
5527          * NIC to stop sending us irqs, engaging "in-intr-handler"
5528          * event coalescing.
5529          *
5530          * Flush the mailbox to de-assert the IRQ immediately to prevent
5531          * spurious interrupts.  The flush impacts performance but
5532          * excessive spurious interrupts can be worse in some cases.
5533          */
5534         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5535         if (tg3_irq_sync(tp))
5536                 goto out;
5537         sblk->status &= ~SD_STATUS_UPDATED;
5538         if (likely(tg3_has_work(tnapi))) {
5539                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5540                 napi_schedule(&tnapi->napi);
5541         } else {
5542                 /* No work, shared interrupt perhaps?  re-enable
5543                  * interrupts, and flush that PCI write
5544                  */
5545                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5546                                0x00000000);
5547         }
5548 out:
5549         return IRQ_RETVAL(handled);
5550 }
5551
5552 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5553 {
5554         struct tg3_napi *tnapi = dev_id;
5555         struct tg3 *tp = tnapi->tp;
5556         struct tg3_hw_status *sblk = tnapi->hw_status;
5557         unsigned int handled = 1;
5558
5559         /* In INTx mode, the interrupt can arrive at the CPU before
5560          * the status block posted prior to it is visible in memory.
5561          * Reading the PCI State register will confirm whether the
5562          * interrupt is ours and will flush the status block.
5563          */
5564         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5565                 if (tg3_flag(tp, CHIP_RESETTING) ||
5566                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5567                         handled = 0;
5568                         goto out;
5569                 }
5570         }
5571
5572         /*
5573          * Writing any value to intr-mbox-0 clears PCI INTA# and
5574          * chip-internal interrupt pending events.
5575          * Writing non-zero to intr-mbox-0 additionally tells the
5576          * NIC to stop sending us irqs, engaging "in-intr-handler"
5577          * event coalescing.
5578          *
5579          * Flush the mailbox to de-assert the IRQ immediately to prevent
5580          * spurious interrupts.  The flush impacts performance but
5581          * excessive spurious interrupts can be worse in some cases.
5582          */
5583         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5584
5585         /*
5586          * In a shared interrupt configuration, sometimes other devices'
5587          * interrupts will scream.  We record the current status tag here
5588          * so that the above check can report that the screaming interrupts
5589          * are unhandled.  Eventually they will be silenced.
5590          */
5591         tnapi->last_irq_tag = sblk->status_tag;
5592
5593         if (tg3_irq_sync(tp))
5594                 goto out;
5595
5596         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5597
5598         napi_schedule(&tnapi->napi);
5599
5600 out:
5601         return IRQ_RETVAL(handled);
5602 }
5603
5604 /* ISR for interrupt test */
5605 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5606 {
5607         struct tg3_napi *tnapi = dev_id;
5608         struct tg3 *tp = tnapi->tp;
5609         struct tg3_hw_status *sblk = tnapi->hw_status;
5610
5611         if ((sblk->status & SD_STATUS_UPDATED) ||
5612             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5613                 tg3_disable_ints(tp);
5614                 return IRQ_RETVAL(1);
5615         }
5616         return IRQ_RETVAL(0);
5617 }
5618
5619 static int tg3_init_hw(struct tg3 *, int);
5620 static int tg3_halt(struct tg3 *, int, int);
5621
5622 /* Restart hardware after configuration changes, self-test, etc.
5623  * Invoked with tp->lock held.
5624  */
5625 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5626         __releases(tp->lock)
5627         __acquires(tp->lock)
5628 {
5629         int err;
5630
5631         err = tg3_init_hw(tp, reset_phy);
5632         if (err) {
5633                 netdev_err(tp->dev,
5634                            "Failed to re-initialize device, aborting\n");
5635                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5636                 tg3_full_unlock(tp);
5637                 del_timer_sync(&tp->timer);
5638                 tp->irq_sync = 0;
5639                 tg3_napi_enable(tp);
5640                 dev_close(tp->dev);
5641                 tg3_full_lock(tp, 0);
5642         }
5643         return err;
5644 }
5645
5646 #ifdef CONFIG_NET_POLL_CONTROLLER
5647 static void tg3_poll_controller(struct net_device *dev)
5648 {
5649         int i;
5650         struct tg3 *tp = netdev_priv(dev);
5651
5652         for (i = 0; i < tp->irq_cnt; i++)
5653                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5654 }
5655 #endif
5656
5657 static void tg3_reset_task(struct work_struct *work)
5658 {
5659         struct tg3 *tp = container_of(work, struct tg3, reset_task);
5660         int err;
5661         unsigned int restart_timer;
5662
5663         tg3_full_lock(tp, 0);
5664
5665         if (!netif_running(tp->dev)) {
5666                 tg3_full_unlock(tp);
5667                 return;
5668         }
5669
5670         tg3_full_unlock(tp);
5671
5672         tg3_phy_stop(tp);
5673
5674         tg3_netif_stop(tp);
5675
5676         tg3_full_lock(tp, 1);
5677
5678         restart_timer = tg3_flag(tp, RESTART_TIMER);
5679         tg3_flag_clear(tp, RESTART_TIMER);
5680
5681         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5682                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5683                 tp->write32_rx_mbox = tg3_write_flush_reg32;
5684                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5685                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5686         }
5687
5688         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5689         err = tg3_init_hw(tp, 1);
5690         if (err)
5691                 goto out;
5692
5693         tg3_netif_start(tp);
5694
5695         if (restart_timer)
5696                 mod_timer(&tp->timer, jiffies + 1);
5697
5698 out:
5699         tg3_full_unlock(tp);
5700
5701         if (!err)
5702                 tg3_phy_start(tp);
5703 }
5704
5705 static void tg3_tx_timeout(struct net_device *dev)
5706 {
5707         struct tg3 *tp = netdev_priv(dev);
5708
5709         if (netif_msg_tx_err(tp)) {
5710                 netdev_err(dev, "transmit timed out, resetting\n");
5711                 tg3_dump_state(tp);
5712         }
5713
5714         schedule_work(&tp->reset_task);
5715 }
5716
5717 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5718 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5719 {
5720         u32 base = (u32) mapping & 0xffffffff;
5721
5722         return (base > 0xffffdcc0) && (base + len + 8 < base);
5723 }
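
/* Worked example (illustrative): a 100-byte buffer mapped at
 * 0xfffffff0 straddles the 4GB line.  base = 0xfffffff0 exceeds
 * 0xffffdcc0, and base + 100 + 8 wraps the low 32 bits (to 0x5c),
 * so base + len + 8 < base catches the overflow.
 */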
5724
5725 /* Test for DMA addresses > 40-bit */
5726 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5727                                           int len)
5728 {
5729 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5730         if (tg3_flag(tp, 40BIT_DMA_BUG))
5731                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5732         return 0;
5733 #else
5734         return 0;
5735 #endif
5736 }
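
/* Illustrative: with the 40BIT_DMA_BUG flag set, a mapping whose end
 * crosses DMA_BIT_MASK(40) (0xff_ffff_ffff) is rejected, e.g. mapping
 * 0xfffffffff0 with len 0x20 gives 0x10000000010 > DMA_BIT_MASK(40).
 */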
5737
5738 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5739                         dma_addr_t mapping, int len, u32 flags,
5740                         u32 mss_and_is_end)
5741 {
5742         struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5743         int is_end = (mss_and_is_end & 0x1);
5744         u32 mss = (mss_and_is_end >> 1);
5745         u32 vlan_tag = 0;
5746
5747         if (is_end)
5748                 flags |= TXD_FLAG_END;
5749         if (flags & TXD_FLAG_VLAN) {
5750                 vlan_tag = flags >> 16;
5751                 flags &= 0xffff;
5752         }
5753         vlan_tag |= (mss << TXD_MSS_SHIFT);
5754
5755         txd->addr_hi = ((u64) mapping >> 32);
5756         txd->addr_lo = ((u64) mapping & 0xffffffff);
5757         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5758         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5759 }
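
/* Illustrative call (not from this file): marking the single descriptor
 * of a linear skb as the end of the packet with an MSS of 1460, using
 * the same mss_and_is_end packing as the callers below:
 *
 *	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
 *		    1 | (1460 << 1));
 */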
5760
5761 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5762                                 struct sk_buff *skb, int last)
5763 {
5764         int i;
5765         u32 entry = tnapi->tx_prod;
5766         struct ring_info *txb = &tnapi->tx_buffers[entry];
5767
5768         pci_unmap_single(tnapi->tp->pdev,
5769                          dma_unmap_addr(txb, mapping),
5770                          skb_headlen(skb),
5771                          PCI_DMA_TODEVICE);
5772         for (i = 0; i <= last; i++) {
5773                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5774
5775                 entry = NEXT_TX(entry);
5776                 txb = &tnapi->tx_buffers[entry];
5777
5778                 pci_unmap_page(tnapi->tp->pdev,
5779                                dma_unmap_addr(txb, mapping),
5780                                frag->size, PCI_DMA_TODEVICE);
5781         }
5782 }
5783
5784 /* Work around 4GB and 40-bit hardware DMA bugs. */
5785 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5786                                        struct sk_buff *skb,
5787                                        u32 base_flags, u32 mss)
5788 {
5789         struct tg3 *tp = tnapi->tp;
5790         struct sk_buff *new_skb;
5791         dma_addr_t new_addr = 0;
5792         u32 entry = tnapi->tx_prod;
5793         int ret = 0;
5794
5795         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5796                 new_skb = skb_copy(skb, GFP_ATOMIC);
5797         else {
5798                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5799
5800                 new_skb = skb_copy_expand(skb,
5801                                           skb_headroom(skb) + more_headroom,
5802                                           skb_tailroom(skb), GFP_ATOMIC);
5803         }
5804
5805         if (!new_skb) {
5806                 ret = -1;
5807         } else {
5808                 /* New SKB is guaranteed to be linear. */
5809                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5810                                           PCI_DMA_TODEVICE);
5811                 /* Make sure the mapping succeeded */
5812                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5813                         ret = -1;
5814                         dev_kfree_skb(new_skb);
5815
5816                 /* Make sure new skb does not cross any 4G boundaries.
5817                  * Drop the packet if it does.
5818                  */
5819                 } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
5820                            tg3_4g_overflow_test(new_addr, new_skb->len)) {
5821                         pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5822                                          PCI_DMA_TODEVICE);
5823                         ret = -1;
5824                         dev_kfree_skb(new_skb);
5825                 } else {
5826                         tnapi->tx_buffers[entry].skb = new_skb;
5827                         dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5828                                            mapping, new_addr);
5829
5830                         tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5831                                     base_flags, 1 | (mss << 1));
5832                 }
5833         }
5834
5835         dev_kfree_skb(skb);
5836
5837         return ret;
5838 }
5839
5840 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5841
5842 /* Use GSO to work around a rare TSO bug that may be triggered when the
5843  * TSO header is greater than 80 bytes.
5844  */
5845 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5846 {
5847         struct sk_buff *segs, *nskb;
5848         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5849
5850         /* Estimate the number of fragments in the worst case */
5851         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5852                 netif_stop_queue(tp->dev);
5853
5854                 /* netif_tx_stop_queue() must be done before checking
5855                  * tx index in tg3_tx_avail() below, because in
5856                  * tg3_tx(), we update tx index before checking for
5857                  * netif_tx_queue_stopped().
5858                  */
5859                 smp_mb();
5860                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5861                         return NETDEV_TX_BUSY;
5862
5863                 netif_wake_queue(tp->dev);
5864         }
5865
5866         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5867         if (IS_ERR(segs))
5868                 goto tg3_tso_bug_end;
5869
5870         do {
5871                 nskb = segs;
5872                 segs = segs->next;
5873                 nskb->next = NULL;
5874                 tg3_start_xmit(nskb, tp->dev);
5875         } while (segs);
5876
5877 tg3_tso_bug_end:
5878         dev_kfree_skb(skb);
5879
5880         return NETDEV_TX_OK;
5881 }
5882
5883 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5884  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5885  */
5886 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5887 {
5888         struct tg3 *tp = netdev_priv(dev);
5889         u32 len, entry, base_flags, mss;
5890         int i = -1, would_hit_hwbug;
5891         dma_addr_t mapping;
5892         struct tg3_napi *tnapi;
5893         struct netdev_queue *txq;
5894         unsigned int last;
5895
5896         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5897         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5898         if (tg3_flag(tp, ENABLE_TSS))
5899                 tnapi++;
5900
5901         /* We are running in BH disabled context with netif_tx_lock
5902          * and TX reclaim runs via tp->napi.poll inside of a software
5903          * interrupt.  Furthermore, IRQ processing runs lockless so we have
5904          * no IRQ context deadlocks to worry about either.  Rejoice!
5905          */
5906         if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5907                 if (!netif_tx_queue_stopped(txq)) {
5908                         netif_tx_stop_queue(txq);
5909
5910                         /* This is a hard error, log it. */
5911                         netdev_err(dev,
5912                                    "BUG! Tx Ring full when queue awake!\n");
5913                 }
5914                 return NETDEV_TX_BUSY;
5915         }
5916
5917         entry = tnapi->tx_prod;
5918         base_flags = 0;
5919         if (skb->ip_summed == CHECKSUM_PARTIAL)
5920                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5921
5922         mss = skb_shinfo(skb)->gso_size;
5923         if (mss) {
5924                 struct iphdr *iph;
5925                 u32 tcp_opt_len, hdr_len;
5926
5927                 if (skb_header_cloned(skb) &&
5928                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5929                         dev_kfree_skb(skb);
5930                         goto out_unlock;
5931                 }
5932
5933                 iph = ip_hdr(skb);
5934                 tcp_opt_len = tcp_optlen(skb);
5935
5936                 if (skb_is_gso_v6(skb)) {
5937                         hdr_len = skb_headlen(skb) - ETH_HLEN;
5938                 } else {
5939                         u32 ip_tcp_len;
5940
5941                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5942                         hdr_len = ip_tcp_len + tcp_opt_len;
5943
5944                         iph->check = 0;
5945                         iph->tot_len = htons(mss + hdr_len);
5946                 }
5947
5948                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5949                     tg3_flag(tp, TSO_BUG))
5950                         return tg3_tso_bug(tp, skb);
5951
5952                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5953                                TXD_FLAG_CPU_POST_DMA);
5954
5955                 if (tg3_flag(tp, HW_TSO_1) ||
5956                     tg3_flag(tp, HW_TSO_2) ||
5957                     tg3_flag(tp, HW_TSO_3)) {
5958                         tcp_hdr(skb)->check = 0;
5959                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5960                 } else
5961                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5962                                                                  iph->daddr, 0,
5963                                                                  IPPROTO_TCP,
5964                                                                  0);
5965
5966                 if (tg3_flag(tp, HW_TSO_3)) {
5967                         mss |= (hdr_len & 0xc) << 12;
5968                         if (hdr_len & 0x10)
5969                                 base_flags |= 0x00000010;
5970                         base_flags |= (hdr_len & 0x3e0) << 5;
5971                 } else if (tg3_flag(tp, HW_TSO_2))
5972                         mss |= hdr_len << 9;
5973                 else if (tg3_flag(tp, HW_TSO_1) ||
5974                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5975                         if (tcp_opt_len || iph->ihl > 5) {
5976                                 int tsflags;
5977
5978                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5979                                 mss |= (tsflags << 11);
5980                         }
5981                 } else {
5982                         if (tcp_opt_len || iph->ihl > 5) {
5983                                 int tsflags;
5984
5985                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5986                                 base_flags |= tsflags << 12;
5987                         }
5988                 }
5989         }
5990
5991         if (vlan_tx_tag_present(skb))
5992                 base_flags |= (TXD_FLAG_VLAN |
5993                                (vlan_tx_tag_get(skb) << 16));
5994
5995         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
5996             !mss && skb->len > VLAN_ETH_FRAME_LEN)
5997                 base_flags |= TXD_FLAG_JMB_PKT;
5998
5999         len = skb_headlen(skb);
6000
6001         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6002         if (pci_dma_mapping_error(tp->pdev, mapping)) {
6003                 dev_kfree_skb(skb);
6004                 goto out_unlock;
6005         }
6006
6007         tnapi->tx_buffers[entry].skb = skb;
6008         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6009
6010         would_hit_hwbug = 0;
6011
6012         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6013                 would_hit_hwbug = 1;
6014
6015         if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6016             tg3_4g_overflow_test(mapping, len))
6017                 would_hit_hwbug = 1;
6018
6019         if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6020             tg3_40bit_overflow_test(tp, mapping, len))
6021                 would_hit_hwbug = 1;
6022
6023         if (tg3_flag(tp, 5701_DMA_BUG))
6024                 would_hit_hwbug = 1;
6025
6026         tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6027                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6028
6029         entry = NEXT_TX(entry);
6030
6031         /* Now loop through additional data fragments, and queue them. */
6032         if (skb_shinfo(skb)->nr_frags > 0) {
6033                 last = skb_shinfo(skb)->nr_frags - 1;
6034                 for (i = 0; i <= last; i++) {
6035                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6036
6037                         len = frag->size;
6038                         mapping = pci_map_page(tp->pdev,
6039                                                frag->page,
6040                                                frag->page_offset,
6041                                                len, PCI_DMA_TODEVICE);
6042
6043                         tnapi->tx_buffers[entry].skb = NULL;
6044                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6045                                            mapping);
6046                         if (pci_dma_mapping_error(tp->pdev, mapping))
6047                                 goto dma_error;
6048
6049                         if (tg3_flag(tp, SHORT_DMA_BUG) &&
6050                             len <= 8)
6051                                 would_hit_hwbug = 1;
6052
6053                         if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6054                             tg3_4g_overflow_test(mapping, len))
6055                                 would_hit_hwbug = 1;
6056
6057                         if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6058                             tg3_40bit_overflow_test(tp, mapping, len))
6059                                 would_hit_hwbug = 1;
6060
6061                         if (tg3_flag(tp, HW_TSO_1) ||
6062                             tg3_flag(tp, HW_TSO_2) ||
6063                             tg3_flag(tp, HW_TSO_3))
6064                                 tg3_set_txd(tnapi, entry, mapping, len,
6065                                             base_flags, (i == last)|(mss << 1));
6066                         else
6067                                 tg3_set_txd(tnapi, entry, mapping, len,
6068                                             base_flags, (i == last));
6069
6070                         entry = NEXT_TX(entry);
6071                 }
6072         }
6073
6074         if (would_hit_hwbug) {
6075                 tg3_skb_error_unmap(tnapi, skb, i);
6076
6077                 /* If the workaround fails due to memory/mapping
6078                  * failure, silently drop this packet.
6079                  */
6080                 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6081                         goto out_unlock;
6082
6083                 entry = NEXT_TX(tnapi->tx_prod);
6084         }
6085
6086         /* Packets are ready, update Tx producer idx local and on card. */
6087         tw32_tx_mbox(tnapi->prodmbox, entry);
6088
6089         tnapi->tx_prod = entry;
6090         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6091                 netif_tx_stop_queue(txq);
6092
6093                 /* netif_tx_stop_queue() must be done before checking
6094                  * tx index in tg3_tx_avail() below, because in
6095                  * tg3_tx(), we update tx index before checking for
6096                  * netif_tx_queue_stopped().
6097                  */
6098                 smp_mb();
6099                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6100                         netif_tx_wake_queue(txq);
6101         }
6102
6103 out_unlock:
6104         mmiowb();
6105
6106         return NETDEV_TX_OK;
6107
6108 dma_error:
6109         tg3_skb_error_unmap(tnapi, skb, i);
6110         dev_kfree_skb(skb);
6111         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6112         return NETDEV_TX_OK;
6113 }
6114
6115 static void tg3_set_loopback(struct net_device *dev, u32 features)
6116 {
6117         struct tg3 *tp = netdev_priv(dev);
6118
6119         if (features & NETIF_F_LOOPBACK) {
6120                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6121                         return;
6122
6123                 /*
6124                  * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6125                  * loopback mode if Half-Duplex mode was negotiated earlier.
6126                  */
6127                 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6128
6129                 /* Enable internal MAC loopback mode */
6130                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6131                 spin_lock_bh(&tp->lock);
6132                 tw32(MAC_MODE, tp->mac_mode);
6133                 netif_carrier_on(tp->dev);
6134                 spin_unlock_bh(&tp->lock);
6135                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6136         } else {
6137                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6138                         return;
6139
6140                 /* Disable internal MAC loopback mode */
6141                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6142                 spin_lock_bh(&tp->lock);
6143                 tw32(MAC_MODE, tp->mac_mode);
6144                 /* Force link status check */
6145                 tg3_setup_phy(tp, 1);
6146                 spin_unlock_bh(&tp->lock);
6147                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6148         }
6149 }
6150
6151 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6152 {
6153         struct tg3 *tp = netdev_priv(dev);
6154
6155         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6156                 features &= ~NETIF_F_ALL_TSO;
6157
6158         return features;
6159 }
6160
6161 static int tg3_set_features(struct net_device *dev, u32 features)
6162 {
6163         u32 changed = dev->features ^ features;
6164
6165         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6166                 tg3_set_loopback(dev, features);
6167
6168         return 0;
6169 }
6170
6171 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6172                                int new_mtu)
6173 {
6174         dev->mtu = new_mtu;
6175
6176         if (new_mtu > ETH_DATA_LEN) {
6177                 if (tg3_flag(tp, 5780_CLASS)) {
6178                         netdev_update_features(dev);
6179                         tg3_flag_clear(tp, TSO_CAPABLE);
6180                 } else {
6181                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
6182                 }
6183         } else {
6184                 if (tg3_flag(tp, 5780_CLASS)) {
6185                         tg3_flag_set(tp, TSO_CAPABLE);
6186                         netdev_update_features(dev);
6187                 }
6188                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6189         }
6190 }
6191
6192 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6193 {
6194         struct tg3 *tp = netdev_priv(dev);
6195         int err;
6196
6197         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6198                 return -EINVAL;
6199
6200         if (!netif_running(dev)) {
6201                 /* We'll just catch it later when the
6202                  * device is brought up.
6203                  */
6204                 tg3_set_mtu(dev, tp, new_mtu);
6205                 return 0;
6206         }
6207
6208         tg3_phy_stop(tp);
6209
6210         tg3_netif_stop(tp);
6211
6212         tg3_full_lock(tp, 1);
6213
6214         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6215
6216         tg3_set_mtu(dev, tp, new_mtu);
6217
6218         err = tg3_restart_hw(tp, 0);
6219
6220         if (!err)
6221                 tg3_netif_start(tp);
6222
6223         tg3_full_unlock(tp);
6224
6225         if (!err)
6226                 tg3_phy_start(tp);
6227
6228         return err;
6229 }
6230
6231 static void tg3_rx_prodring_free(struct tg3 *tp,
6232                                  struct tg3_rx_prodring_set *tpr)
6233 {
6234         int i;
6235
6236         if (tpr != &tp->napi[0].prodring) {
6237                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6238                      i = (i + 1) & tp->rx_std_ring_mask)
6239                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6240                                         tp->rx_pkt_map_sz);
6241
6242                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6243                         for (i = tpr->rx_jmb_cons_idx;
6244                              i != tpr->rx_jmb_prod_idx;
6245                              i = (i + 1) & tp->rx_jmb_ring_mask) {
6246                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6247                                                 TG3_RX_JMB_MAP_SZ);
6248                         }
6249                 }
6250
6251                 return;
6252         }
6253
6254         for (i = 0; i <= tp->rx_std_ring_mask; i++)
6255                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6256                                 tp->rx_pkt_map_sz);
6257
6258         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6259                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6260                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6261                                         TG3_RX_JMB_MAP_SZ);
6262         }
6263 }
6264
6265 /* Initialize rx rings for packet processing.
6266  *
6267  * The chip has been shut down and the driver detached from
6268  * the networking, so no interrupts or new tx packets will
6269  * end up in the driver.  tp->{tx,}lock are held and thus
6270  * we may not sleep.
6271  */
6272 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6273                                  struct tg3_rx_prodring_set *tpr)
6274 {
6275         u32 i, rx_pkt_dma_sz;
6276
6277         tpr->rx_std_cons_idx = 0;
6278         tpr->rx_std_prod_idx = 0;
6279         tpr->rx_jmb_cons_idx = 0;
6280         tpr->rx_jmb_prod_idx = 0;
6281
6282         if (tpr != &tp->napi[0].prodring) {
6283                 memset(&tpr->rx_std_buffers[0], 0,
6284                        TG3_RX_STD_BUFF_RING_SIZE(tp));
6285                 if (tpr->rx_jmb_buffers)
6286                         memset(&tpr->rx_jmb_buffers[0], 0,
6287                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
6288                 goto done;
6289         }
6290
6291         /* Zero out all descriptors. */
6292         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6293
6294         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6295         if (tg3_flag(tp, 5780_CLASS) &&
6296             tp->dev->mtu > ETH_DATA_LEN)
6297                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6298         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6299
6300         /* Initialize invariants of the rings; we only set this
6301          * stuff once.  This works because the card does not
6302          * write into the rx buffer posting rings.
6303          */
6304         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6305                 struct tg3_rx_buffer_desc *rxd;
6306
6307                 rxd = &tpr->rx_std[i];
6308                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6309                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6310                 rxd->opaque = (RXD_OPAQUE_RING_STD |
6311                                (i << RXD_OPAQUE_INDEX_SHIFT));
6312         }
6313
6314         /* Now allocate fresh SKBs for each rx ring. */
6315         for (i = 0; i < tp->rx_pending; i++) {
6316                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6317                         netdev_warn(tp->dev,
6318                                     "Using a smaller RX standard ring. Only "
6319                                     "%d out of %d buffers were allocated "
6320                                     "successfully\n", i, tp->rx_pending);
6321                         if (i == 0)
6322                                 goto initfail;
6323                         tp->rx_pending = i;
6324                         break;
6325                 }
6326         }
6327
6328         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6329                 goto done;
6330
6331         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6332
6333         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6334                 goto done;
6335
6336         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6337                 struct tg3_rx_buffer_desc *rxd;
6338
6339                 rxd = &tpr->rx_jmb[i].std;
6340                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6341                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6342                                   RXD_FLAG_JUMBO;
6343                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6344                        (i << RXD_OPAQUE_INDEX_SHIFT));
6345         }
6346
6347         for (i = 0; i < tp->rx_jumbo_pending; i++) {
6348                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6349                         netdev_warn(tp->dev,
6350                                     "Using a smaller RX jumbo ring. Only %d "
6351                                     "out of %d buffers were allocated "
6352                                     "successfully\n", i, tp->rx_jumbo_pending);
6353                         if (i == 0)
6354                                 goto initfail;
6355                         tp->rx_jumbo_pending = i;
6356                         break;
6357                 }
6358         }
6359
6360 done:
6361         return 0;
6362
6363 initfail:
6364         tg3_rx_prodring_free(tp, tpr);
6365         return -ENOMEM;
6366 }
6367
6368 static void tg3_rx_prodring_fini(struct tg3 *tp,
6369                                  struct tg3_rx_prodring_set *tpr)
6370 {
6371         kfree(tpr->rx_std_buffers);
6372         tpr->rx_std_buffers = NULL;
6373         kfree(tpr->rx_jmb_buffers);
6374         tpr->rx_jmb_buffers = NULL;
6375         if (tpr->rx_std) {
6376                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6377                                   tpr->rx_std, tpr->rx_std_mapping);
6378                 tpr->rx_std = NULL;
6379         }
6380         if (tpr->rx_jmb) {
6381                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6382                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
6383                 tpr->rx_jmb = NULL;
6384         }
6385 }
6386
6387 static int tg3_rx_prodring_init(struct tg3 *tp,
6388                                 struct tg3_rx_prodring_set *tpr)
6389 {
6390         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6391                                       GFP_KERNEL);
6392         if (!tpr->rx_std_buffers)
6393                 return -ENOMEM;
6394
6395         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6396                                          TG3_RX_STD_RING_BYTES(tp),
6397                                          &tpr->rx_std_mapping,
6398                                          GFP_KERNEL);
6399         if (!tpr->rx_std)
6400                 goto err_out;
6401
6402         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6403                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6404                                               GFP_KERNEL);
6405                 if (!tpr->rx_jmb_buffers)
6406                         goto err_out;
6407
6408                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6409                                                  TG3_RX_JMB_RING_BYTES(tp),
6410                                                  &tpr->rx_jmb_mapping,
6411                                                  GFP_KERNEL);
6412                 if (!tpr->rx_jmb)
6413                         goto err_out;
6414         }
6415
6416         return 0;
6417
6418 err_out:
6419         tg3_rx_prodring_fini(tp, tpr);
6420         return -ENOMEM;
6421 }
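
/* Lifecycle sketch (illustrative): tg3_rx_prodring_init() allocates the
 * ring memory once, tg3_rx_prodring_alloc() posts fresh skbs on each
 * open/reset, tg3_rx_prodring_free() reclaims the skbs, and
 * tg3_rx_prodring_fini() releases the memory.
 */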
6422
6423 /* Free up pending packets in all rx/tx rings.
6424  *
6425  * The chip has been shut down and the driver detached from
6426  * the networking, so no interrupts or new tx packets will
6427  * end up in the driver.  tp->{tx,}lock is not held and we are not
6428  * in an interrupt context and thus may sleep.
6429  */
6430 static void tg3_free_rings(struct tg3 *tp)
6431 {
6432         int i, j;
6433
6434         for (j = 0; j < tp->irq_cnt; j++) {
6435                 struct tg3_napi *tnapi = &tp->napi[j];
6436
6437                 tg3_rx_prodring_free(tp, &tnapi->prodring);
6438
6439                 if (!tnapi->tx_buffers)
6440                         continue;
6441
6442                 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6443                         struct ring_info *txp;
6444                         struct sk_buff *skb;
6445                         unsigned int k;
6446
6447                         txp = &tnapi->tx_buffers[i];
6448                         skb = txp->skb;
6449
6450                         if (skb == NULL) {
6451                                 i++;
6452                                 continue;
6453                         }
6454
6455                         pci_unmap_single(tp->pdev,
6456                                          dma_unmap_addr(txp, mapping),
6457                                          skb_headlen(skb),
6458                                          PCI_DMA_TODEVICE);
6459                         txp->skb = NULL;
6460
6461                         i++;
6462
6463                         for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6464                                 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6465                                 pci_unmap_page(tp->pdev,
6466                                                dma_unmap_addr(txp, mapping),
6467                                                skb_shinfo(skb)->frags[k].size,
6468                                                PCI_DMA_TODEVICE);
6469                                 i++;
6470                         }
6471
6472                         dev_kfree_skb_any(skb);
6473                 }
6474         }
6475 }
6476
6477 /* Initialize tx/rx rings for packet processing.
6478  *
6479  * The chip has been shut down and the driver detached from
6480  * the networking, so no interrupts or new tx packets will
6481  * the networking stack, so no interrupts or new tx packets will
6482  * we may not sleep.
6483  */
6484 static int tg3_init_rings(struct tg3 *tp)
6485 {
6486         int i;
6487
6488         /* Free up all the SKBs. */
6489         tg3_free_rings(tp);
6490
6491         for (i = 0; i < tp->irq_cnt; i++) {
6492                 struct tg3_napi *tnapi = &tp->napi[i];
6493
6494                 tnapi->last_tag = 0;
6495                 tnapi->last_irq_tag = 0;
6496                 tnapi->hw_status->status = 0;
6497                 tnapi->hw_status->status_tag = 0;
6498                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6499
6500                 tnapi->tx_prod = 0;
6501                 tnapi->tx_cons = 0;
6502                 if (tnapi->tx_ring)
6503                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6504
6505                 tnapi->rx_rcb_ptr = 0;
6506                 if (tnapi->rx_rcb)
6507                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6508
6509                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6510                         tg3_free_rings(tp);
6511                         return -ENOMEM;
6512                 }
6513         }
6514
6515         return 0;
6516 }
6517
6518 /*
6519  * Must not be invoked with interrupt sources disabled and
6520  * the hardware shut down.
6521  */
6522 static void tg3_free_consistent(struct tg3 *tp)
6523 {
6524         int i;
6525
6526         for (i = 0; i < tp->irq_cnt; i++) {
6527                 struct tg3_napi *tnapi = &tp->napi[i];
6528
6529                 if (tnapi->tx_ring) {
6530                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6531                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
6532                         tnapi->tx_ring = NULL;
6533                 }
6534
6535                 kfree(tnapi->tx_buffers);
6536                 tnapi->tx_buffers = NULL;
6537
6538                 if (tnapi->rx_rcb) {
6539                         dma_free_coherent(&tp->pdev->dev,
6540                                           TG3_RX_RCB_RING_BYTES(tp),
6541                                           tnapi->rx_rcb,
6542                                           tnapi->rx_rcb_mapping);
6543                         tnapi->rx_rcb = NULL;
6544                 }
6545
6546                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6547
6548                 if (tnapi->hw_status) {
6549                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6550                                           tnapi->hw_status,
6551                                           tnapi->status_mapping);
6552                         tnapi->hw_status = NULL;
6553                 }
6554         }
6555
6556         if (tp->hw_stats) {
6557                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6558                                   tp->hw_stats, tp->stats_mapping);
6559                 tp->hw_stats = NULL;
6560         }
6561 }
6562
6563 /*
6564  * Must not be invoked with interrupt sources disabled and
6565  * the hardware shut down.  Can sleep.
6566  */
6567 static int tg3_alloc_consistent(struct tg3 *tp)
6568 {
6569         int i;
6570
6571         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6572                                           sizeof(struct tg3_hw_stats),
6573                                           &tp->stats_mapping,
6574                                           GFP_KERNEL);
6575         if (!tp->hw_stats)
6576                 goto err_out;
6577
6578         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6579
6580         for (i = 0; i < tp->irq_cnt; i++) {
6581                 struct tg3_napi *tnapi = &tp->napi[i];
6582                 struct tg3_hw_status *sblk;
6583
6584                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6585                                                       TG3_HW_STATUS_SIZE,
6586                                                       &tnapi->status_mapping,
6587                                                       GFP_KERNEL);
6588                 if (!tnapi->hw_status)
6589                         goto err_out;
6590
6591                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6592                 sblk = tnapi->hw_status;
6593
6594                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6595                         goto err_out;
6596
6597                 /* If multivector TSS is enabled, vector 0 does not handle
6598                  * tx interrupts.  Don't allocate any resources for it.
6599                  */
6600                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6601                     (i && tg3_flag(tp, ENABLE_TSS))) {
6602                         tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6603                                                     TG3_TX_RING_SIZE,
6604                                                     GFP_KERNEL);
6605                         if (!tnapi->tx_buffers)
6606                                 goto err_out;
6607
6608                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6609                                                             TG3_TX_RING_BYTES,
6610                                                         &tnapi->tx_desc_mapping,
6611                                                             GFP_KERNEL);
6612                         if (!tnapi->tx_ring)
6613                                 goto err_out;
6614                 }
6615
6616                 /*
6617                  * When RSS is enabled, the status block format changes
6618                  * slightly.  The "rx_jumbo_consumer", "reserved",
6619                  * and "rx_mini_consumer" members get mapped to the
6620                  * other three rx return ring producer indexes.
6621                  */
6622                 switch (i) {
6623                 default:
6624                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6625                         break;
6626                 case 2:
6627                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6628                         break;
6629                 case 3:
6630                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
6631                         break;
6632                 case 4:
6633                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6634                         break;
6635                 }
6636
6637                 /*
6638                  * If multivector RSS is enabled, vector 0 does not handle
6639                  * rx or tx interrupts.  Don't allocate any resources for it.
6640                  */
6641                 if (!i && tg3_flag(tp, ENABLE_RSS))
6642                         continue;
6643
6644                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6645                                                    TG3_RX_RCB_RING_BYTES(tp),
6646                                                    &tnapi->rx_rcb_mapping,
6647                                                    GFP_KERNEL);
6648                 if (!tnapi->rx_rcb)
6649                         goto err_out;
6650
6651                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6652         }
6653
6654         return 0;
6655
6656 err_out:
6657         tg3_free_consistent(tp);
6658         return -ENOMEM;
6659 }
6660
6661 #define MAX_WAIT_CNT 1000
6662
6663 /* To stop a block, clear the enable bit and poll till it
6664  * clears.  tp->lock is held.
6665  */
6666 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6667 {
6668         unsigned int i;
6669         u32 val;
6670
6671         if (tg3_flag(tp, 5705_PLUS)) {
6672                 switch (ofs) {
6673                 case RCVLSC_MODE:
6674                 case DMAC_MODE:
6675                 case MBFREE_MODE:
6676                 case BUFMGR_MODE:
6677                 case MEMARB_MODE:
6678                         /* We can't enable/disable these bits of the
6679                          * 5705/5750, just say success.
6680                          */
6681                         return 0;
6682
6683                 default:
6684                         break;
6685                 }
6686         }
6687
6688         val = tr32(ofs);
6689         val &= ~enable_bit;
6690         tw32_f(ofs, val);
6691
6692         for (i = 0; i < MAX_WAIT_CNT; i++) {
6693                 udelay(100);
6694                 val = tr32(ofs);
6695                 if ((val & enable_bit) == 0)
6696                         break;
6697         }
6698
6699         if (i == MAX_WAIT_CNT && !silent) {
6700                 dev_err(&tp->pdev->dev,
6701                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6702                         ofs, enable_bit);
6703                 return -ENODEV;
6704         }
6705
6706         return 0;
6707 }
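
/* Usage sketch (illustrative): stopping the read DMA engine while
 * suppressing the timeout warning, as tg3_abort_hw() below does for
 * each block:
 *
 *	err = tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, 1);
 */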
6708
6709 /* tp->lock is held. */
6710 static int tg3_abort_hw(struct tg3 *tp, int silent)
6711 {
6712         int i, err;
6713
6714         tg3_disable_ints(tp);
6715
6716         tp->rx_mode &= ~RX_MODE_ENABLE;
6717         tw32_f(MAC_RX_MODE, tp->rx_mode);
6718         udelay(10);
6719
6720         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6721         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6722         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6723         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6724         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6725         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6726
6727         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6728         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6729         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6730         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6731         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6732         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6733         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6734
6735         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6736         tw32_f(MAC_MODE, tp->mac_mode);
6737         udelay(40);
6738
6739         tp->tx_mode &= ~TX_MODE_ENABLE;
6740         tw32_f(MAC_TX_MODE, tp->tx_mode);
6741
6742         for (i = 0; i < MAX_WAIT_CNT; i++) {
6743                 udelay(100);
6744                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6745                         break;
6746         }
6747         if (i >= MAX_WAIT_CNT) {
6748                 dev_err(&tp->pdev->dev,
6749                         "%s timed out, TX_MODE_ENABLE will not clear "
6750                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6751                 err |= -ENODEV;
6752         }
6753
6754         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6755         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6756         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6757
6758         tw32(FTQ_RESET, 0xffffffff);
6759         tw32(FTQ_RESET, 0x00000000);
6760
6761         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6762         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6763
6764         for (i = 0; i < tp->irq_cnt; i++) {
6765                 struct tg3_napi *tnapi = &tp->napi[i];
6766                 if (tnapi->hw_status)
6767                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6768         }
6769         if (tp->hw_stats)
6770                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6771
6772         return err;
6773 }
6774
6775 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6776 {
6777         int i;
6778         u32 apedata;
6779
6780         /* NCSI does not support APE events */
6781         if (tg3_flag(tp, APE_HAS_NCSI))
6782                 return;
6783
6784         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6785         if (apedata != APE_SEG_SIG_MAGIC)
6786                 return;
6787
6788         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6789         if (!(apedata & APE_FW_STATUS_READY))
6790                 return;
6791
6792         /* Wait for up to 1 millisecond for APE to service previous event. */
6793         for (i = 0; i < 10; i++) {
6794                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6795                         return;
6796
6797                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6798
6799                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6800                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6801                                         event | APE_EVENT_STATUS_EVENT_PENDING);
6802
6803                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6804
6805                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6806                         break;
6807
6808                 udelay(100);
6809         }
6810
6811         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6812                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6813 }
6814
6815 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6816 {
6817         u32 event;
6818         u32 apedata;
6819
6820         if (!tg3_flag(tp, ENABLE_APE))
6821                 return;
6822
6823         switch (kind) {
6824         case RESET_KIND_INIT:
6825                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6826                                 APE_HOST_SEG_SIG_MAGIC);
6827                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6828                                 APE_HOST_SEG_LEN_MAGIC);
6829                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6830                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6831                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6832                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6833                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6834                                 APE_HOST_BEHAV_NO_PHYLOCK);
6835                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6836                                     TG3_APE_HOST_DRVR_STATE_START);
6837
6838                 event = APE_EVENT_STATUS_STATE_START;
6839                 break;
6840         case RESET_KIND_SHUTDOWN:
6841                 /* With the interface we are currently using,
6842                  * APE does not track driver state.  Wiping
6843                  * out the HOST SEGMENT SIGNATURE forces
6844                  * the APE to assume OS absent status.
6845                  */
6846                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6847
6848                 if (device_may_wakeup(&tp->pdev->dev) &&
6849                     tg3_flag(tp, WOL_ENABLE)) {
6850                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6851                                             TG3_APE_HOST_WOL_SPEED_AUTO);
6852                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6853                 } else
6854                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6855
6856                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6857
6858                 event = APE_EVENT_STATUS_STATE_UNLOAD;
6859                 break;
6860         case RESET_KIND_SUSPEND:
6861                 event = APE_EVENT_STATUS_STATE_SUSPEND;
6862                 break;
6863         default:
6864                 return;
6865         }
6866
6867         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6868
6869         tg3_ape_send_event(tp, event);
6870 }
6871
6872 /* tp->lock is held. */
6873 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6874 {
6875         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6876                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6877
6878         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6879                 switch (kind) {
6880                 case RESET_KIND_INIT:
6881                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6882                                       DRV_STATE_START);
6883                         break;
6884
6885                 case RESET_KIND_SHUTDOWN:
6886                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6887                                       DRV_STATE_UNLOAD);
6888                         break;
6889
6890                 case RESET_KIND_SUSPEND:
6891                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6892                                       DRV_STATE_SUSPEND);
6893                         break;
6894
6895                 default:
6896                         break;
6897                 }
6898         }
6899
6900         if (kind == RESET_KIND_INIT ||
6901             kind == RESET_KIND_SUSPEND)
6902                 tg3_ape_driver_state_change(tp, kind);
6903 }
6904
6905 /* tp->lock is held. */
6906 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6907 {
6908         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6909                 switch (kind) {
6910                 case RESET_KIND_INIT:
6911                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6912                                       DRV_STATE_START_DONE);
6913                         break;
6914
6915                 case RESET_KIND_SHUTDOWN:
6916                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6917                                       DRV_STATE_UNLOAD_DONE);
6918                         break;
6919
6920                 default:
6921                         break;
6922                 }
6923         }
6924
6925         if (kind == RESET_KIND_SHUTDOWN)
6926                 tg3_ape_driver_state_change(tp, kind);
6927 }
6928
6929 /* tp->lock is held. */
6930 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6931 {
6932         if (tg3_flag(tp, ENABLE_ASF)) {
6933                 switch (kind) {
6934                 case RESET_KIND_INIT:
6935                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6936                                       DRV_STATE_START);
6937                         break;
6938
6939                 case RESET_KIND_SHUTDOWN:
6940                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6941                                       DRV_STATE_UNLOAD);
6942                         break;
6943
6944                 case RESET_KIND_SUSPEND:
6945                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6946                                       DRV_STATE_SUSPEND);
6947                         break;
6948
6949                 default:
6950                         break;
6951                 }
6952         }
6953 }
6954
6955 static int tg3_poll_fw(struct tg3 *tp)
6956 {
6957         int i;
6958         u32 val;
6959
6960         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6961                 /* Wait up to 20ms for init done. */
6962                 for (i = 0; i < 200; i++) {
6963                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6964                                 return 0;
6965                         udelay(100);
6966                 }
6967                 return -ENODEV;
6968         }
6969
6970         /* Wait for firmware initialization to complete. */
6971         for (i = 0; i < 100000; i++) {
6972                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6973                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6974                         break;
6975                 udelay(10);
6976         }
6977
6978         /* Chip might not be fitted with firmware.  Some Sun onboard
6979          * parts are configured like that.  So don't signal the timeout
6980          * of the above loop as an error, but do report the lack of
6981          * running firmware once.
6982          */
6983         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
6984                 tg3_flag_set(tp, NO_FWARE_REPORTED);
6985
6986                 netdev_info(tp->dev, "No firmware running\n");
6987         }
6988
6989         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6990                 /* The 57765 A0 needs a little more
6991                  * time to do some important work.
6992                  */
6993                 mdelay(10);
6994         }
6995
6996         return 0;
6997 }
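
/* Timeout arithmetic for the polls above, derived purely from the loop
 * bounds (no new hardware facts assumed): the 5906 VCPU poll is
 * 200 * 100us, i.e. the 20ms in its comment, and the firmware mailbox
 * poll is:
 *
 *	100000 iterations * 10us = 1 second
 */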
6998
6999 /* Save PCI command register before chip reset */
7000 static void tg3_save_pci_state(struct tg3 *tp)
7001 {
7002         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7003 }
7004
7005 /* Restore PCI state after chip reset */
7006 static void tg3_restore_pci_state(struct tg3 *tp)
7007 {
7008         u32 val;
7009
7010         /* Re-enable indirect register accesses. */
7011         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7012                                tp->misc_host_ctrl);
7013
7014         /* Set MAX PCI retry to zero. */
7015         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7016         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7017             tg3_flag(tp, PCIX_MODE))
7018                 val |= PCISTATE_RETRY_SAME_DMA;
7019         /* Allow reads and writes to the APE register and memory space. */
7020         if (tg3_flag(tp, ENABLE_APE))
7021                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7022                        PCISTATE_ALLOW_APE_SHMEM_WR |
7023                        PCISTATE_ALLOW_APE_PSPACE_WR;
7024         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7025
7026         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7027
7028         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7029                 if (tg3_flag(tp, PCI_EXPRESS))
7030                         pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7031                 else {
7032                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7033                                               tp->pci_cacheline_sz);
7034                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7035                                               tp->pci_lat_timer);
7036                 }
7037         }
7038
7039         /* Make sure PCI-X relaxed ordering bit is clear. */
7040         if (tg3_flag(tp, PCIX_MODE)) {
7041                 u16 pcix_cmd;
7042
7043                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7044                                      &pcix_cmd);
7045                 pcix_cmd &= ~PCI_X_CMD_ERO;
7046                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7047                                       pcix_cmd);
7048         }
7049
7050         if (tg3_flag(tp, 5780_CLASS)) {
7051
7052                 /* Chip reset on 5780 will reset MSI enable bit,
7053                  * so we need to restore it.
7054                  */
7055                 if (tg3_flag(tp, USING_MSI)) {
7056                         u16 ctrl;
7057
7058                         pci_read_config_word(tp->pdev,
7059                                              tp->msi_cap + PCI_MSI_FLAGS,
7060                                              &ctrl);
7061                         pci_write_config_word(tp->pdev,
7062                                               tp->msi_cap + PCI_MSI_FLAGS,
7063                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7064                         val = tr32(MSGINT_MODE);
7065                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7066                 }
7067         }
7068 }
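
/* tg3_save_pci_state() and tg3_restore_pci_state() are meant to
 * bracket a core-clock reset, which clobbers PCI_COMMAND and, on
 * 5780-class parts, the MSI enable bit.  A minimal usage sketch,
 * mirroring the flow of tg3_chip_reset() below:
 *
 *	tg3_save_pci_state(tp);
 *	tw32(GRC_MISC_CFG, GRC_MISC_CFG_CORECLK_RESET);
 *	udelay(120);
 *	tg3_restore_pci_state(tp);
 */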
7069
7070 static void tg3_stop_fw(struct tg3 *);
7071
7072 /* tp->lock is held. */
7073 static int tg3_chip_reset(struct tg3 *tp)
7074 {
7075         u32 val;
7076         void (*write_op)(struct tg3 *, u32, u32);
7077         int i, err;
7078
7079         tg3_nvram_lock(tp);
7080
7081         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7082
7083         /* No matching tg3_nvram_unlock() after this because
7084          * the chip reset below will undo the nvram lock.
7085          */
7086         tp->nvram_lock_cnt = 0;
7087
7088         /* GRC_MISC_CFG core clock reset will clear the memory
7089          * enable bit in PCI register 4 and the MSI enable bit
7090          * on some chips, so we save relevant registers here.
7091          */
7092         tg3_save_pci_state(tp);
7093
7094         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7095             tg3_flag(tp, 5755_PLUS))
7096                 tw32(GRC_FASTBOOT_PC, 0);
7097
7098         /*
7099          * We must avoid the readl() that normally takes place.
7100          * It can lock up the machine, cause machine checks, and
7101          * otherwise misbehave.  So temporarily disable the 5701
7102          * hardware workaround while we do the reset.
7103          */
7104         write_op = tp->write32;
7105         if (write_op == tg3_write_flush_reg32)
7106                 tp->write32 = tg3_write32;
7107
7108         /* Prevent the irq handler from reading or writing PCI registers
7109          * during chip reset when the memory enable bit in the PCI command
7110          * register may be cleared.  The chip does not generate interrupts
7111          * at this time, but the irq handler may still be called due to irq
7112          * sharing or irqpoll.
7113          */
7114         tg3_flag_set(tp, CHIP_RESETTING);
7115         for (i = 0; i < tp->irq_cnt; i++) {
7116                 struct tg3_napi *tnapi = &tp->napi[i];
7117                 if (tnapi->hw_status) {
7118                         tnapi->hw_status->status = 0;
7119                         tnapi->hw_status->status_tag = 0;
7120                 }
7121                 tnapi->last_tag = 0;
7122                 tnapi->last_irq_tag = 0;
7123         }
7124         smp_mb();
7125
7126         for (i = 0; i < tp->irq_cnt; i++)
7127                 synchronize_irq(tp->napi[i].irq_vec);
7128
7129         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7130                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7131                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7132         }
7133
7134         /* do the reset */
7135         val = GRC_MISC_CFG_CORECLK_RESET;
7136
7137         if (tg3_flag(tp, PCI_EXPRESS)) {
7138                 /* Force PCIe 1.0a mode */
7139                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7140                     !tg3_flag(tp, 57765_PLUS) &&
7141                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7142                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7143                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7144
7145                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7146                         tw32(GRC_MISC_CFG, (1 << 29));
7147                         val |= (1 << 29);
7148                 }
7149         }
7150
7151         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7152                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7153                 tw32(GRC_VCPU_EXT_CTRL,
7154                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7155         }
7156
7157         /* Manage gphy power for all CPMU-absent PCIe devices. */
7158         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7159                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7160
7161         tw32(GRC_MISC_CFG, val);
7162
7163         /* restore 5701 hardware bug workaround write method */
7164         tp->write32 = write_op;
7165
7166         /* Unfortunately, we have to delay before the PCI read back.
7167          * Some 575X chips will not even respond to a PCI cfg access
7168          * when the reset command is given to the chip.
7169          *
7170          * How do these hardware designers expect things to work
7171          * properly if the PCI write is posted for a long period
7172          * of time?  There always needs to be some way to read a
7173          * register back in order to push out the posted write that
7174          * performs the reset.
7175          *
7176          * For most tg3 variants the trick below has worked.
7177          * Ho hum...
7178          */
7179         udelay(120);
7180
7181         /* Flush PCI posted writes.  The normal MMIO registers
7182          * are inaccessible at this time so this is the only
7183          * way to do this reliably (actually, this is no longer
7184          * the case, see above).  I tried to use indirect
7185          * register read/write but this upset some 5701 variants.
7186          */
7187         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7188
7189         udelay(120);
7190
7191         if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
7192                 u16 val16;
7193
7194                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7195                         int i;
7196                         u32 cfg_val;
7197
7198                         /* Wait for link training to complete.  */
7199                         for (i = 0; i < 5000; i++)
7200                                 udelay(100);
7201
7202                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7203                         pci_write_config_dword(tp->pdev, 0xc4,
7204                                                cfg_val | (1 << 15));
7205                 }
7206
7207                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7208                 pci_read_config_word(tp->pdev,
7209                                      tp->pcie_cap + PCI_EXP_DEVCTL,
7210                                      &val16);
7211                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7212                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7213                 /*
7214                  * Older PCIe devices only support the 128 byte
7215                  * MPS setting.  Enforce the restriction.
7216                  */
7217                 if (!tg3_flag(tp, CPMU_PRESENT))
7218                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7219                 pci_write_config_word(tp->pdev,
7220                                       tp->pcie_cap + PCI_EXP_DEVCTL,
7221                                       val16);
7222
7223                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7224
7225                 /* Clear error status */
7226                 pci_write_config_word(tp->pdev,
7227                                       tp->pcie_cap + PCI_EXP_DEVSTA,
7228                                       PCI_EXP_DEVSTA_CED |
7229                                       PCI_EXP_DEVSTA_NFED |
7230                                       PCI_EXP_DEVSTA_FED |
7231                                       PCI_EXP_DEVSTA_URD);
7232         }
7233
7234         tg3_restore_pci_state(tp);
7235
7236         tg3_flag_clear(tp, CHIP_RESETTING);
7237         tg3_flag_clear(tp, ERROR_PROCESSED);
7238
7239         val = 0;
7240         if (tg3_flag(tp, 5780_CLASS))
7241                 val = tr32(MEMARB_MODE);
7242         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7243
7244         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7245                 tg3_stop_fw(tp);
7246                 tw32(0x5000, 0x400);
7247         }
7248
7249         tw32(GRC_MODE, tp->grc_mode);
7250
7251         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7252                 val = tr32(0xc4);
7253
7254                 tw32(0xc4, val | (1 << 15));
7255         }
7256
7257         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7258             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7259                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7260                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7261                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7262                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7263         }
7264
7265         if (tg3_flag(tp, ENABLE_APE))
7266                 tp->mac_mode = MAC_MODE_APE_TX_EN |
7267                                MAC_MODE_APE_RX_EN |
7268                                MAC_MODE_TDE_ENABLE;
7269
7270         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7271                 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7272                 val = tp->mac_mode;
7273         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7274                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7275                 val = tp->mac_mode;
7276         } else
7277                 val = 0;
7278
7279         tw32_f(MAC_MODE, val);
7280         udelay(40);
7281
7282         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7283
7284         err = tg3_poll_fw(tp);
7285         if (err)
7286                 return err;
7287
7288         tg3_mdio_start(tp);
7289
7290         if (tg3_flag(tp, PCI_EXPRESS) &&
7291             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7292             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7293             !tg3_flag(tp, 57765_PLUS)) {
7294                 val = tr32(0x7c00);
7295
7296                 tw32(0x7c00, val | (1 << 25));
7297         }
7298
7299         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7300                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7301                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7302         }
7303
7304         /* Reprobe ASF enable state.  */
7305         tg3_flag_clear(tp, ENABLE_ASF);
7306         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7307         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7308         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7309                 u32 nic_cfg;
7310
7311                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7312                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7313                         tg3_flag_set(tp, ENABLE_ASF);
7314                         tp->last_event_jiffies = jiffies;
7315                         if (tg3_flag(tp, 5750_PLUS))
7316                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7317                 }
7318         }
7319
7320         return 0;
7321 }
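
/* The ASF reprobe at the end of tg3_chip_reset() decodes NIC SRAM as
 * sketched here (values illustrative):
 *
 *	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
 *	// val == NIC_SRAM_DATA_SIG_MAGIC: the config block is valid
 *	tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
 *	// nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE: firmware ASF is on,
 *	// so ENABLE_ASF (and, on 5750+, ASF_NEW_HANDSHAKE) gets set
 */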
7322
7323 /* tp->lock is held. */
7324 static void tg3_stop_fw(struct tg3 *tp)
7325 {
7326         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7327                 /* Wait for RX cpu to ACK the previous event. */
7328                 tg3_wait_for_event_ack(tp);
7329
7330                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7331
7332                 tg3_generate_fw_event(tp);
7333
7334                 /* Wait for RX cpu to ACK this event. */
7335                 tg3_wait_for_event_ack(tp);
7336         }
7337 }
7338
7339 /* tp->lock is held. */
7340 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7341 {
7342         int err;
7343
7344         tg3_stop_fw(tp);
7345
7346         tg3_write_sig_pre_reset(tp, kind);
7347
7348         tg3_abort_hw(tp, silent);
7349         err = tg3_chip_reset(tp);
7350
7351         __tg3_set_mac_addr(tp, 0);
7352
7353         tg3_write_sig_legacy(tp, kind);
7354         tg3_write_sig_post_reset(tp, kind);
7355
7356         if (err)
7357                 return err;
7358
7359         return 0;
7360 }
7361
7362 #define RX_CPU_SCRATCH_BASE     0x30000
7363 #define RX_CPU_SCRATCH_SIZE     0x04000
7364 #define TX_CPU_SCRATCH_BASE     0x34000
7365 #define TX_CPU_SCRATCH_SIZE     0x04000
7366
7367 /* tp->lock is held. */
7368 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7369 {
7370         int i;
7371
7372         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7373
7374         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7375                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7376
7377                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7378                 return 0;
7379         }
7380         if (offset == RX_CPU_BASE) {
7381                 for (i = 0; i < 10000; i++) {
7382                         tw32(offset + CPU_STATE, 0xffffffff);
7383                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7384                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7385                                 break;
7386                 }
7387
7388                 tw32(offset + CPU_STATE, 0xffffffff);
7389                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
7390                 udelay(10);
7391         } else {
7392                 for (i = 0; i < 10000; i++) {
7393                         tw32(offset + CPU_STATE, 0xffffffff);
7394                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7395                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7396                                 break;
7397                 }
7398         }
7399
7400         if (i >= 10000) {
7401                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7402                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7403                 return -ENODEV;
7404         }
7405
7406         /* Clear firmware's nvram arbitration. */
7407         if (tg3_flag(tp, NVRAM))
7408                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7409         return 0;
7410 }
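
/* Typical use of tg3_halt_cpu(), as in tg3_load_firmware_cpu() below
 * (sketch only): the return value must be checked, since the CPU may
 * never acknowledge the halt:
 *
 *	if (tg3_halt_cpu(tp, RX_CPU_BASE))
 *		return -ENODEV;
 */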
7411
7412 struct fw_info {
7413         unsigned int fw_base;
7414         unsigned int fw_len;
7415         const __be32 *fw_data;
7416 };
7417
7418 /* tp->lock is held. */
7419 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7420                                  int cpu_scratch_size, struct fw_info *info)
7421 {
7422         int err, lock_err, i;
7423         void (*write_op)(struct tg3 *, u32, u32);
7424
7425         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7426                 netdev_err(tp->dev,
7427                            "%s: tried to load TX cpu firmware on a 5705-class chip\n",
7428                            __func__);
7429                 return -EINVAL;
7430         }
7431
7432         if (tg3_flag(tp, 5705_PLUS))
7433                 write_op = tg3_write_mem;
7434         else
7435                 write_op = tg3_write_indirect_reg32;
7436
7437         /* It is possible that bootcode is still loading at this point.
7438          * Get the nvram lock before halting the cpu.
7439          */
7440         lock_err = tg3_nvram_lock(tp);
7441         err = tg3_halt_cpu(tp, cpu_base);
7442         if (!lock_err)
7443                 tg3_nvram_unlock(tp);
7444         if (err)
7445                 goto out;
7446
7447         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7448                 write_op(tp, cpu_scratch_base + i, 0);
7449         tw32(cpu_base + CPU_STATE, 0xffffffff);
7450         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7451         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7452                 write_op(tp, (cpu_scratch_base +
7453                               (info->fw_base & 0xffff) +
7454                               (i * sizeof(u32))),
7455                               be32_to_cpu(info->fw_data[i]));
7456
7457         err = 0;
7458
7459 out:
7460         return err;
7461 }
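
/* Scratch addressing used by the copy loop above, worked with
 * hypothetical numbers: for fw_base = 0x08000000 loaded at
 * RX_CPU_SCRATCH_BASE (0x30000), word i lands at
 *
 *	0x30000 + (0x08000000 & 0xffff) + i * sizeof(u32)
 *	= 0x30000 + 4 * i
 */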
7462
7463 /* tp->lock is held. */
7464 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7465 {
7466         struct fw_info info;
7467         const __be32 *fw_data;
7468         int err, i;
7469
7470         fw_data = (void *)tp->fw->data;
7471
7472         /* Firmware blob starts with version numbers, followed by
7473          * start address and length.  We set the complete length:
7474          * length = end_address_of_bss - start_address_of_text.
7475          * The remainder is the blob to be loaded contiguously
7476          * from the start address. */
7477
7478         info.fw_base = be32_to_cpu(fw_data[1]);
7479         info.fw_len = tp->fw->size - 12;
7480         info.fw_data = &fw_data[3];
7481
7482         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7483                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7484                                     &info);
7485         if (err)
7486                 return err;
7487
7488         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7489                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7490                                     &info);
7491         if (err)
7492                 return err;
7493
7494         /* Now start up only the RX cpu. */
7495         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7496         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7497
7498         for (i = 0; i < 5; i++) {
7499                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7500                         break;
7501                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7502                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
7503                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7504                 udelay(1000);
7505         }
7506         if (i >= 5) {
7507                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
7508                            "should be %08x\n", __func__,
7509                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7510                 return -ENODEV;
7511         }
7512         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7513         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
7514
7515         return 0;
7516 }
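
/* Blob layout consumed above, worked for a hypothetical 2048-byte
 * firmware image:
 *
 *	fw_data[0]   version word
 *	fw_data[1]   fw_base (start address)
 *	fw_data[2]   declared length
 *	fw_data[3..] 2048 - 12 = 2036 bytes of image to copy
 */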
7517
7518 /* tp->lock is held. */
7519 static int tg3_load_tso_firmware(struct tg3 *tp)
7520 {
7521         struct fw_info info;
7522         const __be32 *fw_data;
7523         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7524         int err, i;
7525
7526         if (tg3_flag(tp, HW_TSO_1) ||
7527             tg3_flag(tp, HW_TSO_2) ||
7528             tg3_flag(tp, HW_TSO_3))
7529                 return 0;
7530
7531         fw_data = (void *)tp->fw->data;
7532
7533         /* Firmware blob starts with version numbers, followed by
7534          * start address and length.  We set the complete length:
7535          * length = end_address_of_bss - start_address_of_text.
7536          * The remainder is the blob to be loaded contiguously
7537          * from the start address. */
7538
7539         info.fw_base = be32_to_cpu(fw_data[1]);
7540         cpu_scratch_size = tp->fw_len;
7541         info.fw_len = tp->fw->size - 12;
7542         info.fw_data = &fw_data[3];
7543
7544         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7545                 cpu_base = RX_CPU_BASE;
7546                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7547         } else {
7548                 cpu_base = TX_CPU_BASE;
7549                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7550                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7551         }
7552
7553         err = tg3_load_firmware_cpu(tp, cpu_base,
7554                                     cpu_scratch_base, cpu_scratch_size,
7555                                     &info);
7556         if (err)
7557                 return err;
7558
7559         /* Now start up the cpu. */
7560         tw32(cpu_base + CPU_STATE, 0xffffffff);
7561         tw32_f(cpu_base + CPU_PC, info.fw_base);
7562
7563         for (i = 0; i < 5; i++) {
7564                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7565                         break;
7566                 tw32(cpu_base + CPU_STATE, 0xffffffff);
7567                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7568                 tw32_f(cpu_base + CPU_PC, info.fw_base);
7569                 udelay(1000);
7570         }
7571         if (i >= 5) {
7572                 netdev_err(tp->dev,
7573                            "%s failed to set CPU PC: is %08x, should be %08x\n",
7574                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7575                 return -ENODEV;
7576         }
7577         tw32(cpu_base + CPU_STATE, 0xffffffff);
7578         tw32_f(cpu_base + CPU_MODE,  0x00000000);
7579         return 0;
7580 }
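
/* Worked sizes for the non-5705 branch above: the TSO firmware is
 * loaded at TX_CPU_SCRATCH_BASE = 0x34000 with TX_CPU_SCRATCH_SIZE =
 * 0x4000, i.e. a 16 KiB scratch window.
 */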
7581
7582
7583 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7584 {
7585         struct tg3 *tp = netdev_priv(dev);
7586         struct sockaddr *addr = p;
7587         int err = 0, skip_mac_1 = 0;
7588
7589         if (!is_valid_ether_addr(addr->sa_data))
7590                 return -EINVAL;
7591
7592         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7593
7594         if (!netif_running(dev))
7595                 return 0;
7596
7597         if (tg3_flag(tp, ENABLE_ASF)) {
7598                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7599
7600                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7601                 addr0_low = tr32(MAC_ADDR_0_LOW);
7602                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7603                 addr1_low = tr32(MAC_ADDR_1_LOW);
7604
7605                 /* Skip MAC addr 1 if ASF is using it. */
7606                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7607                     !(addr1_high == 0 && addr1_low == 0))
7608                         skip_mac_1 = 1;
7609         }
7610         spin_lock_bh(&tp->lock);
7611         __tg3_set_mac_addr(tp, skip_mac_1);
7612         spin_unlock_bh(&tp->lock);
7613
7614         return err;
7615 }
7616
7617 /* tp->lock is held. */
7618 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7619                            dma_addr_t mapping, u32 maxlen_flags,
7620                            u32 nic_addr)
7621 {
7622         tg3_write_mem(tp,
7623                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7624                       ((u64) mapping >> 32));
7625         tg3_write_mem(tp,
7626                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7627                       ((u64) mapping & 0xffffffff));
7628         tg3_write_mem(tp,
7629                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7630                        maxlen_flags);
7631
7632         if (!tg3_flag(tp, 5705_PLUS))
7633                 tg3_write_mem(tp,
7634                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7635                               nic_addr);
7636 }
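
/* Resulting TG3_BDINFO layout in NIC SRAM for a hypothetical 64-bit
 * mapping of 0x0000000123456000 (offsets per the TG3_BDINFO_*
 * definitions in tg3.h):
 *
 *	+0x0  HOST_ADDR (high)  0x00000001
 *	+0x4  HOST_ADDR (low)   0x23456000
 *	+0x8  MAXLEN_FLAGS      (maxlen << 16) | flags
 *	+0xc  NIC_ADDR          (written on pre-5705 devices only)
 */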
7637
7638 static void __tg3_set_rx_mode(struct net_device *);
7639 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7640 {
7641         int i;
7642
7643         if (!tg3_flag(tp, ENABLE_TSS)) {
7644                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7645                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7646                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7647         } else {
7648                 tw32(HOSTCC_TXCOL_TICKS, 0);
7649                 tw32(HOSTCC_TXMAX_FRAMES, 0);
7650                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7651         }
7652
7653         if (!tg3_flag(tp, ENABLE_RSS)) {
7654                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7655                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7656                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7657         } else {
7658                 tw32(HOSTCC_RXCOL_TICKS, 0);
7659                 tw32(HOSTCC_RXMAX_FRAMES, 0);
7660                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7661         }
7662
7663         if (!tg3_flag(tp, 5705_PLUS)) {
7664                 u32 val = ec->stats_block_coalesce_usecs;
7665
7666                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7667                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7668
7669                 if (!netif_carrier_ok(tp->dev))
7670                         val = 0;
7671
7672                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7673         }
7674
7675         for (i = 0; i < tp->irq_cnt - 1; i++) {
7676                 u32 reg;
7677
7678                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7679                 tw32(reg, ec->rx_coalesce_usecs);
7680                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7681                 tw32(reg, ec->rx_max_coalesced_frames);
7682                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7683                 tw32(reg, ec->rx_max_coalesced_frames_irq);
7684
7685                 if (tg3_flag(tp, ENABLE_TSS)) {
7686                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7687                         tw32(reg, ec->tx_coalesce_usecs);
7688                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7689                         tw32(reg, ec->tx_max_coalesced_frames);
7690                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7691                         tw32(reg, ec->tx_max_coalesced_frames_irq);
7692                 }
7693         }
7694
7695         for (; i < tp->irq_max - 1; i++) {
7696                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7697                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7698                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7699
7700                 if (tg3_flag(tp, ENABLE_TSS)) {
7701                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7702                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7703                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7704                 }
7705         }
7706 }
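
/* The per-vector coalescing registers are strided 0x18 bytes apart,
 * so for MSI-X vector v (v >= 1) the loop above effectively computes,
 * e.g. for the RX ticks register:
 *
 *	reg = HOSTCC_RXCOL_TICKS_VEC1 + (v - 1) * 0x18;
 */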
7707
7708 /* tp->lock is held. */
7709 static void tg3_rings_reset(struct tg3 *tp)
7710 {
7711         int i;
7712         u32 stblk, txrcb, rxrcb, limit;
7713         struct tg3_napi *tnapi = &tp->napi[0];
7714
7715         /* Disable all transmit rings but the first. */
7716         if (!tg3_flag(tp, 5705_PLUS))
7717                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7718         else if (tg3_flag(tp, 5717_PLUS))
7719                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7720         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7721                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7722         else
7723                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7724
7725         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7726              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7727                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7728                               BDINFO_FLAGS_DISABLED);
7729
7730
7731         /* Disable all receive return rings but the first. */
7732         if (tg3_flag(tp, 5717_PLUS))
7733                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7734         else if (!tg3_flag(tp, 5705_PLUS))
7735                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7736         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7737                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7738                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7739         else
7740                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7741
7742         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7743              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7744                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7745                               BDINFO_FLAGS_DISABLED);
7746
7747         /* Disable interrupts */
7748         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7749
7750         /* Zero mailbox registers. */
7751         if (tg3_flag(tp, SUPPORT_MSIX)) {
7752                 for (i = 1; i < tp->irq_max; i++) {
7753                         tp->napi[i].tx_prod = 0;
7754                         tp->napi[i].tx_cons = 0;
7755                         if (tg3_flag(tp, ENABLE_TSS))
7756                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
7757                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
7758                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7759                 }
7760                 if (!tg3_flag(tp, ENABLE_TSS))
7761                         tw32_mailbox(tp->napi[0].prodmbox, 0);
7762         } else {
7763                 tp->napi[0].tx_prod = 0;
7764                 tp->napi[0].tx_cons = 0;
7765                 tw32_mailbox(tp->napi[0].prodmbox, 0);
7766                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7767         }
7768
7769         /* Make sure the NIC-based send BD rings are disabled. */
7770         if (!tg3_flag(tp, 5705_PLUS)) {
7771                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7772                 for (i = 0; i < 16; i++)
7773                         tw32_tx_mbox(mbox + i * 8, 0);
7774         }
7775
7776         txrcb = NIC_SRAM_SEND_RCB;
7777         rxrcb = NIC_SRAM_RCV_RET_RCB;
7778
7779         /* Clear status block in ram. */
7780         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7781
7782         /* Set status block DMA address */
7783         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7784              ((u64) tnapi->status_mapping >> 32));
7785         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7786              ((u64) tnapi->status_mapping & 0xffffffff));
7787
7788         if (tnapi->tx_ring) {
7789                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7790                                (TG3_TX_RING_SIZE <<
7791                                 BDINFO_FLAGS_MAXLEN_SHIFT),
7792                                NIC_SRAM_TX_BUFFER_DESC);
7793                 txrcb += TG3_BDINFO_SIZE;
7794         }
7795
7796         if (tnapi->rx_rcb) {
7797                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7798                                (tp->rx_ret_ring_mask + 1) <<
7799                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7800                 rxrcb += TG3_BDINFO_SIZE;
7801         }
7802
7803         stblk = HOSTCC_STATBLCK_RING1;
7804
7805         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7806                 u64 mapping = (u64)tnapi->status_mapping;
7807                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7808                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7809
7810                 /* Clear status block in ram. */
7811                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7812
7813                 if (tnapi->tx_ring) {
7814                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7815                                        (TG3_TX_RING_SIZE <<
7816                                         BDINFO_FLAGS_MAXLEN_SHIFT),
7817                                        NIC_SRAM_TX_BUFFER_DESC);
7818                         txrcb += TG3_BDINFO_SIZE;
7819                 }
7820
7821                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7822                                ((tp->rx_ret_ring_mask + 1) <<
7823                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7824
7825                 stblk += 8;
7826                 rxrcb += TG3_BDINFO_SIZE;
7827         }
7828 }
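
/* Status block slots for the extra MSI-X vectors are 8 bytes apart,
 * so vector i's DMA address is written at HOSTCC_STATBLCK_RING1 +
 * 8 * (i - 1); e.g. for vector 2 the loop above does, in effect:
 *
 *	tw32(HOSTCC_STATBLCK_RING1 + 8 + TG3_64BIT_REG_HIGH, hi);
 *	tw32(HOSTCC_STATBLCK_RING1 + 8 + TG3_64BIT_REG_LOW, lo);
 */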
7829
7830 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7831 {
7832         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7833
7834         if (!tg3_flag(tp, 5750_PLUS) ||
7835             tg3_flag(tp, 5780_CLASS) ||
7836             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7837             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7838                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7839         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7840                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7841                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7842         else
7843                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7844
7845         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7846         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7847
7848         val = min(nic_rep_thresh, host_rep_thresh);
7849         tw32(RCVBDI_STD_THRESH, val);
7850
7851         if (tg3_flag(tp, 57765_PLUS))
7852                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
7853
7854         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7855                 return;
7856
7857         if (!tg3_flag(tp, 5705_PLUS))
7858                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
7859         else
7860                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
7861
7862         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
7863
7864         val = min(bdcache_maxcnt / 2, host_rep_thresh);
7865         tw32(RCVBDI_JUMBO_THRESH, val);
7866
7867         if (tg3_flag(tp, 57765_PLUS))
7868                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
7869 }
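
/* Worked example of the standard-ring thresholds above, with
 * hypothetical values rx_std_max_post = 8 and rx_pending = 200
 * (and assuming bdcache_maxcnt / 2 >= 8):
 *
 *	nic_rep_thresh  = min(bdcache_maxcnt / 2, 8) = 8
 *	host_rep_thresh = max(200 / 8, 1)            = 25
 *	RCVBDI_STD_THRESH <- min(8, 25)              = 8
 */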
7870
7871 /* tp->lock is held. */
7872 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7873 {
7874         u32 val, rdmac_mode;
7875         int i, err, limit;
7876         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7877
7878         tg3_disable_ints(tp);
7879
7880         tg3_stop_fw(tp);
7881
7882         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7883
7884         if (tg3_flag(tp, INIT_COMPLETE))
7885                 tg3_abort_hw(tp, 1);
7886
7887         /* Enable MAC control of LPI */
7888         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7889                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7890                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7891                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
7892
7893                 tw32_f(TG3_CPMU_EEE_CTRL,
7894                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7895
7896                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7897                       TG3_CPMU_EEEMD_LPI_IN_TX |
7898                       TG3_CPMU_EEEMD_LPI_IN_RX |
7899                       TG3_CPMU_EEEMD_EEE_ENABLE;
7900
7901                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7902                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7903
7904                 if (tg3_flag(tp, ENABLE_APE))
7905                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7906
7907                 tw32_f(TG3_CPMU_EEE_MODE, val);
7908
7909                 tw32_f(TG3_CPMU_EEE_DBTMR1,
7910                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7911                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7912
7913                 tw32_f(TG3_CPMU_EEE_DBTMR2,
7914                        TG3_CPMU_DBTMR2_APE_TX_2047US |
7915                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7916         }
7917
7918         if (reset_phy)
7919                 tg3_phy_reset(tp);
7920
7921         err = tg3_chip_reset(tp);
7922         if (err)
7923                 return err;
7924
7925         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7926
7927         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7928                 val = tr32(TG3_CPMU_CTRL);
7929                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7930                 tw32(TG3_CPMU_CTRL, val);
7931
7932                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7933                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7934                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7935                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7936
7937                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7938                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7939                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7940                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7941
7942                 val = tr32(TG3_CPMU_HST_ACC);
7943                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7944                 val |= CPMU_HST_ACC_MACCLK_6_25;
7945                 tw32(TG3_CPMU_HST_ACC, val);
7946         }
7947
7948         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7949                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7950                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7951                        PCIE_PWR_MGMT_L1_THRESH_4MS;
7952                 tw32(PCIE_PWR_MGMT_THRESH, val);
7953
7954                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7955                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7956
7957                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7958
7959                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7960                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7961         }
7962
7963         if (tg3_flag(tp, L1PLLPD_EN)) {
7964                 u32 grc_mode = tr32(GRC_MODE);
7965
7966                 /* Access the lower 1K of PL PCIE block registers. */
7967                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7968                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7969
7970                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7971                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7972                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7973
7974                 tw32(GRC_MODE, grc_mode);
7975         }
7976
7977         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7978                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7979                         u32 grc_mode = tr32(GRC_MODE);
7980
7981                         /* Access the lower 1K of PL PCIE block registers. */
7982                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7983                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7984
7985                         val = tr32(TG3_PCIE_TLDLPL_PORT +
7986                                    TG3_PCIE_PL_LO_PHYCTL5);
7987                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7988                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7989
7990                         tw32(GRC_MODE, grc_mode);
7991                 }
7992
7993                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
7994                         u32 grc_mode = tr32(GRC_MODE);
7995
7996                         /* Access the lower 1K of DL PCIE block registers. */
7997                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7998                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
7999
8000                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8001                                    TG3_PCIE_DL_LO_FTSMAX);
8002                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8003                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8004                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8005
8006                         tw32(GRC_MODE, grc_mode);
8007                 }
8008
8009                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8010                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8011                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8012                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8013         }
8014
8015         /* This works around an issue with Athlon chipsets on
8016          * B3 tigon3 silicon.  This bit has no effect on any
8017          * other revision.  But do not set this on PCI Express
8018          * chips and don't even touch the clocks if the CPMU is present.
8019          */
8020         if (!tg3_flag(tp, CPMU_PRESENT)) {
8021                 if (!tg3_flag(tp, PCI_EXPRESS))
8022                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8023                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8024         }
8025
8026         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8027             tg3_flag(tp, PCIX_MODE)) {
8028                 val = tr32(TG3PCI_PCISTATE);
8029                 val |= PCISTATE_RETRY_SAME_DMA;
8030                 tw32(TG3PCI_PCISTATE, val);
8031         }
8032
8033         if (tg3_flag(tp, ENABLE_APE)) {
8034                 /* Allow reads and writes to the
8035                  * APE register and memory space.
8036                  */
8037                 val = tr32(TG3PCI_PCISTATE);
8038                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8039                        PCISTATE_ALLOW_APE_SHMEM_WR |
8040                        PCISTATE_ALLOW_APE_PSPACE_WR;
8041                 tw32(TG3PCI_PCISTATE, val);
8042         }
8043
8044         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8045                 /* Enable some hw fixes.  */
8046                 val = tr32(TG3PCI_MSI_DATA);
8047                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8048                 tw32(TG3PCI_MSI_DATA, val);
8049         }
8050
8051         /* Descriptor ring init may make accesses to the
8052          * NIC SRAM area to set up the TX descriptors, so we
8053          * can only do this after the hardware has been
8054          * successfully reset.
8055          */
8056         err = tg3_init_rings(tp);
8057         if (err)
8058                 return err;
8059
8060         if (tg3_flag(tp, 57765_PLUS)) {
8061                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8062                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8063                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8064                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8065                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8066                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8067                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8068                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8069         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8070                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8071                 /* This value is determined during the probe-time DMA
8072                  * engine test, tg3_test_dma.
8073                  */
8074                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8075         }
8076
8077         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8078                           GRC_MODE_4X_NIC_SEND_RINGS |
8079                           GRC_MODE_NO_TX_PHDR_CSUM |
8080                           GRC_MODE_NO_RX_PHDR_CSUM);
8081         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8082
8083         /* Pseudo-header checksum is done by hardware logic and not
8084          * the offload processors, so make the chip do the pseudo-
8085          * header checksums on receive.  For transmit it is more
8086          * convenient to do the pseudo-header checksum in software
8087          * as Linux does that on transmit for us in all cases.
8088          */
8089         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8090
8091         tw32(GRC_MODE,
8092              tp->grc_mode |
8093              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8094
8095         /* Set up the timer prescaler register.  The clock is always 66MHz. */
8096         val = tr32(GRC_MISC_CFG);
8097         val &= ~0xff;
8098         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8099         tw32(GRC_MISC_CFG, val);
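
        /* With the fixed 66MHz clock and a divide-by-(N + 1) prescaler
         * (which is what the value 65 written above implies), the timer
         * tick works out to:
         *
         *	66MHz / (65 + 1) = 1MHz, i.e. a 1us tick
         */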
8100
8101         /* Initialize MBUF/DESC pool. */
8102         if (tg3_flag(tp, 5750_PLUS)) {
8103                 /* Do nothing.  */
8104         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8105                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8106                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8107                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8108                 else
8109                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8110                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8111                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8112         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8113                 int fw_len;
8114
8115                 fw_len = tp->fw_len;
8116                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8117                 tw32(BUFMGR_MB_POOL_ADDR,
8118                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8119                 tw32(BUFMGR_MB_POOL_SIZE,
8120                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8121         }
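
        /* The rounding in the TSO_CAPABLE branch above aligns fw_len up
         * to a 0x80-byte boundary so the mbuf pool can start immediately
         * after the firmware; e.g. a hypothetical fw_len of 0x1234:
         *
         *	(0x1234 + 0x7f) & ~0x7f = 0x1280
         */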
8122
8123         if (tp->dev->mtu <= ETH_DATA_LEN) {
8124                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8125                      tp->bufmgr_config.mbuf_read_dma_low_water);
8126                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8127                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8128                 tw32(BUFMGR_MB_HIGH_WATER,
8129                      tp->bufmgr_config.mbuf_high_water);
8130         } else {
8131                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8132                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8133                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8134                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8135                 tw32(BUFMGR_MB_HIGH_WATER,
8136                      tp->bufmgr_config.mbuf_high_water_jumbo);
8137         }
8138         tw32(BUFMGR_DMA_LOW_WATER,
8139              tp->bufmgr_config.dma_low_water);
8140         tw32(BUFMGR_DMA_HIGH_WATER,
8141              tp->bufmgr_config.dma_high_water);
8142
8143         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8144         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8145                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8146         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8147             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8148             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8149                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8150         tw32(BUFMGR_MODE, val);
8151         for (i = 0; i < 2000; i++) {
8152                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8153                         break;
8154                 udelay(10);
8155         }
8156         if (i >= 2000) {
8157                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8158                 return -ENODEV;
8159         }
8160
8161         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8162                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8163
8164         tg3_setup_rxbd_thresholds(tp);
8165
8166         /* Initialize TG3_BDINFOs at:
8167          *  RCVDBDI_STD_BD:     standard eth size rx ring
8168          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8169          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8170          *
8171          * like so:
8172          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8173          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8174          *                              ring attribute flags
8175          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8176          *
8177          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8178          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8179          *
8180          * The size of each ring is fixed in the firmware, but the location is
8181          * configurable.
8182          */
8183         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8184              ((u64) tpr->rx_std_mapping >> 32));
8185         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8186              ((u64) tpr->rx_std_mapping & 0xffffffff));
8187         if (!tg3_flag(tp, 5717_PLUS))
8188                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8189                      NIC_SRAM_RX_BUFFER_DESC);
8190
8191         /* Disable the mini ring */
8192         if (!tg3_flag(tp, 5705_PLUS))
8193                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8194                      BDINFO_FLAGS_DISABLED);
8195
8196         /* Program the jumbo buffer descriptor ring control
8197          * blocks on those devices that have them.
8198          */
8199         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8200             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8201
8202                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8203                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8204                              ((u64) tpr->rx_jmb_mapping >> 32));
8205                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8206                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8207                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8208                               BDINFO_FLAGS_MAXLEN_SHIFT;
8209                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8210                              val | BDINFO_FLAGS_USE_EXT_RECV);
8211                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8212                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8213                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8214                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8215                 } else {
8216                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8217                              BDINFO_FLAGS_DISABLED);
8218                 }
8219
8220                 if (tg3_flag(tp, 57765_PLUS)) {
8221                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8222                                 val = TG3_RX_STD_MAX_SIZE_5700;
8223                         else
8224                                 val = TG3_RX_STD_MAX_SIZE_5717;
8225                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8226                         val |= (TG3_RX_STD_DMA_SZ << 2);
8227                 } else
8228                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8229         } else
8230                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8231
8232         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8233
8234         tpr->rx_std_prod_idx = tp->rx_pending;
8235         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8236
8237         tpr->rx_jmb_prod_idx =
8238                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8239         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8240
8241         tg3_rings_reset(tp);
8242
8243         /* Initialize MAC address and backoff seed. */
8244         __tg3_set_mac_addr(tp, 0);
8245
8246         /* MTU + ethernet header + FCS + optional VLAN tag */
8247         tw32(MAC_RX_MTU_SIZE,
8248              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8249
8250         /* The slot time is changed by tg3_setup_phy if we
8251          * run at gigabit with half duplex.
8252          */
8253         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8254               (6 << TX_LENGTHS_IPG_SHIFT) |
8255               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8256
8257         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8258                 val |= tr32(MAC_TX_LENGTHS) &
8259                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8260                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8261
8262         tw32(MAC_TX_LENGTHS, val);
8263
8264         /* Receive rules. */
8265         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8266         tw32(RCVLPC_CONFIG, 0x0181);
8267
8268         /* Calculate the RDMAC_MODE setting early; we need it to determine
8269          * the RCVLPC_STATE_ENABLE mask.
8270          */
8271         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8272                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8273                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8274                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8275                       RDMAC_MODE_LNGREAD_ENAB);
8276
8277         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8278                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8279
8280         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8281             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8282             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8283                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8284                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8285                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8286
8287         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8288             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8289                 if (tg3_flag(tp, TSO_CAPABLE) &&
8290                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8291                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8292                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8293                            !tg3_flag(tp, IS_5788)) {
8294                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8295                 }
8296         }
8297
8298         if (tg3_flag(tp, PCI_EXPRESS))
8299                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8300
8301         if (tg3_flag(tp, HW_TSO_1) ||
8302             tg3_flag(tp, HW_TSO_2) ||
8303             tg3_flag(tp, HW_TSO_3))
8304                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8305
8306         if (tg3_flag(tp, HW_TSO_3) ||
8307             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8308             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8309                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8310
8311         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8312                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8313
8314         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8315             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8316             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8317             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8318             tg3_flag(tp, 57765_PLUS)) {
8319                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8320                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8321                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8322                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8323                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8324                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8325                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8326                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8327                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8328                 }
8329                 tw32(TG3_RDMA_RSRVCTRL_REG,
8330                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8331         }
8332
8333         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8334             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8335                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8336                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8337                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8338                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8339         }
8340
8341         /* Receive/send statistics. */
8342         if (tg3_flag(tp, 5750_PLUS)) {
8343                 val = tr32(RCVLPC_STATS_ENABLE);
8344                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8345                 tw32(RCVLPC_STATS_ENABLE, val);
8346         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8347                    tg3_flag(tp, TSO_CAPABLE)) {
8348                 val = tr32(RCVLPC_STATS_ENABLE);
8349                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8350                 tw32(RCVLPC_STATS_ENABLE, val);
8351         } else {
8352                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8353         }
8354         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8355         tw32(SNDDATAI_STATSENAB, 0xffffff);
8356         tw32(SNDDATAI_STATSCTRL,
8357              (SNDDATAI_SCTRL_ENABLE |
8358               SNDDATAI_SCTRL_FASTUPD));
8359
8360         /* Setup host coalescing engine. */
8361         tw32(HOSTCC_MODE, 0);
8362         for (i = 0; i < 2000; i++) {
8363                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8364                         break;
8365                 udelay(10);
8366         }
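             /* The loop above bounds the wait for the coalescing engine to
              * report itself disabled: 2000 iterations * 10us = 20ms.
              */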
8367
8368         __tg3_set_coalesce(tp, &tp->coal);
8369
8370         if (!tg3_flag(tp, 5705_PLUS)) {
8371                 /* Status/statistics block address.  See tg3_timer,
8372                  * the tg3_periodic_fetch_stats call there, and
8373                  * tg3_get_stats64 to see how this works for 5705/5750 chips.
8374                  */
8375                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8376                      ((u64) tp->stats_mapping >> 32));
8377                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8378                      ((u64) tp->stats_mapping & 0xffffffff));
8379                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8380
8381                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8382
8383                 /* Clear statistics and status block memory areas */
8384                 for (i = NIC_SRAM_STATS_BLK;
8385                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8386                      i += sizeof(u32)) {
8387                         tg3_write_mem(tp, i, 0);
8388                         udelay(40);
8389                 }
8390         }
8391
8392         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8393
8394         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8395         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8396         if (!tg3_flag(tp, 5705_PLUS))
8397                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8398
8399         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8400                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8401                 /* reset to prevent losing 1st rx packet intermittently */
8402                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8403                 udelay(10);
8404         }
8405
8406         if (tg3_flag(tp, ENABLE_APE))
8407                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8408         else
8409                 tp->mac_mode = 0;
8410         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8411                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8412         if (!tg3_flag(tp, 5705_PLUS) &&
8413             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8414             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8415                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8416         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8417         udelay(40);
8418
8419         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8420          * If TG3_FLAG_IS_NIC is zero, we should read the
8421          * register to preserve the GPIO settings for LOMs. The GPIOs,
8422          * whether used as inputs or outputs, are set by boot code after
8423          * reset.
8424          */
8425         if (!tg3_flag(tp, IS_NIC)) {
8426                 u32 gpio_mask;
8427
8428                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8429                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8430                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8431
8432                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8433                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8434                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8435
8436                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8437                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8438
8439                 tp->grc_local_ctrl &= ~gpio_mask;
8440                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8441
8442                 /* GPIO1 must be driven high for eeprom write protect */
8443                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8444                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8445                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8446         }
8447         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8448         udelay(100);
8449
8450         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8451                 val = tr32(MSGINT_MODE);
8452                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8453                 tw32(MSGINT_MODE, val);
8454         }
8455
8456         if (!tg3_flag(tp, 5705_PLUS)) {
8457                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8458                 udelay(40);
8459         }
8460
8461         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8462                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8463                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8464                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8465                WDMAC_MODE_LNGREAD_ENAB);
8466
8467         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8468             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8469                 if (tg3_flag(tp, TSO_CAPABLE) &&
8470                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8471                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8472                         /* nothing */
8473                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8474                            !tg3_flag(tp, IS_5788)) {
8475                         val |= WDMAC_MODE_RX_ACCEL;
8476                 }
8477         }
8478
8479         /* Enable host coalescing bug fix */
8480         if (tg3_flag(tp, 5755_PLUS))
8481                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8482
8483         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8484                 val |= WDMAC_MODE_BURST_ALL_DATA;
8485
8486         tw32_f(WDMAC_MODE, val);
8487         udelay(40);
8488
8489         if (tg3_flag(tp, PCIX_MODE)) {
8490                 u16 pcix_cmd;
8491
8492                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8493                                      &pcix_cmd);
8494                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8495                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8496                         pcix_cmd |= PCI_X_CMD_READ_2K;
8497                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8498                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8499                         pcix_cmd |= PCI_X_CMD_READ_2K;
8500                 }
8501                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8502                                       pcix_cmd);
8503         }
8504
8505         tw32_f(RDMAC_MODE, rdmac_mode);
8506         udelay(40);
8507
8508         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8509         if (!tg3_flag(tp, 5705_PLUS))
8510                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8511
8512         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8513                 tw32(SNDDATAC_MODE,
8514                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8515         else
8516                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8517
8518         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8519         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8520         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8521         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8522                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8523         tw32(RCVDBDI_MODE, val);
8524         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8525         if (tg3_flag(tp, HW_TSO_1) ||
8526             tg3_flag(tp, HW_TSO_2) ||
8527             tg3_flag(tp, HW_TSO_3))
8528                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8529         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8530         if (tg3_flag(tp, ENABLE_TSS))
8531                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8532         tw32(SNDBDI_MODE, val);
8533         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8534
8535         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8536                 err = tg3_load_5701_a0_firmware_fix(tp);
8537                 if (err)
8538                         return err;
8539         }
8540
8541         if (tg3_flag(tp, TSO_CAPABLE)) {
8542                 err = tg3_load_tso_firmware(tp);
8543                 if (err)
8544                         return err;
8545         }
8546
8547         tp->tx_mode = TX_MODE_ENABLE;
8548
8549         if (tg3_flag(tp, 5755_PLUS) ||
8550             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8551                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8552
8553         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8554                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8555                 tp->tx_mode &= ~val;
8556                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8557         }
8558
8559         tw32_f(MAC_TX_MODE, tp->tx_mode);
8560         udelay(100);
8561
8562         if (tg3_flag(tp, ENABLE_RSS)) {
8563                 u32 reg = MAC_RSS_INDIR_TBL_0;
8564                 u8 *ent = (u8 *)&val;
8565
8566                 /* Setup the indirection table */
8567                 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8568                         int idx = i % sizeof(val);
8569
8570                         ent[idx] = i % (tp->irq_cnt - 1);
8571                         if (idx == sizeof(val) - 1) {
8572                                 tw32(reg, val);
8573                                 reg += 4;
8574                         }
8575                 }
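                     /* A worked example, assuming tp->irq_cnt == 5 (four rx
                      * rings): the table entries cycle 0,1,2,3,0,... and each
                      * group of four 8-bit entries is packed into val and
                      * written out as one 32-bit indirection register.
                      */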
8576
8577                 /* Setup the "secret" hash key. */
8578                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8579                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8580                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8581                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8582                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8583                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8584                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8585                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8586                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8587                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8588         }
8589
8590         tp->rx_mode = RX_MODE_ENABLE;
8591         if (tg3_flag(tp, 5755_PLUS))
8592                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8593
8594         if (tg3_flag(tp, ENABLE_RSS))
8595                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8596                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8597                                RX_MODE_RSS_IPV6_HASH_EN |
8598                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8599                                RX_MODE_RSS_IPV4_HASH_EN |
8600                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8601
8602         tw32_f(MAC_RX_MODE, tp->rx_mode);
8603         udelay(10);
8604
8605         tw32(MAC_LED_CTRL, tp->led_ctrl);
8606
8607         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8608         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8609                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8610                 udelay(10);
8611         }
8612         tw32_f(MAC_RX_MODE, tp->rx_mode);
8613         udelay(10);
8614
8615         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8616                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8617                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8618                         /* Set drive transmission level to 1.2V, but only
8619                          * if the signal pre-emphasis bit is not set.  */
8620                         val = tr32(MAC_SERDES_CFG);
8621                         val &= 0xfffff000;
8622                         val |= 0x880;
8623                         tw32(MAC_SERDES_CFG, val);
8624                 }
8625                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8626                         tw32(MAC_SERDES_CFG, 0x616000);
8627         }
8628
8629         /* Prevent chip from dropping frames when flow control
8630          * is enabled.
8631          */
8632         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8633                 val = 1;
8634         else
8635                 val = 2;
8636         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8637
8638         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8639             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8640                 /* Use hardware link auto-negotiation */
8641                 tg3_flag_set(tp, HW_AUTONEG);
8642         }
8643
8644         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8645             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8646                 u32 tmp;
8647
8648                 tmp = tr32(SERDES_RX_CTRL);
8649                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8650                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8651                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8652                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8653         }
8654
8655         if (!tg3_flag(tp, USE_PHYLIB)) {
8656                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8657                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8658                         tp->link_config.speed = tp->link_config.orig_speed;
8659                         tp->link_config.duplex = tp->link_config.orig_duplex;
8660                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
8661                 }
8662
8663                 err = tg3_setup_phy(tp, 0);
8664                 if (err)
8665                         return err;
8666
8667                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8668                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8669                         u32 tmp;
8670
8671                         /* Clear CRC stats. */
8672                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8673                                 tg3_writephy(tp, MII_TG3_TEST1,
8674                                              tmp | MII_TG3_TEST1_CRC_EN);
8675                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8676                         }
8677                 }
8678         }
8679
8680         __tg3_set_rx_mode(tp->dev);
8681
8682         /* Initialize receive rules. */
8683         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
8684         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8685         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
8686         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8687
8688         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8689                 limit = 8;
8690         else
8691                 limit = 16;
8692         if (tg3_flag(tp, ENABLE_ASF))
8693                 limit -= 4;
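             /* The four rules skipped here are presumably left for the ASF
              * firmware's own use; we avoid clearing them when ASF is on.
              */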
8694         switch (limit) {
8695         case 16:
8696                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
8697         case 15:
8698                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
8699         case 14:
8700                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
8701         case 13:
8702                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
8703         case 12:
8704                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
8705         case 11:
8706                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
8707         case 10:
8708                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
8709         case 9:
8710                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
8711         case 8:
8712                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
8713         case 7:
8714                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
8715         case 6:
8716                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
8717         case 5:
8718                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
8719         case 4:
8720                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
8721         case 3:
8722                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
8723         case 2:
8724         case 1:
8725
8726         default:
8727                 break;
8728         }
8729
8730         if (tg3_flag(tp, ENABLE_APE))
8731                 /* Write our heartbeat update interval to APE. */
8732                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8733                                 APE_HOST_HEARTBEAT_INT_DISABLE);
8734
8735         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8736
8737         return 0;
8738 }
8739
8740 /* Called at device open time to get the chip ready for
8741  * packet processing.  Invoked with tp->lock held.
8742  */
8743 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8744 {
8745         tg3_switch_clocks(tp);
8746
8747         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8748
8749         return tg3_reset_hw(tp, reset_phy);
8750 }
8751
8752 #define TG3_STAT_ADD32(PSTAT, REG) \
8753 do {    u32 __val = tr32(REG); \
8754         (PSTAT)->low += __val; \
8755         if ((PSTAT)->low < __val) \
8756                 (PSTAT)->high += 1; \
8757 } while (0)
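     /* Carry detection relies on unsigned 32-bit wraparound: if the low word
      * overflows, its new value is smaller than the value just added.  A
      * worked example (a sketch): low = 0xfffffff0, __val = 0x20 -> low
      * becomes 0x10, and 0x10 < 0x20, so high is incremented to record the
      * carry.
      */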
8758
8759 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8760 {
8761         struct tg3_hw_stats *sp = tp->hw_stats;
8762
8763         if (!netif_carrier_ok(tp->dev))
8764                 return;
8765
8766         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8767         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8768         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8769         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8770         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8771         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8772         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8773         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8774         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8775         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8776         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8777         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8778         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8779
8780         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8781         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8782         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8783         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8784         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8785         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8786         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8787         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8788         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8789         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8790         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8791         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8792         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8793         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8794
8795         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8796         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
8797                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8798         } else {
8799                 u32 val = tr32(HOSTCC_FLOW_ATTN);
8800                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8801                 if (val) {
8802                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8803                         sp->rx_discards.low += val;
8804                         if (sp->rx_discards.low < val)
8805                                 sp->rx_discards.high += 1;
8806                 }
8807                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8808         }
8809         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8810 }
8811
8812 static void tg3_timer(unsigned long __opaque)
8813 {
8814         struct tg3 *tp = (struct tg3 *) __opaque;
8815
8816         if (tp->irq_sync)
8817                 goto restart_timer;
8818
8819         spin_lock(&tp->lock);
8820
8821         if (!tg3_flag(tp, TAGGED_STATUS)) {
8822                 /* All of this garbage is because, when using non-tagged
8823                  * IRQ status, the mailbox/status_block protocol the chip
8824                  * uses with the cpu is race prone.
8825                  */
8826                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8827                         tw32(GRC_LOCAL_CTRL,
8828                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8829                 } else {
8830                         tw32(HOSTCC_MODE, tp->coalesce_mode |
8831                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8832                 }
8833
8834                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8835                         tg3_flag_set(tp, RESTART_TIMER);
8836                         spin_unlock(&tp->lock);
8837                         schedule_work(&tp->reset_task);
8838                         return;
8839                 }
8840         }
8841
8842         /* This part only runs once per second. */
8843         if (!--tp->timer_counter) {
8844                 if (tg3_flag(tp, 5705_PLUS))
8845                         tg3_periodic_fetch_stats(tp);
8846
8847                 if (tp->setlpicnt && !--tp->setlpicnt) {
8848                         u32 val = tr32(TG3_CPMU_EEE_MODE);
8849                         tw32(TG3_CPMU_EEE_MODE,
8850                              val | TG3_CPMU_EEEMD_LPI_ENABLE);
8851                 }
8852
8853                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
8854                         u32 mac_stat;
8855                         int phy_event;
8856
8857                         mac_stat = tr32(MAC_STATUS);
8858
8859                         phy_event = 0;
8860                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8861                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8862                                         phy_event = 1;
8863                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8864                                 phy_event = 1;
8865
8866                         if (phy_event)
8867                                 tg3_setup_phy(tp, 0);
8868                 } else if (tg3_flag(tp, POLL_SERDES)) {
8869                         u32 mac_stat = tr32(MAC_STATUS);
8870                         int need_setup = 0;
8871
8872                         if (netif_carrier_ok(tp->dev) &&
8873                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8874                                 need_setup = 1;
8875                         }
8876                         if (!netif_carrier_ok(tp->dev) &&
8877                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
8878                                          MAC_STATUS_SIGNAL_DET))) {
8879                                 need_setup = 1;
8880                         }
8881                         if (need_setup) {
8882                                 if (!tp->serdes_counter) {
8883                                         tw32_f(MAC_MODE,
8884                                              (tp->mac_mode &
8885                                               ~MAC_MODE_PORT_MODE_MASK));
8886                                         udelay(40);
8887                                         tw32_f(MAC_MODE, tp->mac_mode);
8888                                         udelay(40);
8889                                 }
8890                                 tg3_setup_phy(tp, 0);
8891                         }
8892                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8893                            tg3_flag(tp, 5780_CLASS)) {
8894                         tg3_serdes_parallel_detect(tp);
8895                 }
8896
8897                 tp->timer_counter = tp->timer_multiplier;
8898         }
8899
8900         /* Heartbeat is only sent once every 2 seconds.
8901          *
8902          * The heartbeat is to tell the ASF firmware that the host
8903          * driver is still alive.  In the event that the OS crashes,
8904          * ASF needs to reset the hardware to free up the FIFO space
8905          * that may be filled with rx packets destined for the host.
8906          * If the FIFO is full, ASF will no longer function properly.
8907          *
8908          * Unintended resets have been reported on real time kernels
8909          * where the timer doesn't run on time.  Netpoll will also have
8910          * the same problem.
8911          *
8912          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8913          * to check the ring condition when the heartbeat is expiring
8914          * before doing the reset.  This will prevent most unintended
8915          * resets.
8916          */
8917         if (!--tp->asf_counter) {
8918                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
8919                         tg3_wait_for_event_ack(tp);
8920
8921                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8922                                       FWCMD_NICDRV_ALIVE3);
8923                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8924                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8925                                       TG3_FW_UPDATE_TIMEOUT_SEC);
8926
8927                         tg3_generate_fw_event(tp);
8928                 }
8929                 tp->asf_counter = tp->asf_multiplier;
8930         }
8931
8932         spin_unlock(&tp->lock);
8933
8934 restart_timer:
8935         tp->timer.expires = jiffies + tp->timer_offset;
8936         add_timer(&tp->timer);
8937 }
8938
8939 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8940 {
8941         irq_handler_t fn;
8942         unsigned long flags;
8943         char *name;
8944         struct tg3_napi *tnapi = &tp->napi[irq_num];
8945
8946         if (tp->irq_cnt == 1)
8947                 name = tp->dev->name;
8948         else {
8949                 name = &tnapi->irq_lbl[0];
8950                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8951                 name[IFNAMSIZ-1] = 0;
8952         }
8953
8954         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
8955                 fn = tg3_msi;
8956                 if (tg3_flag(tp, 1SHOT_MSI))
8957                         fn = tg3_msi_1shot;
8958                 flags = 0;
8959         } else {
8960                 fn = tg3_interrupt;
8961                 if (tg3_flag(tp, TAGGED_STATUS))
8962                         fn = tg3_interrupt_tagged;
8963                 flags = IRQF_SHARED;
8964         }
8965
8966         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8967 }
8968
8969 static int tg3_test_interrupt(struct tg3 *tp)
8970 {
8971         struct tg3_napi *tnapi = &tp->napi[0];
8972         struct net_device *dev = tp->dev;
8973         int err, i, intr_ok = 0;
8974         u32 val;
8975
8976         if (!netif_running(dev))
8977                 return -ENODEV;
8978
8979         tg3_disable_ints(tp);
8980
8981         free_irq(tnapi->irq_vec, tnapi);
8982
8983         /*
8984          * Turn off MSI one-shot mode.  Otherwise this test has no way
8985          * to observe whether the interrupt was actually delivered.
8986          */
8987         if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
8988                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8989                 tw32(MSGINT_MODE, val);
8990         }
8991
8992         err = request_irq(tnapi->irq_vec, tg3_test_isr,
8993                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8994         if (err)
8995                 return err;
8996
8997         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8998         tg3_enable_ints(tp);
8999
9000         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9001                tnapi->coal_now);
9002
9003         for (i = 0; i < 5; i++) {
9004                 u32 int_mbox, misc_host_ctrl;
9005
9006                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9007                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9008
9009                 if ((int_mbox != 0) ||
9010                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9011                         intr_ok = 1;
9012                         break;
9013                 }
9014
9015                 msleep(10);
9016         }
9017
9018         tg3_disable_ints(tp);
9019
9020         free_irq(tnapi->irq_vec, tnapi);
9021
9022         err = tg3_request_irq(tp, 0);
9023
9024         if (err)
9025                 return err;
9026
9027         if (intr_ok) {
9028                 /* Reenable MSI one shot mode. */
9029                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9030                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9031                         tw32(MSGINT_MODE, val);
9032                 }
9033                 return 0;
9034         }
9035
9036         return -EIO;
9037 }
9038
9039 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
9040  * successfully restored.
9041  */
9042 static int tg3_test_msi(struct tg3 *tp)
9043 {
9044         int err;
9045         u16 pci_cmd;
9046
9047         if (!tg3_flag(tp, USING_MSI))
9048                 return 0;
9049
9050         /* Turn off SERR reporting in case MSI terminates with Master
9051          * Abort.
9052          */
9053         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9054         pci_write_config_word(tp->pdev, PCI_COMMAND,
9055                               pci_cmd & ~PCI_COMMAND_SERR);
9056
9057         err = tg3_test_interrupt(tp);
9058
9059         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9060
9061         if (!err)
9062                 return 0;
9063
9064         /* other failures */
9065         if (err != -EIO)
9066                 return err;
9067
9068         /* MSI test failed, go back to INTx mode */
9069         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9070                     "to INTx mode. Please report this failure to the PCI "
9071                     "maintainer and include system chipset information\n");
9072
9073         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9074
9075         pci_disable_msi(tp->pdev);
9076
9077         tg3_flag_clear(tp, USING_MSI);
9078         tp->napi[0].irq_vec = tp->pdev->irq;
9079
9080         err = tg3_request_irq(tp, 0);
9081         if (err)
9082                 return err;
9083
9084         /* Need to reset the chip because the MSI cycle may have terminated
9085          * with Master Abort.
9086          */
9087         tg3_full_lock(tp, 1);
9088
9089         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9090         err = tg3_init_hw(tp, 1);
9091
9092         tg3_full_unlock(tp);
9093
9094         if (err)
9095                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9096
9097         return err;
9098 }
9099
9100 static int tg3_request_firmware(struct tg3 *tp)
9101 {
9102         const __be32 *fw_data;
9103
9104         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9105                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9106                            tp->fw_needed);
9107                 return -ENOENT;
9108         }
9109
9110         fw_data = (void *)tp->fw->data;
9111
9112         /* Firmware blob starts with version numbers, followed by
9113          * start address and _full_ length including BSS sections
9114          * (which must be longer than the actual data, of course).
9115          */
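             /* A sketch of the assumed 12-byte header, which is what the
              * "tp->fw->size - 12" check below relies on:
              *   fw_data[0]: firmware version
              *   fw_data[1]: start (load) address in NIC memory
              *   fw_data[2]: full image length, including BSS
              */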
9116
9117         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9118         if (tp->fw_len < (tp->fw->size - 12)) {
9119                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9120                            tp->fw_len, tp->fw_needed);
9121                 release_firmware(tp->fw);
9122                 tp->fw = NULL;
9123                 return -EINVAL;
9124         }
9125
9126         /* We no longer need firmware; we have it. */
9127         tp->fw_needed = NULL;
9128         return 0;
9129 }
9130
9131 static bool tg3_enable_msix(struct tg3 *tp)
9132 {
9133         int i, rc, cpus = num_online_cpus();
9134         struct msix_entry msix_ent[tp->irq_max];
9135
9136         if (cpus == 1)
9137                 /* Just fall back to the simpler MSI mode. */
9138                 return false;
9139
9140         /*
9141          * We want as many rx rings enabled as there are cpus.
9142          * The first MSIX vector only deals with link interrupts, etc,
9143          * so we add one to the number of vectors we are requesting.
9144          */
9145         tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
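             /* E.g. (a sketch): on a 4-cpu system with irq_max >= 5, this
              * requests 5 vectors: one for link/misc plus one per rx ring.
              */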
9146
9147         for (i = 0; i < tp->irq_max; i++) {
9148                 msix_ent[i].entry  = i;
9149                 msix_ent[i].vector = 0;
9150         }
9151
9152         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9153         if (rc < 0) {
9154                 return false;
9155         } else if (rc != 0) {
9156                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9157                         return false;
9158                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9159                               tp->irq_cnt, rc);
9160                 tp->irq_cnt = rc;
9161         }
9162
9163         for (i = 0; i < tp->irq_max; i++)
9164                 tp->napi[i].irq_vec = msix_ent[i].vector;
9165
9166         netif_set_real_num_tx_queues(tp->dev, 1);
9167         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9168         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9169                 pci_disable_msix(tp->pdev);
9170                 return false;
9171         }
9172
9173         if (tp->irq_cnt > 1) {
9174                 tg3_flag_set(tp, ENABLE_RSS);
9175
9176                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9177                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9178                         tg3_flag_set(tp, ENABLE_TSS);
9179                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9180                 }
9181         }
9182
9183         return true;
9184 }
9185
9186 static void tg3_ints_init(struct tg3 *tp)
9187 {
9188         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9189             !tg3_flag(tp, TAGGED_STATUS)) {
9190                 /* All MSI supporting chips should support tagged
9191                  * status.  Assert that this is the case.
9192                  */
9193                 netdev_warn(tp->dev,
9194                             "MSI without TAGGED_STATUS? Not using MSI\n");
9195                 goto defcfg;
9196         }
9197
9198         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9199                 tg3_flag_set(tp, USING_MSIX);
9200         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9201                 tg3_flag_set(tp, USING_MSI);
9202
9203         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9204                 u32 msi_mode = tr32(MSGINT_MODE);
9205                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9206                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9207                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9208         }
9209 defcfg:
9210         if (!tg3_flag(tp, USING_MSIX)) {
9211                 tp->irq_cnt = 1;
9212                 tp->napi[0].irq_vec = tp->pdev->irq;
9213                 netif_set_real_num_tx_queues(tp->dev, 1);
9214                 netif_set_real_num_rx_queues(tp->dev, 1);
9215         }
9216 }
9217
9218 static void tg3_ints_fini(struct tg3 *tp)
9219 {
9220         if (tg3_flag(tp, USING_MSIX))
9221                 pci_disable_msix(tp->pdev);
9222         else if (tg3_flag(tp, USING_MSI))
9223                 pci_disable_msi(tp->pdev);
9224         tg3_flag_clear(tp, USING_MSI);
9225         tg3_flag_clear(tp, USING_MSIX);
9226         tg3_flag_clear(tp, ENABLE_RSS);
9227         tg3_flag_clear(tp, ENABLE_TSS);
9228 }
9229
9230 static int tg3_open(struct net_device *dev)
9231 {
9232         struct tg3 *tp = netdev_priv(dev);
9233         int i, err;
9234
9235         if (tp->fw_needed) {
9236                 err = tg3_request_firmware(tp);
9237                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9238                         if (err)
9239                                 return err;
9240                 } else if (err) {
9241                         netdev_warn(tp->dev, "TSO capability disabled\n");
9242                         tg3_flag_clear(tp, TSO_CAPABLE);
9243                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9244                         netdev_notice(tp->dev, "TSO capability restored\n");
9245                         tg3_flag_set(tp, TSO_CAPABLE);
9246                 }
9247         }
9248
9249         netif_carrier_off(tp->dev);
9250
9251         err = tg3_power_up(tp);
9252         if (err)
9253                 return err;
9254
9255         tg3_full_lock(tp, 0);
9256
9257         tg3_disable_ints(tp);
9258         tg3_flag_clear(tp, INIT_COMPLETE);
9259
9260         tg3_full_unlock(tp);
9261
9262         /*
9263          * Set up interrupts first so we know how
9264          * many NAPI resources to allocate.
9265          */
9266         tg3_ints_init(tp);
9267
9268         /* The placement of this call is tied
9269          * to the setup and use of Host TX descriptors.
9270          */
9271         err = tg3_alloc_consistent(tp);
9272         if (err)
9273                 goto err_out1;
9274
9275         tg3_napi_init(tp);
9276
9277         tg3_napi_enable(tp);
9278
9279         for (i = 0; i < tp->irq_cnt; i++) {
9280                 struct tg3_napi *tnapi = &tp->napi[i];
9281                 err = tg3_request_irq(tp, i);
9282                 if (err) {
9283                         for (i--; i >= 0; i--)
9284                                 free_irq(tp->napi[i].irq_vec, &tp->napi[i]);
9285                         break;
9286                 }
9287         }
9288
9289         if (err)
9290                 goto err_out2;
9291
9292         tg3_full_lock(tp, 0);
9293
9294         err = tg3_init_hw(tp, 1);
9295         if (err) {
9296                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9297                 tg3_free_rings(tp);
9298         } else {
9299                 if (tg3_flag(tp, TAGGED_STATUS))
9300                         tp->timer_offset = HZ;
9301                 else
9302                         tp->timer_offset = HZ / 10;
9303
9304                 BUG_ON(tp->timer_offset > HZ);
9305                 tp->timer_counter = tp->timer_multiplier =
9306                         (HZ / tp->timer_offset);
9307                 tp->asf_counter = tp->asf_multiplier =
9308                         ((HZ / tp->timer_offset) * 2);
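                     /* A sketch of the arithmetic, assuming HZ = 1000 and
                      * non-tagged status: timer_offset = 100 jiffies, so the
                      * timer fires ten times a second; timer_multiplier = 10
                      * makes the once-per-second work in tg3_timer run on
                      * every tenth tick, and asf_multiplier = 20 paces the
                      * ASF heartbeat at once every two seconds.
                      */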
9309
9310                 init_timer(&tp->timer);
9311                 tp->timer.expires = jiffies + tp->timer_offset;
9312                 tp->timer.data = (unsigned long) tp;
9313                 tp->timer.function = tg3_timer;
9314         }
9315
9316         tg3_full_unlock(tp);
9317
9318         if (err)
9319                 goto err_out3;
9320
9321         if (tg3_flag(tp, USING_MSI)) {
9322                 err = tg3_test_msi(tp);
9323
9324                 if (err) {
9325                         tg3_full_lock(tp, 0);
9326                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9327                         tg3_free_rings(tp);
9328                         tg3_full_unlock(tp);
9329
9330                         goto err_out2;
9331                 }
9332
9333                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9334                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9335
9336                         tw32(PCIE_TRANSACTION_CFG,
9337                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9338                 }
9339         }
9340
9341         tg3_phy_start(tp);
9342
9343         tg3_full_lock(tp, 0);
9344
9345         add_timer(&tp->timer);
9346         tg3_flag_set(tp, INIT_COMPLETE);
9347         tg3_enable_ints(tp);
9348
9349         tg3_full_unlock(tp);
9350
9351         netif_tx_start_all_queues(dev);
9352
9353         /*
9354          * Reset the loopback feature if it was turned on while the device
9355          * was down, to make sure that it's installed properly now.
9356          */
9357         if (dev->features & NETIF_F_LOOPBACK)
9358                 tg3_set_loopback(dev, dev->features);
9359
9360         return 0;
9361
9362 err_out3:
9363         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9364                 struct tg3_napi *tnapi = &tp->napi[i];
9365                 free_irq(tnapi->irq_vec, tnapi);
9366         }
9367
9368 err_out2:
9369         tg3_napi_disable(tp);
9370         tg3_napi_fini(tp);
9371         tg3_free_consistent(tp);
9372
9373 err_out1:
9374         tg3_ints_fini(tp);
9375         return err;
9376 }
9377
9378 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9379                                                  struct rtnl_link_stats64 *);
9380 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9381
9382 static int tg3_close(struct net_device *dev)
9383 {
9384         int i;
9385         struct tg3 *tp = netdev_priv(dev);
9386
9387         tg3_napi_disable(tp);
9388         cancel_work_sync(&tp->reset_task);
9389
9390         netif_tx_stop_all_queues(dev);
9391
9392         del_timer_sync(&tp->timer);
9393
9394         tg3_phy_stop(tp);
9395
9396         tg3_full_lock(tp, 1);
9397
9398         tg3_disable_ints(tp);
9399
9400         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9401         tg3_free_rings(tp);
9402         tg3_flag_clear(tp, INIT_COMPLETE);
9403
9404         tg3_full_unlock(tp);
9405
9406         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9407                 struct tg3_napi *tnapi = &tp->napi[i];
9408                 free_irq(tnapi->irq_vec, tnapi);
9409         }
9410
9411         tg3_ints_fini(tp);
9412
9413         tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9414
9415         memcpy(&tp->estats_prev, tg3_get_estats(tp),
9416                sizeof(tp->estats_prev));
9417
9418         tg3_napi_fini(tp);
9419
9420         tg3_free_consistent(tp);
9421
9422         tg3_power_down(tp);
9423
9424         netif_carrier_off(tp->dev);
9425
9426         return 0;
9427 }
9428
9429 static inline u64 get_stat64(tg3_stat64_t *val)
9430 {
9431        return ((u64)val->high << 32) | ((u64)val->low);
9432 }
9433
9434 static u64 calc_crc_errors(struct tg3 *tp)
9435 {
9436         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9437
9438         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9439             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9440              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9441                 u32 val;
9442
9443                 spin_lock_bh(&tp->lock);
9444                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9445                         tg3_writephy(tp, MII_TG3_TEST1,
9446                                      val | MII_TG3_TEST1_CRC_EN);
9447                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9448                 } else
9449                         val = 0;
9450                 spin_unlock_bh(&tp->lock);
9451
9452                 tp->phy_crc_errors += val;
9453
9454                 return tp->phy_crc_errors;
9455         }
9456
9457         return get_stat64(&hw_stats->rx_fcs_errors);
9458 }
9459
9460 #define ESTAT_ADD(member) \
9461         estats->member =        old_estats->member + \
9462                                 get_stat64(&hw_stats->member)
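     /* Each ethtool stat is the snapshot saved in estats_prev at the last
      * tg3_close() plus the live hardware counters, so the totals reported
      * to userspace survive down/up cycles of the device.
      */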
9463
9464 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9465 {
9466         struct tg3_ethtool_stats *estats = &tp->estats;
9467         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9468         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9469
9470         if (!hw_stats)
9471                 return old_estats;
9472
9473         ESTAT_ADD(rx_octets);
9474         ESTAT_ADD(rx_fragments);
9475         ESTAT_ADD(rx_ucast_packets);
9476         ESTAT_ADD(rx_mcast_packets);
9477         ESTAT_ADD(rx_bcast_packets);
9478         ESTAT_ADD(rx_fcs_errors);
9479         ESTAT_ADD(rx_align_errors);
9480         ESTAT_ADD(rx_xon_pause_rcvd);
9481         ESTAT_ADD(rx_xoff_pause_rcvd);
9482         ESTAT_ADD(rx_mac_ctrl_rcvd);
9483         ESTAT_ADD(rx_xoff_entered);
9484         ESTAT_ADD(rx_frame_too_long_errors);
9485         ESTAT_ADD(rx_jabbers);
9486         ESTAT_ADD(rx_undersize_packets);
9487         ESTAT_ADD(rx_in_length_errors);
9488         ESTAT_ADD(rx_out_length_errors);
9489         ESTAT_ADD(rx_64_or_less_octet_packets);
9490         ESTAT_ADD(rx_65_to_127_octet_packets);
9491         ESTAT_ADD(rx_128_to_255_octet_packets);
9492         ESTAT_ADD(rx_256_to_511_octet_packets);
9493         ESTAT_ADD(rx_512_to_1023_octet_packets);
9494         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9495         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9496         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9497         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9498         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9499
9500         ESTAT_ADD(tx_octets);
9501         ESTAT_ADD(tx_collisions);
9502         ESTAT_ADD(tx_xon_sent);
9503         ESTAT_ADD(tx_xoff_sent);
9504         ESTAT_ADD(tx_flow_control);
9505         ESTAT_ADD(tx_mac_errors);
9506         ESTAT_ADD(tx_single_collisions);
9507         ESTAT_ADD(tx_mult_collisions);
9508         ESTAT_ADD(tx_deferred);
9509         ESTAT_ADD(tx_excessive_collisions);
9510         ESTAT_ADD(tx_late_collisions);
9511         ESTAT_ADD(tx_collide_2times);
9512         ESTAT_ADD(tx_collide_3times);
9513         ESTAT_ADD(tx_collide_4times);
9514         ESTAT_ADD(tx_collide_5times);
9515         ESTAT_ADD(tx_collide_6times);
9516         ESTAT_ADD(tx_collide_7times);
9517         ESTAT_ADD(tx_collide_8times);
9518         ESTAT_ADD(tx_collide_9times);
9519         ESTAT_ADD(tx_collide_10times);
9520         ESTAT_ADD(tx_collide_11times);
9521         ESTAT_ADD(tx_collide_12times);
9522         ESTAT_ADD(tx_collide_13times);
9523         ESTAT_ADD(tx_collide_14times);
9524         ESTAT_ADD(tx_collide_15times);
9525         ESTAT_ADD(tx_ucast_packets);
9526         ESTAT_ADD(tx_mcast_packets);
9527         ESTAT_ADD(tx_bcast_packets);
9528         ESTAT_ADD(tx_carrier_sense_errors);
9529         ESTAT_ADD(tx_discards);
9530         ESTAT_ADD(tx_errors);
9531
9532         ESTAT_ADD(dma_writeq_full);
9533         ESTAT_ADD(dma_write_prioq_full);
9534         ESTAT_ADD(rxbds_empty);
9535         ESTAT_ADD(rx_discards);
9536         ESTAT_ADD(rx_errors);
9537         ESTAT_ADD(rx_threshold_hit);
9538
9539         ESTAT_ADD(dma_readq_full);
9540         ESTAT_ADD(dma_read_prioq_full);
9541         ESTAT_ADD(tx_comp_queue_full);
9542
9543         ESTAT_ADD(ring_set_send_prod_index);
9544         ESTAT_ADD(ring_status_update);
9545         ESTAT_ADD(nic_irqs);
9546         ESTAT_ADD(nic_avoided_irqs);
9547         ESTAT_ADD(nic_tx_threshold_hit);
9548
9549         return estats;
9550 }
9551
9552 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9553                                                  struct rtnl_link_stats64 *stats)
9554 {
9555         struct tg3 *tp = netdev_priv(dev);
9556         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9557         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9558
9559         if (!hw_stats)
9560                 return old_stats;
9561
9562         stats->rx_packets = old_stats->rx_packets +
9563                 get_stat64(&hw_stats->rx_ucast_packets) +
9564                 get_stat64(&hw_stats->rx_mcast_packets) +
9565                 get_stat64(&hw_stats->rx_bcast_packets);
9566
9567         stats->tx_packets = old_stats->tx_packets +
9568                 get_stat64(&hw_stats->tx_ucast_packets) +
9569                 get_stat64(&hw_stats->tx_mcast_packets) +
9570                 get_stat64(&hw_stats->tx_bcast_packets);
9571
9572         stats->rx_bytes = old_stats->rx_bytes +
9573                 get_stat64(&hw_stats->rx_octets);
9574         stats->tx_bytes = old_stats->tx_bytes +
9575                 get_stat64(&hw_stats->tx_octets);
9576
9577         stats->rx_errors = old_stats->rx_errors +
9578                 get_stat64(&hw_stats->rx_errors);
9579         stats->tx_errors = old_stats->tx_errors +
9580                 get_stat64(&hw_stats->tx_errors) +
9581                 get_stat64(&hw_stats->tx_mac_errors) +
9582                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9583                 get_stat64(&hw_stats->tx_discards);
9584
9585         stats->multicast = old_stats->multicast +
9586                 get_stat64(&hw_stats->rx_mcast_packets);
9587         stats->collisions = old_stats->collisions +
9588                 get_stat64(&hw_stats->tx_collisions);
9589
9590         stats->rx_length_errors = old_stats->rx_length_errors +
9591                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9592                 get_stat64(&hw_stats->rx_undersize_packets);
9593
9594         stats->rx_over_errors = old_stats->rx_over_errors +
9595                 get_stat64(&hw_stats->rxbds_empty);
9596         stats->rx_frame_errors = old_stats->rx_frame_errors +
9597                 get_stat64(&hw_stats->rx_align_errors);
9598         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9599                 get_stat64(&hw_stats->tx_discards);
9600         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9601                 get_stat64(&hw_stats->tx_carrier_sense_errors);
9602
9603         stats->rx_crc_errors = old_stats->rx_crc_errors +
9604                 calc_crc_errors(tp);
9605
9606         stats->rx_missed_errors = old_stats->rx_missed_errors +
9607                 get_stat64(&hw_stats->rx_discards);
9608
9609         stats->rx_dropped = tp->rx_dropped;
9610
9611         return stats;
9612 }
9613
9614 static inline u32 calc_crc(unsigned char *buf, int len)
9615 {
9616         u32 reg;
9617         u32 tmp;
9618         int j, k;
9619
9620         reg = 0xffffffff;
9621
9622         for (j = 0; j < len; j++) {
9623                 reg ^= buf[j];
9624
9625                 for (k = 0; k < 8; k++) {
9626                         tmp = reg & 0x01;
9627
9628                         reg >>= 1;
9629
9630                         if (tmp)
9631                                 reg ^= 0xedb88320;
9632                 }
9633         }
9634
9635         return ~reg;
9636 }
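     /* This is the standard bit-reflected CRC-32 used by Ethernet
      * (polynomial 0xedb88320), computed bit-serially; the multicast hash
      * below uses only seven bits derived from the result.
      */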
9637
9638 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9639 {
9640         /* accept or reject all multicast frames */
9641         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9642         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9643         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9644         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9645 }
9646
9647 static void __tg3_set_rx_mode(struct net_device *dev)
9648 {
9649         struct tg3 *tp = netdev_priv(dev);
9650         u32 rx_mode;
9651
9652         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9653                                   RX_MODE_KEEP_VLAN_TAG);
9654
9655 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9656         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9657          * flag clear.
9658          */
9659         if (!tg3_flag(tp, ENABLE_ASF))
9660                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9661 #endif
9662
9663         if (dev->flags & IFF_PROMISC) {
9664                 /* Promiscuous mode. */
9665                 rx_mode |= RX_MODE_PROMISC;
9666         } else if (dev->flags & IFF_ALLMULTI) {
9667                 /* Accept all multicast. */
9668                 tg3_set_multi(tp, 1);
9669         } else if (netdev_mc_empty(dev)) {
9670                 /* Reject all multicast. */
9671                 tg3_set_multi(tp, 0);
9672         } else {
9673                 /* Accept one or more multicast(s). */
9674                 struct netdev_hw_addr *ha;
9675                 u32 mc_filter[4] = { 0, };
9676                 u32 regidx;
9677                 u32 bit;
9678                 u32 crc;
9679
9680                 netdev_for_each_mc_addr(ha, dev) {
9681                         crc = calc_crc(ha->addr, ETH_ALEN);
9682                         bit = ~crc & 0x7f;
9683                         regidx = (bit & 0x60) >> 5;
9684                         bit &= 0x1f;
9685                         mc_filter[regidx] |= (1 << bit);
9686                 }
9687
9688                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9689                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9690                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9691                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9692         }
9693
9694         if (rx_mode != tp->rx_mode) {
9695                 tp->rx_mode = rx_mode;
9696                 tw32_f(MAC_RX_MODE, rx_mode);
9697                 udelay(10);
9698         }
9699 }
9700
9701 static void tg3_set_rx_mode(struct net_device *dev)
9702 {
9703         struct tg3 *tp = netdev_priv(dev);
9704
9705         if (!netif_running(dev))
9706                 return;
9707
9708         tg3_full_lock(tp, 0);
9709         __tg3_set_rx_mode(dev);
9710         tg3_full_unlock(tp);
9711 }
9712
9713 static int tg3_get_regs_len(struct net_device *dev)
9714 {
9715         return TG3_REG_BLK_SIZE;
9716 }
9717
9718 static void tg3_get_regs(struct net_device *dev,
9719                 struct ethtool_regs *regs, void *_p)
9720 {
9721         struct tg3 *tp = netdev_priv(dev);
9722
9723         regs->version = 0;
9724
9725         memset(_p, 0, TG3_REG_BLK_SIZE);
9726
9727         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9728                 return;
9729
9730         tg3_full_lock(tp, 0);
9731
9732         tg3_dump_legacy_regs(tp, (u32 *)_p);
9733
9734         tg3_full_unlock(tp);
9735 }
9736
9737 static int tg3_get_eeprom_len(struct net_device *dev)
9738 {
9739         struct tg3 *tp = netdev_priv(dev);
9740
9741         return tp->nvram_size;
9742 }
9743
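/* ethtool EEPROM reads (e.g. "ethtool -e eth0 offset 5 length 10" lands
 * here) are serviced in three phases so every NVRAM access stays 32-bit
 * aligned: a partial leading word, whole words in the middle, then a
 * partial trailing word.  For offset = 5, len = 10 that is 3 bytes from
 * the word at 4, the whole word at 8, and 3 bytes from the word at 12.
 */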
9744 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9745 {
9746         struct tg3 *tp = netdev_priv(dev);
9747         int ret;
9748         u8  *pd;
9749         u32 i, offset, len, b_offset, b_count;
9750         __be32 val;
9751
9752         if (tg3_flag(tp, NO_NVRAM))
9753                 return -EINVAL;
9754
9755         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9756                 return -EAGAIN;
9757
9758         offset = eeprom->offset;
9759         len = eeprom->len;
9760         eeprom->len = 0;
9761
9762         eeprom->magic = TG3_EEPROM_MAGIC;
9763
9764         if (offset & 3) {
9765                 /* adjustments to start on required 4 byte boundary */
9766                 b_offset = offset & 3;
9767                 b_count = 4 - b_offset;
9768                 if (b_count > len) {
9769                         /* i.e. offset=1 len=2 */
9770                         b_count = len;
9771                 }
9772                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9773                 if (ret)
9774                         return ret;
9775                 memcpy(data, ((char *)&val) + b_offset, b_count);
9776                 len -= b_count;
9777                 offset += b_count;
9778                 eeprom->len += b_count;
9779         }
9780
9781         /* read bytes up to the last 4 byte boundary */
9782         pd = &data[eeprom->len];
9783         for (i = 0; i < (len - (len & 3)); i += 4) {
9784                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9785                 if (ret) {
9786                         eeprom->len += i;
9787                         return ret;
9788                 }
9789                 memcpy(pd + i, &val, 4);
9790         }
9791         eeprom->len += i;
9792
9793         if (len & 3) {
9794                 /* read last bytes not ending on 4 byte boundary */
9795                 pd = &data[eeprom->len];
9796                 b_count = len & 3;
9797                 b_offset = offset + len - b_count;
9798                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9799                 if (ret)
9800                         return ret;
9801                 memcpy(pd, &val, b_count);
9802                 eeprom->len += b_count;
9803         }
9804         return 0;
9805 }
9806
9807 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9808
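/* EEPROM writes are widened to an aligned span with a read-modify-write:
 * the words containing the first and last bytes are read back, a bounce
 * buffer is assembled around the caller's data, and the whole span goes
 * out in one tg3_nvram_write_block() call.  For offset = 6, len = 5 the
 * written span becomes offset 4, length 8.
 */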
9809 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9810 {
9811         struct tg3 *tp = netdev_priv(dev);
9812         int ret;
9813         u32 offset, len, b_offset, odd_len;
9814         u8 *buf;
9815         __be32 start, end;
9816
9817         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9818                 return -EAGAIN;
9819
9820         if (tg3_flag(tp, NO_NVRAM) ||
9821             eeprom->magic != TG3_EEPROM_MAGIC)
9822                 return -EINVAL;
9823
9824         offset = eeprom->offset;
9825         len = eeprom->len;
9826
9827         if ((b_offset = (offset & 3))) {
9828                 /* adjustments to start on required 4 byte boundary */
9829                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9830                 if (ret)
9831                         return ret;
9832                 len += b_offset;
9833                 offset &= ~3;
9834                 if (len < 4)
9835                         len = 4;
9836         }
9837
9838         odd_len = 0;
9839         if (len & 3) {
9840                 /* adjustments to end on required 4 byte boundary */
9841                 odd_len = 1;
9842                 len = (len + 3) & ~3;
9843                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9844                 if (ret)
9845                         return ret;
9846         }
9847
9848         buf = data;
9849         if (b_offset || odd_len) {
9850                 buf = kmalloc(len, GFP_KERNEL);
9851                 if (!buf)
9852                         return -ENOMEM;
9853                 if (b_offset)
9854                         memcpy(buf, &start, 4);
9855                 if (odd_len)
9856                         memcpy(buf+len-4, &end, 4);
9857                 memcpy(buf + b_offset, data, eeprom->len);
9858         }
9859
9860         ret = tg3_nvram_write_block(tp, offset, len, buf);
9861
9862         if (buf != data)
9863                 kfree(buf);
9864
9865         return ret;
9866 }
9867
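/* ethtool get_settings: with phylib in charge this is a straight
 * delegation to phy_ethtool_gset(); otherwise the driver reports its own
 * link_config state, using SPEED_INVALID/DUPLEX_INVALID while the
 * interface is down.
 */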
9868 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9869 {
9870         struct tg3 *tp = netdev_priv(dev);
9871
9872         if (tg3_flag(tp, USE_PHYLIB)) {
9873                 struct phy_device *phydev;
9874                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9875                         return -EAGAIN;
9876                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9877                 return phy_ethtool_gset(phydev, cmd);
9878         }
9879
9880         cmd->supported = (SUPPORTED_Autoneg);
9881
9882         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9883                 cmd->supported |= (SUPPORTED_1000baseT_Half |
9884                                    SUPPORTED_1000baseT_Full);
9885
9886         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9887                 cmd->supported |= (SUPPORTED_100baseT_Half |
9888                                   SUPPORTED_100baseT_Full |
9889                                   SUPPORTED_10baseT_Half |
9890                                   SUPPORTED_10baseT_Full |
9891                                   SUPPORTED_TP);
9892                 cmd->port = PORT_TP;
9893         } else {
9894                 cmd->supported |= SUPPORTED_FIBRE;
9895                 cmd->port = PORT_FIBRE;
9896         }
9897
9898         cmd->advertising = tp->link_config.advertising;
9899         if (netif_running(dev)) {
9900                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
9901                 cmd->duplex = tp->link_config.active_duplex;
9902         } else {
9903                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
9904                 cmd->duplex = DUPLEX_INVALID;
9905         }
9906         cmd->phy_address = tp->phy_addr;
9907         cmd->transceiver = XCVR_INTERNAL;
9908         cmd->autoneg = tp->link_config.autoneg;
9909         cmd->maxtxpkt = 0;
9910         cmd->maxrxpkt = 0;
9911         return 0;
9912 }
9913
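/* Validation rules for set_settings: with autoneg enabled the requested
 * advertisement mask is checked and then clamped to what the PHY
 * supports; with autoneg disabled, serdes parts accept only 1000/full
 * and copper parts only 10 or 100 (a forced 1000BASE-T setting is
 * rejected here).
 */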
9914 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9915 {
9916         struct tg3 *tp = netdev_priv(dev);
9917         u32 speed = ethtool_cmd_speed(cmd);
9918
9919         if (tg3_flag(tp, USE_PHYLIB)) {
9920                 struct phy_device *phydev;
9921                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9922                         return -EAGAIN;
9923                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9924                 return phy_ethtool_sset(phydev, cmd);
9925         }
9926
9927         if (cmd->autoneg != AUTONEG_ENABLE &&
9928             cmd->autoneg != AUTONEG_DISABLE)
9929                 return -EINVAL;
9930
9931         if (cmd->autoneg == AUTONEG_DISABLE &&
9932             cmd->duplex != DUPLEX_FULL &&
9933             cmd->duplex != DUPLEX_HALF)
9934                 return -EINVAL;
9935
9936         if (cmd->autoneg == AUTONEG_ENABLE) {
9937                 u32 mask = ADVERTISED_Autoneg |
9938                            ADVERTISED_Pause |
9939                            ADVERTISED_Asym_Pause;
9940
9941                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9942                         mask |= ADVERTISED_1000baseT_Half |
9943                                 ADVERTISED_1000baseT_Full;
9944
9945                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9946                         mask |= ADVERTISED_100baseT_Half |
9947                                 ADVERTISED_100baseT_Full |
9948                                 ADVERTISED_10baseT_Half |
9949                                 ADVERTISED_10baseT_Full |
9950                                 ADVERTISED_TP;
9951                 else
9952                         mask |= ADVERTISED_FIBRE;
9953
9954                 if (cmd->advertising & ~mask)
9955                         return -EINVAL;
9956
9957                 mask &= (ADVERTISED_1000baseT_Half |
9958                          ADVERTISED_1000baseT_Full |
9959                          ADVERTISED_100baseT_Half |
9960                          ADVERTISED_100baseT_Full |
9961                          ADVERTISED_10baseT_Half |
9962                          ADVERTISED_10baseT_Full);
9963
9964                 cmd->advertising &= mask;
9965         } else {
9966                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
9967                         if (speed != SPEED_1000)
9968                                 return -EINVAL;
9969
9970                         if (cmd->duplex != DUPLEX_FULL)
9971                                 return -EINVAL;
9972                 } else {
9973                         if (speed != SPEED_100 &&
9974                             speed != SPEED_10)
9975                                 return -EINVAL;
9976                 }
9977         }
9978
9979         tg3_full_lock(tp, 0);
9980
9981         tp->link_config.autoneg = cmd->autoneg;
9982         if (cmd->autoneg == AUTONEG_ENABLE) {
9983                 tp->link_config.advertising = (cmd->advertising |
9984                                               ADVERTISED_Autoneg);
9985                 tp->link_config.speed = SPEED_INVALID;
9986                 tp->link_config.duplex = DUPLEX_INVALID;
9987         } else {
9988                 tp->link_config.advertising = 0;
9989                 tp->link_config.speed = speed;
9990                 tp->link_config.duplex = cmd->duplex;
9991         }
9992
9993         tp->link_config.orig_speed = tp->link_config.speed;
9994         tp->link_config.orig_duplex = tp->link_config.duplex;
9995         tp->link_config.orig_autoneg = tp->link_config.autoneg;
9996
9997         if (netif_running(dev))
9998                 tg3_setup_phy(tp, 1);
9999
10000         tg3_full_unlock(tp);
10001
10002         return 0;
10003 }
10004
10005 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10006 {
10007         struct tg3 *tp = netdev_priv(dev);
10008
10009         strcpy(info->driver, DRV_MODULE_NAME);
10010         strcpy(info->version, DRV_MODULE_VERSION);
10011         strcpy(info->fw_version, tp->fw_ver);
10012         strcpy(info->bus_info, pci_name(tp->pdev));
10013 }
10014
10015 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10016 {
10017         struct tg3 *tp = netdev_priv(dev);
10018
10019         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10020                 wol->supported = WAKE_MAGIC;
10021         else
10022                 wol->supported = 0;
10023         wol->wolopts = 0;
10024         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10025                 wol->wolopts = WAKE_MAGIC;
10026         memset(&wol->sopass, 0, sizeof(wol->sopass));
10027 }
10028
10029 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10030 {
10031         struct tg3 *tp = netdev_priv(dev);
10032         struct device *dp = &tp->pdev->dev;
10033
10034         if (wol->wolopts & ~WAKE_MAGIC)
10035                 return -EINVAL;
10036         if ((wol->wolopts & WAKE_MAGIC) &&
10037             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10038                 return -EINVAL;
10039
10040         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10041
10042         spin_lock_bh(&tp->lock);
10043         if (device_may_wakeup(dp))
10044                 tg3_flag_set(tp, WOL_ENABLE);
10045         else
10046                 tg3_flag_clear(tp, WOL_ENABLE);
10047         spin_unlock_bh(&tp->lock);
10048
10049         return 0;
10050 }
10051
10052 static u32 tg3_get_msglevel(struct net_device *dev)
10053 {
10054         struct tg3 *tp = netdev_priv(dev);
10055         return tp->msg_enable;
10056 }
10057
10058 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10059 {
10060         struct tg3 *tp = netdev_priv(dev);
10061         tp->msg_enable = value;
10062 }
10063
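/* Restart autonegotiation, via phylib when it owns the PHY, otherwise by
 * setting BMCR_ANRESTART | BMCR_ANENABLE directly.  Refused for serdes
 * parts and while the interface is down.
 */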
10064 static int tg3_nway_reset(struct net_device *dev)
10065 {
10066         struct tg3 *tp = netdev_priv(dev);
10067         int r;
10068
10069         if (!netif_running(dev))
10070                 return -EAGAIN;
10071
10072         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10073                 return -EINVAL;
10074
10075         if (tg3_flag(tp, USE_PHYLIB)) {
10076                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10077                         return -EAGAIN;
10078                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10079         } else {
10080                 u32 bmcr;
10081
10082                 spin_lock_bh(&tp->lock);
10083                 r = -EINVAL;
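                /* BMCR ends up being read twice here; only the second
                 * read's result is checked and used.
                 */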
10084                 tg3_readphy(tp, MII_BMCR, &bmcr);
10085                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10086                     ((bmcr & BMCR_ANENABLE) ||
10087                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10088                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10089                                                    BMCR_ANENABLE);
10090                         r = 0;
10091                 }
10092                 spin_unlock_bh(&tp->lock);
10093         }
10094
10095         return r;
10096 }
10097
10098 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10099 {
10100         struct tg3 *tp = netdev_priv(dev);
10101
10102         ering->rx_max_pending = tp->rx_std_ring_mask;
10103         ering->rx_mini_max_pending = 0;
10104         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10105                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10106         else
10107                 ering->rx_jumbo_max_pending = 0;
10108
10109         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10110
10111         ering->rx_pending = tp->rx_pending;
10112         ering->rx_mini_pending = 0;
10113         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10114                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10115         else
10116                 ering->rx_jumbo_pending = 0;
10117
10118         ering->tx_pending = tp->napi[0].tx_pending;
10119 }
10120
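/* New ring sizes are bounded by the hardware ring masks, and the TX ring
 * must hold more than MAX_SKB_FRAGS descriptors (three times that on
 * parts with the TSO_BUG workaround) so a maximally fragmented skb
 * always fits.  The TX size is applied to every NAPI queue.
 */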
10121 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10122 {
10123         struct tg3 *tp = netdev_priv(dev);
10124         int i, irq_sync = 0, err = 0;
10125
10126         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10127             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10128             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10129             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10130             (tg3_flag(tp, TSO_BUG) &&
10131              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10132                 return -EINVAL;
10133
10134         if (netif_running(dev)) {
10135                 tg3_phy_stop(tp);
10136                 tg3_netif_stop(tp);
10137                 irq_sync = 1;
10138         }
10139
10140         tg3_full_lock(tp, irq_sync);
10141
10142         tp->rx_pending = ering->rx_pending;
10143
10144         if (tg3_flag(tp, MAX_RXPEND_64) &&
10145             tp->rx_pending > 63)
10146                 tp->rx_pending = 63;
10147         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10148
10149         for (i = 0; i < tp->irq_max; i++)
10150                 tp->napi[i].tx_pending = ering->tx_pending;
10151
10152         if (netif_running(dev)) {
10153                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10154                 err = tg3_restart_hw(tp, 1);
10155                 if (!err)
10156                         tg3_netif_start(tp);
10157         }
10158
10159         tg3_full_unlock(tp);
10160
10161         if (irq_sync && !err)
10162                 tg3_phy_start(tp);
10163
10164         return err;
10165 }
10166
10167 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10168 {
10169         struct tg3 *tp = netdev_priv(dev);
10170
10171         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10172
10173         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10174                 epause->rx_pause = 1;
10175         else
10176                 epause->rx_pause = 0;
10177
10178         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10179                 epause->tx_pause = 1;
10180         else
10181                 epause->tx_pause = 0;
10182 }
10183
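/* For the phylib path the requested rx/tx pause pair maps onto the
 * standard 802.3 pause advertisement bits:
 *   rx + tx -> Pause
 *   rx only -> Pause | Asym_Pause
 *   tx only -> Asym_Pause
 *   neither -> 0
 * E.g. "ethtool -A eth0 rx on tx off" advertises Pause | Asym_Pause.
 */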
10184 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10185 {
10186         struct tg3 *tp = netdev_priv(dev);
10187         int err = 0;
10188
10189         if (tg3_flag(tp, USE_PHYLIB)) {
10190                 u32 newadv;
10191                 struct phy_device *phydev;
10192
10193                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10194
10195                 if (!(phydev->supported & SUPPORTED_Pause) ||
10196                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10197                      (epause->rx_pause != epause->tx_pause)))
10198                         return -EINVAL;
10199
10200                 tp->link_config.flowctrl = 0;
10201                 if (epause->rx_pause) {
10202                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10203
10204                         if (epause->tx_pause) {
10205                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10206                                 newadv = ADVERTISED_Pause;
10207                         } else
10208                                 newadv = ADVERTISED_Pause |
10209                                          ADVERTISED_Asym_Pause;
10210                 } else if (epause->tx_pause) {
10211                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10212                         newadv = ADVERTISED_Asym_Pause;
10213                 } else
10214                         newadv = 0;
10215
10216                 if (epause->autoneg)
10217                         tg3_flag_set(tp, PAUSE_AUTONEG);
10218                 else
10219                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10220
10221                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10222                         u32 oldadv = phydev->advertising &
10223                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10224                         if (oldadv != newadv) {
10225                                 phydev->advertising &=
10226                                         ~(ADVERTISED_Pause |
10227                                           ADVERTISED_Asym_Pause);
10228                                 phydev->advertising |= newadv;
10229                                 if (phydev->autoneg) {
10230                                         /*
10231                                          * Always renegotiate the link to
10232                                          * inform our link partner of our
10233                                          * flow control settings, even if the
10234                                          * flow control is forced.  Let
10235                                          * tg3_adjust_link() do the final
10236                                          * flow control setup.
10237                                          */
10238                                         return phy_start_aneg(phydev);
10239                                 }
10240                         }
10241
10242                         if (!epause->autoneg)
10243                                 tg3_setup_flow_control(tp, 0, 0);
10244                 } else {
10245                         tp->link_config.orig_advertising &=
10246                                         ~(ADVERTISED_Pause |
10247                                           ADVERTISED_Asym_Pause);
10248                         tp->link_config.orig_advertising |= newadv;
10249                 }
10250         } else {
10251                 int irq_sync = 0;
10252
10253                 if (netif_running(dev)) {
10254                         tg3_netif_stop(tp);
10255                         irq_sync = 1;
10256                 }
10257
10258                 tg3_full_lock(tp, irq_sync);
10259
10260                 if (epause->autoneg)
10261                         tg3_flag_set(tp, PAUSE_AUTONEG);
10262                 else
10263                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10264                 if (epause->rx_pause)
10265                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10266                 else
10267                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10268                 if (epause->tx_pause)
10269                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10270                 else
10271                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10272
10273                 if (netif_running(dev)) {
10274                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10275                         err = tg3_restart_hw(tp, 1);
10276                         if (!err)
10277                                 tg3_netif_start(tp);
10278                 }
10279
10280                 tg3_full_unlock(tp);
10281         }
10282
10283         return err;
10284 }
10285
10286 static int tg3_get_sset_count(struct net_device *dev, int sset)
10287 {
10288         switch (sset) {
10289         case ETH_SS_TEST:
10290                 return TG3_NUM_TEST;
10291         case ETH_SS_STATS:
10292                 return TG3_NUM_STATS;
10293         default:
10294                 return -EOPNOTSUPP;
10295         }
10296 }
10297
10298 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10299 {
10300         switch (stringset) {
10301         case ETH_SS_STATS:
10302                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10303                 break;
10304         case ETH_SS_TEST:
10305                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10306                 break;
10307         default:
10308                 WARN_ON(1);     /* we need a WARN() */
10309                 break;
10310         }
10311 }
10312
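/* ethtool LED identify ("ethtool -p eth0 5"): returning 1 for
 * ETHTOOL_ID_ACTIVE asks the core to blink at 1 Hz, ID_ON/ID_OFF force
 * the LEDs through MAC_LED_CTRL overrides, and ID_INACTIVE restores the
 * saved led_ctrl value.
 */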
10313 static int tg3_set_phys_id(struct net_device *dev,
10314                             enum ethtool_phys_id_state state)
10315 {
10316         struct tg3 *tp = netdev_priv(dev);
10317
10318         if (!netif_running(tp->dev))
10319                 return -EAGAIN;
10320
10321         switch (state) {
10322         case ETHTOOL_ID_ACTIVE:
10323                 return 1;       /* cycle on/off once per second */
10324
10325         case ETHTOOL_ID_ON:
10326                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10327                      LED_CTRL_1000MBPS_ON |
10328                      LED_CTRL_100MBPS_ON |
10329                      LED_CTRL_10MBPS_ON |
10330                      LED_CTRL_TRAFFIC_OVERRIDE |
10331                      LED_CTRL_TRAFFIC_BLINK |
10332                      LED_CTRL_TRAFFIC_LED);
10333                 break;
10334
10335         case ETHTOOL_ID_OFF:
10336                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10337                      LED_CTRL_TRAFFIC_OVERRIDE);
10338                 break;
10339
10340         case ETHTOOL_ID_INACTIVE:
10341                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10342                 break;
10343         }
10344
10345         return 0;
10346 }
10347
10348 static void tg3_get_ethtool_stats(struct net_device *dev,
10349                                    struct ethtool_stats *estats, u64 *tmp_stats)
10350 {
10351         struct tg3 *tp = netdev_priv(dev);
10352         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10353 }
10354
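/* Read back the VPD block.  With tg3 EEPROM-format NVRAM an extended-VPD
 * entry located through the NVRAM directory is preferred, falling back
 * to the fixed TG3_NVM_VPD_OFF/LEN window; for other NVRAM formats the
 * data is fetched through PCI config space, retrying pci_read_vpd() up
 * to three times after a timeout or signal.
 */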
10355 static __be32 *tg3_vpd_readblock(struct tg3 *tp)
10356 {
10357         int i;
10358         __be32 *buf;
10359         u32 offset = 0, len = 0;
10360         u32 magic, val;
10361
10362         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10363                 return NULL;
10364
10365         if (magic == TG3_EEPROM_MAGIC) {
10366                 for (offset = TG3_NVM_DIR_START;
10367                      offset < TG3_NVM_DIR_END;
10368                      offset += TG3_NVM_DIRENT_SIZE) {
10369                         if (tg3_nvram_read(tp, offset, &val))
10370                                 return NULL;
10371
10372                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10373                             TG3_NVM_DIRTYPE_EXTVPD)
10374                                 break;
10375                 }
10376
10377                 if (offset != TG3_NVM_DIR_END) {
10378                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10379                         if (tg3_nvram_read(tp, offset + 4, &offset))
10380                                 return NULL;
10381
10382                         offset = tg3_nvram_logical_addr(tp, offset);
10383                 }
10384         }
10385
10386         if (!offset || !len) {
10387                 offset = TG3_NVM_VPD_OFF;
10388                 len = TG3_NVM_VPD_LEN;
10389         }
10390
10391         buf = kmalloc(len, GFP_KERNEL);
10392         if (buf == NULL)
10393                 return NULL;
10394
10395         if (magic == TG3_EEPROM_MAGIC) {
10396                 for (i = 0; i < len; i += 4) {
10397                         /* The data is in little-endian format in NVRAM.
10398                          * Use the big-endian read routines to preserve
10399                          * the byte order as it exists in NVRAM.
10400                          */
10401                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10402                                 goto error;
10403                 }
10404         } else {
10405                 u8 *ptr;
10406                 ssize_t cnt;
10407                 unsigned int pos = 0;
10408
10409                 ptr = (u8 *)&buf[0];
10410                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10411                         cnt = pci_read_vpd(tp->pdev, pos,
10412                                            len - pos, ptr);
10413                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10414                                 cnt = 0;
10415                         else if (cnt < 0)
10416                                 goto error;
10417                 }
10418                 if (pos != len)
10419                         goto error;
10420         }
10421
10422         return buf;
10423
10424 error:
10425         kfree(buf);
10426         return NULL;
10427 }
10428
10429 #define NVRAM_TEST_SIZE 0x100
10430 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10431 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10432 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10433 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10434 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10435
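/* NVRAM self-test.  Three integrity schemes are covered: selfboot
 * format-1 images must 8-bit-sum to zero (skipping the MBA word on
 * rev 2), selfboot HW-format images carry per-byte odd parity, and
 * legacy images get CRC32 checks (bootstrap area at 0x10, manufacturing
 * block at 0xfc) plus the VPD read-only checksum keyword.
 */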
10436 static int tg3_test_nvram(struct tg3 *tp)
10437 {
10438         u32 csum, magic;
10439         __be32 *buf;
10440         int i, j, k, err = 0, size;
10441
10442         if (tg3_flag(tp, NO_NVRAM))
10443                 return 0;
10444
10445         if (tg3_nvram_read(tp, 0, &magic) != 0)
10446                 return -EIO;
10447
10448         if (magic == TG3_EEPROM_MAGIC)
10449                 size = NVRAM_TEST_SIZE;
10450         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10451                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10452                     TG3_EEPROM_SB_FORMAT_1) {
10453                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10454                         case TG3_EEPROM_SB_REVISION_0:
10455                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10456                                 break;
10457                         case TG3_EEPROM_SB_REVISION_2:
10458                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10459                                 break;
10460                         case TG3_EEPROM_SB_REVISION_3:
10461                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10462                                 break;
10463                         default:
10464                                 return 0;
10465                         }
10466                 } else
10467                         return 0;
10468         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10469                 size = NVRAM_SELFBOOT_HW_SIZE;
10470         else
10471                 return -EIO;
10472
10473         buf = kmalloc(size, GFP_KERNEL);
10474         if (buf == NULL)
10475                 return -ENOMEM;
10476
10477         err = -EIO;
10478         for (i = 0, j = 0; i < size; i += 4, j++) {
10479                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10480                 if (err)
10481                         break;
10482         }
10483         if (i < size)
10484                 goto out;
10485
10486         /* Selfboot format */
10487         magic = be32_to_cpu(buf[0]);
10488         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10489             TG3_EEPROM_MAGIC_FW) {
10490                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10491
10492                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10493                     TG3_EEPROM_SB_REVISION_2) {
10494                         /* For rev 2, the csum doesn't include the MBA. */
10495                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10496                                 csum8 += buf8[i];
10497                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10498                                 csum8 += buf8[i];
10499                 } else {
10500                         for (i = 0; i < size; i++)
10501                                 csum8 += buf8[i];
10502                 }
10503
10504                 if (csum8 == 0) {
10505                         err = 0;
10506                         goto out;
10507                 }
10508
10509                 err = -EIO;
10510                 goto out;
10511         }
10512
10513         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10514             TG3_EEPROM_MAGIC_HW) {
10515                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10516                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10517                 u8 *buf8 = (u8 *) buf;
10518
10519                 /* Separate the parity bits and the data bytes.  */
10520                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10521                         if ((i == 0) || (i == 8)) {
10522                                 int l;
10523                                 u8 msk;
10524
10525                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10526                                         parity[k++] = buf8[i] & msk;
10527                                 i++;
10528                         } else if (i == 16) {
10529                                 int l;
10530                                 u8 msk;
10531
10532                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10533                                         parity[k++] = buf8[i] & msk;
10534                                 i++;
10535
10536                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10537                                         parity[k++] = buf8[i] & msk;
10538                                 i++;
10539                         }
10540                         data[j++] = buf8[i];
10541                 }
10542
10543                 err = -EIO;
10544                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10545                         u8 hw8 = hweight8(data[i]);
10546
10547                         if ((hw8 & 0x1) && parity[i])
10548                                 goto out;
10549                         else if (!(hw8 & 0x1) && !parity[i])
10550                                 goto out;
10551                 }
10552                 err = 0;
10553                 goto out;
10554         }
10555
10556         err = -EIO;
10557
10558         /* Bootstrap checksum at offset 0x10 */
10559         csum = calc_crc((unsigned char *) buf, 0x10);
10560         if (csum != le32_to_cpu(buf[0x10/4]))
10561                 goto out;
10562
10563         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10564         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10565         if (csum != le32_to_cpu(buf[0xfc/4]))
10566                 goto out;
10567
10568         kfree(buf);
10569
10570         buf = tg3_vpd_readblock(tp);
10571         if (!buf)
10572                 return -ENOMEM;
10573
10574         i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10575                              PCI_VPD_LRDT_RO_DATA);
10576         if (i > 0) {
10577                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10578                 if (j < 0)
10579                         goto out;
10580
10581                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10582                         goto out;
10583
10584                 i += PCI_VPD_LRDT_TAG_SIZE;
10585                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10586                                               PCI_VPD_RO_KEYWORD_CHKSUM);
10587                 if (j > 0) {
10588                         u8 csum8 = 0;
10589
10590                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
10591
10592                         for (i = 0; i <= j; i++)
10593                                 csum8 += ((u8 *)buf)[i];
10594
10595                         if (csum8)
10596                                 goto out;
10597                 }
10598         }
10599
10600         err = 0;
10601
10602 out:
10603         kfree(buf);
10604         return err;
10605 }
10606
10607 #define TG3_SERDES_TIMEOUT_SEC  2
10608 #define TG3_COPPER_TIMEOUT_SEC  6
10609
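/* Link self-test: poll netif_carrier_ok() once per second, giving serdes
 * parts 2 seconds and copper parts 6 seconds to come up.
 */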
10610 static int tg3_test_link(struct tg3 *tp)
10611 {
10612         int i, max;
10613
10614         if (!netif_running(tp->dev))
10615                 return -ENODEV;
10616
10617         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10618                 max = TG3_SERDES_TIMEOUT_SEC;
10619         else
10620                 max = TG3_COPPER_TIMEOUT_SEC;
10621
10622         for (i = 0; i < max; i++) {
10623                 if (netif_carrier_ok(tp->dev))
10624                         return 0;
10625
10626                 if (msleep_interruptible(1000))
10627                         break;
10628         }
10629
10630         return -EIO;
10631 }
10632
10633 /* Only test the commonly used registers */
10634 static int tg3_test_registers(struct tg3 *tp)
10635 {
10636         int i, is_5705, is_5750;
10637         u32 offset, read_mask, write_mask, val, save_val, read_val;
10638         static struct {
10639                 u16 offset;
10640                 u16 flags;
10641 #define TG3_FL_5705     0x1
10642 #define TG3_FL_NOT_5705 0x2
10643 #define TG3_FL_NOT_5788 0x4
10644 #define TG3_FL_NOT_5750 0x8
10645                 u32 read_mask;
10646                 u32 write_mask;
10647         } reg_tbl[] = {
10648                 /* MAC Control Registers */
10649                 { MAC_MODE, TG3_FL_NOT_5705,
10650                         0x00000000, 0x00ef6f8c },
10651                 { MAC_MODE, TG3_FL_5705,
10652                         0x00000000, 0x01ef6b8c },
10653                 { MAC_STATUS, TG3_FL_NOT_5705,
10654                         0x03800107, 0x00000000 },
10655                 { MAC_STATUS, TG3_FL_5705,
10656                         0x03800100, 0x00000000 },
10657                 { MAC_ADDR_0_HIGH, 0x0000,
10658                         0x00000000, 0x0000ffff },
10659                 { MAC_ADDR_0_LOW, 0x0000,
10660                         0x00000000, 0xffffffff },
10661                 { MAC_RX_MTU_SIZE, 0x0000,
10662                         0x00000000, 0x0000ffff },
10663                 { MAC_TX_MODE, 0x0000,
10664                         0x00000000, 0x00000070 },
10665                 { MAC_TX_LENGTHS, 0x0000,
10666                         0x00000000, 0x00003fff },
10667                 { MAC_RX_MODE, TG3_FL_NOT_5705,
10668                         0x00000000, 0x000007fc },
10669                 { MAC_RX_MODE, TG3_FL_5705,
10670                         0x00000000, 0x000007dc },
10671                 { MAC_HASH_REG_0, 0x0000,
10672                         0x00000000, 0xffffffff },
10673                 { MAC_HASH_REG_1, 0x0000,
10674                         0x00000000, 0xffffffff },
10675                 { MAC_HASH_REG_2, 0x0000,
10676                         0x00000000, 0xffffffff },
10677                 { MAC_HASH_REG_3, 0x0000,
10678                         0x00000000, 0xffffffff },
10679
10680                 /* Receive Data and Receive BD Initiator Control Registers. */
10681                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10682                         0x00000000, 0xffffffff },
10683                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10684                         0x00000000, 0xffffffff },
10685                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10686                         0x00000000, 0x00000003 },
10687                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10688                         0x00000000, 0xffffffff },
10689                 { RCVDBDI_STD_BD+0, 0x0000,
10690                         0x00000000, 0xffffffff },
10691                 { RCVDBDI_STD_BD+4, 0x0000,
10692                         0x00000000, 0xffffffff },
10693                 { RCVDBDI_STD_BD+8, 0x0000,
10694                         0x00000000, 0xffff0002 },
10695                 { RCVDBDI_STD_BD+0xc, 0x0000,
10696                         0x00000000, 0xffffffff },
10697
10698                 /* Receive BD Initiator Control Registers. */
10699                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10700                         0x00000000, 0xffffffff },
10701                 { RCVBDI_STD_THRESH, TG3_FL_5705,
10702                         0x00000000, 0x000003ff },
10703                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10704                         0x00000000, 0xffffffff },
10705
10706                 /* Host Coalescing Control Registers. */
10707                 { HOSTCC_MODE, TG3_FL_NOT_5705,
10708                         0x00000000, 0x00000004 },
10709                 { HOSTCC_MODE, TG3_FL_5705,
10710                         0x00000000, 0x000000f6 },
10711                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10712                         0x00000000, 0xffffffff },
10713                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10714                         0x00000000, 0x000003ff },
10715                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10716                         0x00000000, 0xffffffff },
10717                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10718                         0x00000000, 0x000003ff },
10719                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10720                         0x00000000, 0xffffffff },
10721                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10722                         0x00000000, 0x000000ff },
10723                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10724                         0x00000000, 0xffffffff },
10725                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10726                         0x00000000, 0x000000ff },
10727                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10728                         0x00000000, 0xffffffff },
10729                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10730                         0x00000000, 0xffffffff },
10731                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10732                         0x00000000, 0xffffffff },
10733                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10734                         0x00000000, 0x000000ff },
10735                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10736                         0x00000000, 0xffffffff },
10737                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10738                         0x00000000, 0x000000ff },
10739                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10740                         0x00000000, 0xffffffff },
10741                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10742                         0x00000000, 0xffffffff },
10743                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10744                         0x00000000, 0xffffffff },
10745                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10746                         0x00000000, 0xffffffff },
10747                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10748                         0x00000000, 0xffffffff },
10749                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10750                         0xffffffff, 0x00000000 },
10751                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10752                         0xffffffff, 0x00000000 },
10753
10754                 /* Buffer Manager Control Registers. */
10755                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10756                         0x00000000, 0x007fff80 },
10757                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10758                         0x00000000, 0x007fffff },
10759                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10760                         0x00000000, 0x0000003f },
10761                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10762                         0x00000000, 0x000001ff },
10763                 { BUFMGR_MB_HIGH_WATER, 0x0000,
10764                         0x00000000, 0x000001ff },
10765                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10766                         0xffffffff, 0x00000000 },
10767                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10768                         0xffffffff, 0x00000000 },
10769
10770                 /* Mailbox Registers */
10771                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10772                         0x00000000, 0x000001ff },
10773                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10774                         0x00000000, 0x000001ff },
10775                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10776                         0x00000000, 0x000007ff },
10777                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10778                         0x00000000, 0x000001ff },
10779
10780                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10781         };
10782
10783         is_5705 = is_5750 = 0;
10784         if (tg3_flag(tp, 5705_PLUS)) {
10785                 is_5705 = 1;
10786                 if (tg3_flag(tp, 5750_PLUS))
10787                         is_5750 = 1;
10788         }
10789
10790         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10791                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10792                         continue;
10793
10794                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10795                         continue;
10796
10797                 if (tg3_flag(tp, IS_5788) &&
10798                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
10799                         continue;
10800
10801                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10802                         continue;
10803
10804                 offset = (u32) reg_tbl[i].offset;
10805                 read_mask = reg_tbl[i].read_mask;
10806                 write_mask = reg_tbl[i].write_mask;
10807
10808                 /* Save the original register content */
10809                 save_val = tr32(offset);
10810
10811                 /* Determine the read-only value. */
10812                 read_val = save_val & read_mask;
10813
10814                 /* Write zero to the register, then make sure the read-only bits
10815                  * are not changed and the read/write bits are all zeros.
10816                  */
10817                 tw32(offset, 0);
10818
10819                 val = tr32(offset);
10820
10821                 /* Test the read-only and read/write bits. */
10822                 if (((val & read_mask) != read_val) || (val & write_mask))
10823                         goto out;
10824
10825                 /* Write ones to all the bits defined by RdMask and WrMask, then
10826                  * make sure the read-only bits are not changed and the
10827                  * read/write bits are all ones.
10828                  */
10829                 tw32(offset, read_mask | write_mask);
10830
10831                 val = tr32(offset);
10832
10833                 /* Test the read-only bits. */
10834                 if ((val & read_mask) != read_val)
10835                         goto out;
10836
10837                 /* Test the read/write bits. */
10838                 if ((val & write_mask) != write_mask)
10839                         goto out;
10840
10841                 tw32(offset, save_val);
10842         }
10843
10844         return 0;
10845
10846 out:
10847         if (netif_msg_hw(tp))
10848                 netdev_err(tp->dev,
10849                            "Register test failed at offset %x\n", offset);
10850         tw32(offset, save_val);
10851         return -EIO;
10852 }
10853
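/* Walk each test pattern (all zeros, all ones, 0xaa55a55a) through the
 * given internal-memory window one 32-bit word at a time, verifying each
 * write with a read-back.
 */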
10854 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10855 {
10856         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10857         int i;
10858         u32 j;
10859
10860         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10861                 for (j = 0; j < len; j += 4) {
10862                         u32 val;
10863
10864                         tg3_write_mem(tp, offset + j, test_pattern[i]);
10865                         tg3_read_mem(tp, offset + j, &val);
10866                         if (val != test_pattern[i])
10867                                 return -EIO;
10868                 }
10869         }
10870         return 0;
10871 }
10872
10873 static int tg3_test_memory(struct tg3 *tp)
10874 {
10875         static struct mem_entry {
10876                 u32 offset;
10877                 u32 len;
10878         } mem_tbl_570x[] = {
10879                 { 0x00000000, 0x00b50},
10880                 { 0x00002000, 0x1c000},
10881                 { 0xffffffff, 0x00000}
10882         }, mem_tbl_5705[] = {
10883                 { 0x00000100, 0x0000c},
10884                 { 0x00000200, 0x00008},
10885                 { 0x00004000, 0x00800},
10886                 { 0x00006000, 0x01000},
10887                 { 0x00008000, 0x02000},
10888                 { 0x00010000, 0x0e000},
10889                 { 0xffffffff, 0x00000}
10890         }, mem_tbl_5755[] = {
10891                 { 0x00000200, 0x00008},
10892                 { 0x00004000, 0x00800},
10893                 { 0x00006000, 0x00800},
10894                 { 0x00008000, 0x02000},
10895                 { 0x00010000, 0x0c000},
10896                 { 0xffffffff, 0x00000}
10897         }, mem_tbl_5906[] = {
10898                 { 0x00000200, 0x00008},
10899                 { 0x00004000, 0x00400},
10900                 { 0x00006000, 0x00400},
10901                 { 0x00008000, 0x01000},
10902                 { 0x00010000, 0x01000},
10903                 { 0xffffffff, 0x00000}
10904         }, mem_tbl_5717[] = {
10905                 { 0x00000200, 0x00008},
10906                 { 0x00010000, 0x0a000},
10907                 { 0x00020000, 0x13c00},
10908                 { 0xffffffff, 0x00000}
10909         }, mem_tbl_57765[] = {
10910                 { 0x00000200, 0x00008},
10911                 { 0x00004000, 0x00800},
10912                 { 0x00006000, 0x09800},
10913                 { 0x00010000, 0x0a000},
10914                 { 0xffffffff, 0x00000}
10915         };
10916         struct mem_entry *mem_tbl;
10917         int err = 0;
10918         int i;
10919
10920         if (tg3_flag(tp, 5717_PLUS))
10921                 mem_tbl = mem_tbl_5717;
10922         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10923                 mem_tbl = mem_tbl_57765;
10924         else if (tg3_flag(tp, 5755_PLUS))
10925                 mem_tbl = mem_tbl_5755;
10926         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10927                 mem_tbl = mem_tbl_5906;
10928         else if (tg3_flag(tp, 5705_PLUS))
10929                 mem_tbl = mem_tbl_5705;
10930         else
10931                 mem_tbl = mem_tbl_570x;
10932
10933         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10934                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
10935                 if (err)
10936                         break;
10937         }
10938
10939         return err;
10940 }
10941
10942 #define TG3_MAC_LOOPBACK        0
10943 #define TG3_PHY_LOOPBACK        1
10944 #define TG3_TSO_LOOPBACK        2
10945
10946 #define TG3_TSO_MSS             500
10947
10948 #define TG3_TSO_IP_HDR_LEN      20
10949 #define TG3_TSO_TCP_HDR_LEN     20
10950 #define TG3_TSO_TCP_OPT_LEN     12
10951
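/* Canned frame used by the TSO loopback test: a 16-bit ethertype
 * (0x0800, IPv4), a 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2, proto
 * TCP), and a TCP header with a 32-byte data offset, i.e. 12 bytes of
 * options (NOP, NOP, timestamp).
 */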
10952 static const u8 tg3_tso_header[] = {
10953 0x08, 0x00,
10954 0x45, 0x00, 0x00, 0x00,
10955 0x00, 0x00, 0x40, 0x00,
10956 0x40, 0x06, 0x00, 0x00,
10957 0x0a, 0x00, 0x00, 0x01,
10958 0x0a, 0x00, 0x00, 0x02,
10959 0x0d, 0x00, 0xe0, 0x00,
10960 0x00, 0x00, 0x01, 0x00,
10961 0x00, 0x00, 0x02, 0x00,
10962 0x80, 0x10, 0x10, 0x00,
10963 0x14, 0x09, 0x00, 0x00,
10964 0x01, 0x01, 0x08, 0x0a,
10965 0x11, 0x11, 0x11, 0x11,
10966 0x11, 0x11, 0x11, 0x11,
10967 };
10968
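/* Loopback self-test: build one frame (our own MAC as destination, zero
 * padding, then a counting byte pattern), queue it on the TX ring and
 * watch the RX return ring for it.  TG3_MAC_LOOPBACK wraps traffic
 * inside the MAC, while TG3_PHY_LOOPBACK and TG3_TSO_LOOPBACK use
 * BMCR_LOOPBACK in the PHY, the latter with a segmented pseudo-TSO
 * frame built from tg3_tso_header above.
 */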
10969 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
10970 {
10971         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10972         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
10973         struct sk_buff *skb, *rx_skb;
10974         u8 *tx_data;
10975         dma_addr_t map;
10976         int num_pkts, tx_len, rx_len, i, err;
10977         struct tg3_rx_buffer_desc *desc;
10978         struct tg3_napi *tnapi, *rnapi;
10979         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
10980
10981         tnapi = &tp->napi[0];
10982         rnapi = &tp->napi[0];
10983         if (tp->irq_cnt > 1) {
10984                 if (tg3_flag(tp, ENABLE_RSS))
10985                         rnapi = &tp->napi[1];
10986                 if (tg3_flag(tp, ENABLE_TSS))
10987                         tnapi = &tp->napi[1];
10988         }
10989         coal_now = tnapi->coal_now | rnapi->coal_now;
10990
10991         if (loopback_mode == TG3_MAC_LOOPBACK) {
10992                 /* HW errata - mac loopback fails in some cases on 5780.
10993                  * Normal traffic and PHY loopback are not affected by
10994                  * errata.  Also, the MAC loopback test is deprecated for
10995                  * all newer ASIC revisions.
10996                  */
10997                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10998                     tg3_flag(tp, CPMU_PRESENT))
10999                         return 0;
11000
11001                 mac_mode = tp->mac_mode &
11002                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11003                 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11004                 if (!tg3_flag(tp, 5705_PLUS))
11005                         mac_mode |= MAC_MODE_LINK_POLARITY;
11006                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11007                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11008                 else
11009                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11010                 tw32(MAC_MODE, mac_mode);
11011         } else {
11012                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11013                         tg3_phy_fet_toggle_apd(tp, false);
11014                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11015                 } else
11016                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11017
11018                 tg3_phy_toggle_automdix(tp, 0);
11019
11020                 tg3_writephy(tp, MII_BMCR, val);
11021                 udelay(40);
11022
11023                 mac_mode = tp->mac_mode &
11024                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11025                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11026                         tg3_writephy(tp, MII_TG3_FET_PTEST,
11027                                      MII_TG3_FET_PTEST_FRC_TX_LINK |
11028                                      MII_TG3_FET_PTEST_FRC_TX_LOCK);
11029                         /* The write needs to be flushed for the AC131 */
11030                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11031                                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11032                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11033                 } else
11034                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11035
11036                 /* reset to prevent losing 1st rx packet intermittently */
11037                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11038                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11039                         udelay(10);
11040                         tw32_f(MAC_RX_MODE, tp->rx_mode);
11041                 }
11042                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11043                         u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11044                         if (masked_phy_id == TG3_PHY_ID_BCM5401)
11045                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11046                         else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11047                                 mac_mode |= MAC_MODE_LINK_POLARITY;
11048                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
11049                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11050                 }
11051                 tw32(MAC_MODE, mac_mode);
11052
11053                 /* Wait for link */
11054                 for (i = 0; i < 100; i++) {
11055                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11056                                 break;
11057                         mdelay(1);
11058                 }
11059         }
11060
11061         err = -EIO;
11062
11063         tx_len = pktsz;
11064         skb = netdev_alloc_skb(tp->dev, tx_len);
11065         if (!skb)
11066                 return -ENOMEM;
11067
11068         tx_data = skb_put(skb, tx_len);
11069         memcpy(tx_data, tp->dev->dev_addr, 6);
11070         memset(tx_data + 6, 0x0, 8);
11071
11072         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11073
11074         if (loopback_mode == TG3_TSO_LOOPBACK) {
11075                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11076
11077                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11078                               TG3_TSO_TCP_OPT_LEN;
11079
11080                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11081                        sizeof(tg3_tso_header));
11082                 mss = TG3_TSO_MSS;
11083
11084                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11085                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11086
11087                 /* Set the total length field in the IP header */
11088                 iph->tot_len = htons((u16)(mss + hdr_len));
11089
11090                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11091                               TXD_FLAG_CPU_POST_DMA);
11092
11093                 if (tg3_flag(tp, HW_TSO_1) ||
11094                     tg3_flag(tp, HW_TSO_2) ||
11095                     tg3_flag(tp, HW_TSO_3)) {
11096                         struct tcphdr *th;
11097                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11098                         th = (struct tcphdr *)&tx_data[val];
11099                         th->check = 0;
11100                 } else
11101                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11102
11103                 if (tg3_flag(tp, HW_TSO_3)) {
11104                         mss |= (hdr_len & 0xc) << 12;
11105                         if (hdr_len & 0x10)
11106                                 base_flags |= 0x00000010;
11107                         base_flags |= (hdr_len & 0x3e0) << 5;
11108                 } else if (tg3_flag(tp, HW_TSO_2))
11109                         mss |= hdr_len << 9;
11110                 else if (tg3_flag(tp, HW_TSO_1) ||
11111                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11112                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11113                 } else {
11114                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11115                 }
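
                /* Worked example of the HW_TSO_3 packing above (a sketch,
                 * assuming TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
                 * TG3_TSO_TCP_OPT_LEN works out to hdr_len = 52 = 0x34):
                 *   hdr_len & 0xc   = 0x04 -> mss        |= 0x04 << 12 = 0x4000
                 *   hdr_len & 0x10  = 0x10 -> base_flags |= 0x00000010
                 *   hdr_len & 0x3e0 = 0x20 -> base_flags |= 0x20 << 5  = 0x400
                 * The header length is dword aligned, so bits 0-1 are always
                 * zero and only bits 2-9 need to be scattered across the mss
                 * and base_flags fields handed to tg3_set_txd() below.
                 */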
11116
11117                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11118         } else {
11119                 num_pkts = 1;
11120                 data_off = ETH_HLEN;
11121         }
11122
11123         for (i = data_off; i < tx_len; i++)
11124                 tx_data[i] = (u8) (i & 0xff);
11125
11126         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11127         if (pci_dma_mapping_error(tp->pdev, map)) {
11128                 dev_kfree_skb(skb);
11129                 return -EIO;
11130         }
11131
11132         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11133                rnapi->coal_now);
11134
11135         udelay(10);
11136
11137         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11138
11139         tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11140                     base_flags, (mss << 1) | 1);
11141
11142         tnapi->tx_prod++;
11143
11144         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11145         tr32_mailbox(tnapi->prodmbox);
11146
11147         udelay(10);
11148
11149         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11150         for (i = 0; i < 35; i++) {
11151                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11152                        coal_now);
11153
11154                 udelay(10);
11155
11156                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11157                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11158                 if ((tx_idx == tnapi->tx_prod) &&
11159                     (rx_idx == (rx_start_idx + num_pkts)))
11160                         break;
11161         }
11162
11163         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11164         dev_kfree_skb(skb);
11165
11166         if (tx_idx != tnapi->tx_prod)
11167                 goto out;
11168
11169         if (rx_idx != rx_start_idx + num_pkts)
11170                 goto out;
11171
11172         val = data_off;
11173         while (rx_idx != rx_start_idx) {
11174                 desc = &rnapi->rx_rcb[rx_start_idx++];
11175                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11176                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11177
11178                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11179                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11180                         goto out;
11181
11182                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11183                          - ETH_FCS_LEN;
11184
11185                 if (loopback_mode != TG3_TSO_LOOPBACK) {
11186                         if (rx_len != tx_len)
11187                                 goto out;
11188
11189                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11190                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11191                                         goto out;
11192                         } else {
11193                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11194                                         goto out;
11195                         }
11196                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11197                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11198                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11199                         goto out;
11200                 }
11201
11202                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11203                         rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11204                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11205                                              mapping);
11206                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11207                         rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11208                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11209                                              mapping);
11210                 } else
11211                         goto out;
11212
11213                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11214                                             PCI_DMA_FROMDEVICE);
11215
11216                 for (i = data_off; i < rx_len; i++, val++) {
11217                         if (*(rx_skb->data + i) != (u8) (val & 0xff))
11218                                 goto out;
11219                 }
11220         }
11221
11222         err = 0;
11223
11224         /* tg3_free_rings will unmap and free the rx_skb */
11225 out:
11226         return err;
11227 }
11228
11229 #define TG3_STD_LOOPBACK_FAILED         1
11230 #define TG3_JMB_LOOPBACK_FAILED         2
11231 #define TG3_TSO_LOOPBACK_FAILED         4
11232
11233 #define TG3_MAC_LOOPBACK_SHIFT          0
11234 #define TG3_PHY_LOOPBACK_SHIFT          4
11235 #define TG3_LOOPBACK_FAILED             0x00000077
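
/* The self-test result is thus a bitmap: MAC-loopback failures occupy the
 * low nibble and PHY-loopback failures the high nibble, with the three
 * TG3_*_LOOPBACK_FAILED bits (1 | 2 | 4 = 0x7) in each.  Hence the
 * all-failed value 0x77; a lone jumbo-frame failure in PHY loopback, for
 * example, would read back as
 * TG3_JMB_LOOPBACK_FAILED << TG3_PHY_LOOPBACK_SHIFT = 0x20.
 */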
11236
11237 static int tg3_test_loopback(struct tg3 *tp)
11238 {
11239         int err = 0;
11240         u32 eee_cap, cpmuctrl = 0;
11241
11242         if (!netif_running(tp->dev))
11243                 return TG3_LOOPBACK_FAILED;
11244
11245         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11246         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11247
11248         err = tg3_reset_hw(tp, 1);
11249         if (err) {
11250                 err = TG3_LOOPBACK_FAILED;
11251                 goto done;
11252         }
11253
11254         if (tg3_flag(tp, ENABLE_RSS)) {
11255                 int i;
11256
11257                 /* Reroute all rx packets to the 1st queue */
11258                 for (i = MAC_RSS_INDIR_TBL_0;
11259                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11260                         tw32(i, 0x0);
11261         }
11262
11263         /* Turn off gphy autopowerdown. */
11264         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11265                 tg3_phy_toggle_apd(tp, false);
11266
11267         if (tg3_flag(tp, CPMU_PRESENT)) {
11268                 int i;
11269                 u32 status;
11270
11271                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11272
11273                 /* Wait for up to 40 microseconds to acquire lock. */
11274                 for (i = 0; i < 4; i++) {
11275                         status = tr32(TG3_CPMU_MUTEX_GNT);
11276                         if (status == CPMU_MUTEX_GNT_DRIVER)
11277                                 break;
11278                         udelay(10);
11279                 }
11280
11281                 if (status != CPMU_MUTEX_GNT_DRIVER) {
11282                         err = TG3_LOOPBACK_FAILED;
11283                         goto done;
11284                 }
11285
11286                 /* Turn off link-based power management. */
11287                 cpmuctrl = tr32(TG3_CPMU_CTRL);
11288                 tw32(TG3_CPMU_CTRL,
11289                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11290                                   CPMU_CTRL_LINK_AWARE_MODE));
11291         }
11292
11293         if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11294                 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11295
11296         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11297             tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11298                 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11299
11300         if (tg3_flag(tp, CPMU_PRESENT)) {
11301                 tw32(TG3_CPMU_CTRL, cpmuctrl);
11302
11303                 /* Release the mutex */
11304                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11305         }
11306
11307         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11308             !tg3_flag(tp, USE_PHYLIB)) {
11309                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11310                         err |= TG3_STD_LOOPBACK_FAILED <<
11311                                TG3_PHY_LOOPBACK_SHIFT;
11312                 if (tg3_flag(tp, TSO_CAPABLE) &&
11313                     tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11314                         err |= TG3_TSO_LOOPBACK_FAILED <<
11315                                TG3_PHY_LOOPBACK_SHIFT;
11316                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11317                     tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11318                         err |= TG3_JMB_LOOPBACK_FAILED <<
11319                                TG3_PHY_LOOPBACK_SHIFT;
11320         }
11321
11322         /* Re-enable gphy autopowerdown. */
11323         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11324                 tg3_phy_toggle_apd(tp, true);
11325
11326 done:
11327         tp->phy_flags |= eee_cap;
11328
11329         return err;
11330 }
11331
11332 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11333                           u64 *data)
11334 {
11335         struct tg3 *tp = netdev_priv(dev);
11336
11337         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11338                 tg3_power_up(tp);
11339
11340         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11341
11342         if (tg3_test_nvram(tp) != 0) {
11343                 etest->flags |= ETH_TEST_FL_FAILED;
11344                 data[0] = 1;
11345         }
11346         if (tg3_test_link(tp) != 0) {
11347                 etest->flags |= ETH_TEST_FL_FAILED;
11348                 data[1] = 1;
11349         }
11350         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11351                 int err, err2 = 0, irq_sync = 0;
11352
11353                 if (netif_running(dev)) {
11354                         tg3_phy_stop(tp);
11355                         tg3_netif_stop(tp);
11356                         irq_sync = 1;
11357                 }
11358
11359                 tg3_full_lock(tp, irq_sync);
11360
11361                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11362                 err = tg3_nvram_lock(tp);
11363                 tg3_halt_cpu(tp, RX_CPU_BASE);
11364                 if (!tg3_flag(tp, 5705_PLUS))
11365                         tg3_halt_cpu(tp, TX_CPU_BASE);
11366                 if (!err)
11367                         tg3_nvram_unlock(tp);
11368
11369                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11370                         tg3_phy_reset(tp);
11371
11372                 if (tg3_test_registers(tp) != 0) {
11373                         etest->flags |= ETH_TEST_FL_FAILED;
11374                         data[2] = 1;
11375                 }
11376                 if (tg3_test_memory(tp) != 0) {
11377                         etest->flags |= ETH_TEST_FL_FAILED;
11378                         data[3] = 1;
11379                 }
11380                 if ((data[4] = tg3_test_loopback(tp)) != 0)
11381                         etest->flags |= ETH_TEST_FL_FAILED;
11382
11383                 tg3_full_unlock(tp);
11384
11385                 if (tg3_test_interrupt(tp) != 0) {
11386                         etest->flags |= ETH_TEST_FL_FAILED;
11387                         data[5] = 1;
11388                 }
11389
11390                 tg3_full_lock(tp, 0);
11391
11392                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11393                 if (netif_running(dev)) {
11394                         tg3_flag_set(tp, INIT_COMPLETE);
11395                         err2 = tg3_restart_hw(tp, 1);
11396                         if (!err2)
11397                                 tg3_netif_start(tp);
11398                 }
11399
11400                 tg3_full_unlock(tp);
11401
11402                 if (irq_sync && !err2)
11403                         tg3_phy_start(tp);
11404         }
11405         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11406                 tg3_power_down(tp);
11407
11408 }
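
/* The six data[] slots filled above correspond, in order, to the nvram,
 * link, register, memory, loopback and interrupt tests named in the
 * driver's ethtool string table.  From userspace the whole sequence is
 * typically driven with something like "ethtool -t eth0 offline"; without
 * the offline flag only the nvram and link checks (data[0] and data[1])
 * are performed.
 */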
11409
11410 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11411 {
11412         struct mii_ioctl_data *data = if_mii(ifr);
11413         struct tg3 *tp = netdev_priv(dev);
11414         int err;
11415
11416         if (tg3_flag(tp, USE_PHYLIB)) {
11417                 struct phy_device *phydev;
11418                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11419                         return -EAGAIN;
11420                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11421                 return phy_mii_ioctl(phydev, ifr, cmd);
11422         }
11423
11424         switch (cmd) {
11425         case SIOCGMIIPHY:
11426                 data->phy_id = tp->phy_addr;
11427
11428                 /* fallthru */
11429         case SIOCGMIIREG: {
11430                 u32 mii_regval;
11431
11432                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11433                         break;                  /* We have no PHY */
11434
11435                 if (!netif_running(dev))
11436                         return -EAGAIN;
11437
11438                 spin_lock_bh(&tp->lock);
11439                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11440                 spin_unlock_bh(&tp->lock);
11441
11442                 data->val_out = mii_regval;
11443
11444                 return err;
11445         }
11446
11447         case SIOCSMIIREG:
11448                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11449                         break;                  /* We have no PHY */
11450
11451                 if (!netif_running(dev))
11452                         return -EAGAIN;
11453
11454                 spin_lock_bh(&tp->lock);
11455                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11456                 spin_unlock_bh(&tp->lock);
11457
11458                 return err;
11459
11460         default:
11461                 /* do nothing */
11462                 break;
11463         }
11464         return -EOPNOTSUPP;
11465 }
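
/* For illustration only: a minimal userspace sketch of the MII ioctl path
 * handled above, using the standard SIOCGMIIPHY/SIOCGMIIREG interface.
 * The interface name and the choice of MII_BMSR are arbitrary, and the
 * block is guarded by #if 0 so it is never built as part of the driver.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int read_bmsr(const char *ifname)
{
        struct ifreq ifr;
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return -1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

        /* SIOCGMIIPHY fills in mii->phy_id (tp->phy_addr above). */
        if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)
                goto fail;

        /* SIOCGMIIREG then reads the requested PHY register. */
        mii->reg_num = MII_BMSR;
        if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
                goto fail;

        printf("BMSR = 0x%04x\n", mii->val_out);
        close(fd);
        return 0;

fail:
        close(fd);
        return -1;
}
#endif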
11466
11467 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11468 {
11469         struct tg3 *tp = netdev_priv(dev);
11470
11471         memcpy(ec, &tp->coal, sizeof(*ec));
11472         return 0;
11473 }
11474
11475 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11476 {
11477         struct tg3 *tp = netdev_priv(dev);
11478         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11479         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11480
11481         if (!tg3_flag(tp, 5705_PLUS)) {
11482                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11483                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11484                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11485                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11486         }
11487
11488         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11489             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11490             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11491             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11492             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11493             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11494             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11495             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11496             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11497             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11498                 return -EINVAL;
11499
11500         /* No rx interrupts will be generated if both are zero */
11501         if ((ec->rx_coalesce_usecs == 0) &&
11502             (ec->rx_max_coalesced_frames == 0))
11503                 return -EINVAL;
11504
11505         /* No tx interrupts will be generated if both are zero */
11506         if ((ec->tx_coalesce_usecs == 0) &&
11507             (ec->tx_max_coalesced_frames == 0))
11508                 return -EINVAL;
11509
11510         /* Only copy relevant parameters, ignore all others. */
11511         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11512         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11513         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11514         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11515         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11516         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11517         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11518         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11519         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11520
11521         if (netif_running(dev)) {
11522                 tg3_full_lock(tp, 0);
11523                 __tg3_set_coalesce(tp, &tp->coal);
11524                 tg3_full_unlock(tp);
11525         }
11526         return 0;
11527 }
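
/* These ethtool_coalesce fields map directly onto the standard "ethtool -C"
 * options, so (for example) "ethtool -C eth0 rx-usecs 20 rx-frames 5" is
 * what ends up in ec->rx_coalesce_usecs and ec->rx_max_coalesced_frames
 * above.
 */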
11528
11529 static const struct ethtool_ops tg3_ethtool_ops = {
11530         .get_settings           = tg3_get_settings,
11531         .set_settings           = tg3_set_settings,
11532         .get_drvinfo            = tg3_get_drvinfo,
11533         .get_regs_len           = tg3_get_regs_len,
11534         .get_regs               = tg3_get_regs,
11535         .get_wol                = tg3_get_wol,
11536         .set_wol                = tg3_set_wol,
11537         .get_msglevel           = tg3_get_msglevel,
11538         .set_msglevel           = tg3_set_msglevel,
11539         .nway_reset             = tg3_nway_reset,
11540         .get_link               = ethtool_op_get_link,
11541         .get_eeprom_len         = tg3_get_eeprom_len,
11542         .get_eeprom             = tg3_get_eeprom,
11543         .set_eeprom             = tg3_set_eeprom,
11544         .get_ringparam          = tg3_get_ringparam,
11545         .set_ringparam          = tg3_set_ringparam,
11546         .get_pauseparam         = tg3_get_pauseparam,
11547         .set_pauseparam         = tg3_set_pauseparam,
11548         .self_test              = tg3_self_test,
11549         .get_strings            = tg3_get_strings,
11550         .set_phys_id            = tg3_set_phys_id,
11551         .get_ethtool_stats      = tg3_get_ethtool_stats,
11552         .get_coalesce           = tg3_get_coalesce,
11553         .set_coalesce           = tg3_set_coalesce,
11554         .get_sset_count         = tg3_get_sset_count,
11555 };
11556
11557 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11558 {
11559         u32 cursize, val, magic;
11560
11561         tp->nvram_size = EEPROM_CHIP_SIZE;
11562
11563         if (tg3_nvram_read(tp, 0, &magic) != 0)
11564                 return;
11565
11566         if ((magic != TG3_EEPROM_MAGIC) &&
11567             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11568             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11569                 return;
11570
11571         /*
11572          * Size the chip by reading offsets at increasing powers of two.
11573          * When we encounter our validation signature, we know the addressing
11574          * has wrapped around, and thus have our chip size.
11575          */
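        /* For instance, on a hypothetical 128-byte part the reads at 0x10,
         * 0x20 and 0x40 return ordinary data, while the read at 0x80 wraps
         * back to offset 0 and returns the magic signature again, leaving
         * cursize -- and therefore nvram_size -- at 0x80.
         */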
11576         cursize = 0x10;
11577
11578         while (cursize < tp->nvram_size) {
11579                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11580                         return;
11581
11582                 if (val == magic)
11583                         break;
11584
11585                 cursize <<= 1;
11586         }
11587
11588         tp->nvram_size = cursize;
11589 }
11590
11591 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11592 {
11593         u32 val;
11594
11595         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11596                 return;
11597
11598         /* Selfboot format */
11599         if (val != TG3_EEPROM_MAGIC) {
11600                 tg3_get_eeprom_size(tp);
11601                 return;
11602         }
11603
11604         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11605                 if (val != 0) {
11606                         /* This is confusing.  We want to operate on the
11607                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11608                          * call will read from NVRAM and byteswap the data
11609                          * according to the byteswapping settings for all
11610                          * other register accesses.  This ensures the data we
11611                          * want will always reside in the lower 16-bits.
11612                          * However, the data in NVRAM is in LE format, which
11613                          * means the data from the NVRAM read will always be
11614                          * opposite the endianness of the CPU.  The 16-bit
11615                          * byteswap then brings the data to CPU endianness.
11616                          */
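                        /* A worked example, assuming a 512 KB part: if the
                         * low 16 bits of val read back as 0x0002, then
                         * swab16(0x0002) = 0x0200 = 512, and nvram_size
                         * becomes 512 * 1024 bytes.
                         */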
11617                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11618                         return;
11619                 }
11620         }
11621         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11622 }
11623
11624 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11625 {
11626         u32 nvcfg1;
11627
11628         nvcfg1 = tr32(NVRAM_CFG1);
11629         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11630                 tg3_flag_set(tp, FLASH);
11631         } else {
11632                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11633                 tw32(NVRAM_CFG1, nvcfg1);
11634         }
11635
11636         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11637             tg3_flag(tp, 5780_CLASS)) {
11638                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11639                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11640                         tp->nvram_jedecnum = JEDEC_ATMEL;
11641                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11642                         tg3_flag_set(tp, NVRAM_BUFFERED);
11643                         break;
11644                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11645                         tp->nvram_jedecnum = JEDEC_ATMEL;
11646                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11647                         break;
11648                 case FLASH_VENDOR_ATMEL_EEPROM:
11649                         tp->nvram_jedecnum = JEDEC_ATMEL;
11650                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11651                         tg3_flag_set(tp, NVRAM_BUFFERED);
11652                         break;
11653                 case FLASH_VENDOR_ST:
11654                         tp->nvram_jedecnum = JEDEC_ST;
11655                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11656                         tg3_flag_set(tp, NVRAM_BUFFERED);
11657                         break;
11658                 case FLASH_VENDOR_SAIFUN:
11659                         tp->nvram_jedecnum = JEDEC_SAIFUN;
11660                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11661                         break;
11662                 case FLASH_VENDOR_SST_SMALL:
11663                 case FLASH_VENDOR_SST_LARGE:
11664                         tp->nvram_jedecnum = JEDEC_SST;
11665                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11666                         break;
11667                 }
11668         } else {
11669                 tp->nvram_jedecnum = JEDEC_ATMEL;
11670                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11671                 tg3_flag_set(tp, NVRAM_BUFFERED);
11672         }
11673 }
11674
11675 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11676 {
11677         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11678         case FLASH_5752PAGE_SIZE_256:
11679                 tp->nvram_pagesize = 256;
11680                 break;
11681         case FLASH_5752PAGE_SIZE_512:
11682                 tp->nvram_pagesize = 512;
11683                 break;
11684         case FLASH_5752PAGE_SIZE_1K:
11685                 tp->nvram_pagesize = 1024;
11686                 break;
11687         case FLASH_5752PAGE_SIZE_2K:
11688                 tp->nvram_pagesize = 2048;
11689                 break;
11690         case FLASH_5752PAGE_SIZE_4K:
11691                 tp->nvram_pagesize = 4096;
11692                 break;
11693         case FLASH_5752PAGE_SIZE_264:
11694                 tp->nvram_pagesize = 264;
11695                 break;
11696         case FLASH_5752PAGE_SIZE_528:
11697                 tp->nvram_pagesize = 528;
11698                 break;
11699         }
11700 }
11701
11702 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11703 {
11704         u32 nvcfg1;
11705
11706         nvcfg1 = tr32(NVRAM_CFG1);
11707
11708         /* NVRAM protection for TPM */
11709         if (nvcfg1 & (1 << 27))
11710                 tg3_flag_set(tp, PROTECTED_NVRAM);
11711
11712         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11713         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11714         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11715                 tp->nvram_jedecnum = JEDEC_ATMEL;
11716                 tg3_flag_set(tp, NVRAM_BUFFERED);
11717                 break;
11718         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11719                 tp->nvram_jedecnum = JEDEC_ATMEL;
11720                 tg3_flag_set(tp, NVRAM_BUFFERED);
11721                 tg3_flag_set(tp, FLASH);
11722                 break;
11723         case FLASH_5752VENDOR_ST_M45PE10:
11724         case FLASH_5752VENDOR_ST_M45PE20:
11725         case FLASH_5752VENDOR_ST_M45PE40:
11726                 tp->nvram_jedecnum = JEDEC_ST;
11727                 tg3_flag_set(tp, NVRAM_BUFFERED);
11728                 tg3_flag_set(tp, FLASH);
11729                 break;
11730         }
11731
11732         if (tg3_flag(tp, FLASH)) {
11733                 tg3_nvram_get_pagesize(tp, nvcfg1);
11734         } else {
11735                 /* For eeprom, set pagesize to maximum eeprom size */
11736                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11737
11738                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11739                 tw32(NVRAM_CFG1, nvcfg1);
11740         }
11741 }
11742
11743 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11744 {
11745         u32 nvcfg1, protect = 0;
11746
11747         nvcfg1 = tr32(NVRAM_CFG1);
11748
11749         /* NVRAM protection for TPM */
11750         if (nvcfg1 & (1 << 27)) {
11751                 tg3_flag_set(tp, PROTECTED_NVRAM);
11752                 protect = 1;
11753         }
11754
11755         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11756         switch (nvcfg1) {
11757         case FLASH_5755VENDOR_ATMEL_FLASH_1:
11758         case FLASH_5755VENDOR_ATMEL_FLASH_2:
11759         case FLASH_5755VENDOR_ATMEL_FLASH_3:
11760         case FLASH_5755VENDOR_ATMEL_FLASH_5:
11761                 tp->nvram_jedecnum = JEDEC_ATMEL;
11762                 tg3_flag_set(tp, NVRAM_BUFFERED);
11763                 tg3_flag_set(tp, FLASH);
11764                 tp->nvram_pagesize = 264;
11765                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11766                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11767                         tp->nvram_size = (protect ? 0x3e200 :
11768                                           TG3_NVRAM_SIZE_512KB);
11769                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11770                         tp->nvram_size = (protect ? 0x1f200 :
11771                                           TG3_NVRAM_SIZE_256KB);
11772                 else
11773                         tp->nvram_size = (protect ? 0x1f200 :
11774                                           TG3_NVRAM_SIZE_128KB);
11775                 break;
11776         case FLASH_5752VENDOR_ST_M45PE10:
11777         case FLASH_5752VENDOR_ST_M45PE20:
11778         case FLASH_5752VENDOR_ST_M45PE40:
11779                 tp->nvram_jedecnum = JEDEC_ST;
11780                 tg3_flag_set(tp, NVRAM_BUFFERED);
11781                 tg3_flag_set(tp, FLASH);
11782                 tp->nvram_pagesize = 256;
11783                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11784                         tp->nvram_size = (protect ?
11785                                           TG3_NVRAM_SIZE_64KB :
11786                                           TG3_NVRAM_SIZE_128KB);
11787                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11788                         tp->nvram_size = (protect ?
11789                                           TG3_NVRAM_SIZE_64KB :
11790                                           TG3_NVRAM_SIZE_256KB);
11791                 else
11792                         tp->nvram_size = (protect ?
11793                                           TG3_NVRAM_SIZE_128KB :
11794                                           TG3_NVRAM_SIZE_512KB);
11795                 break;
11796         }
11797 }
11798
11799 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11800 {
11801         u32 nvcfg1;
11802
11803         nvcfg1 = tr32(NVRAM_CFG1);
11804
11805         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11806         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11807         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11808         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11809         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11810                 tp->nvram_jedecnum = JEDEC_ATMEL;
11811                 tg3_flag_set(tp, NVRAM_BUFFERED);
11812                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11813
11814                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11815                 tw32(NVRAM_CFG1, nvcfg1);
11816                 break;
11817         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11818         case FLASH_5755VENDOR_ATMEL_FLASH_1:
11819         case FLASH_5755VENDOR_ATMEL_FLASH_2:
11820         case FLASH_5755VENDOR_ATMEL_FLASH_3:
11821                 tp->nvram_jedecnum = JEDEC_ATMEL;
11822                 tg3_flag_set(tp, NVRAM_BUFFERED);
11823                 tg3_flag_set(tp, FLASH);
11824                 tp->nvram_pagesize = 264;
11825                 break;
11826         case FLASH_5752VENDOR_ST_M45PE10:
11827         case FLASH_5752VENDOR_ST_M45PE20:
11828         case FLASH_5752VENDOR_ST_M45PE40:
11829                 tp->nvram_jedecnum = JEDEC_ST;
11830                 tg3_flag_set(tp, NVRAM_BUFFERED);
11831                 tg3_flag_set(tp, FLASH);
11832                 tp->nvram_pagesize = 256;
11833                 break;
11834         }
11835 }
11836
11837 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11838 {
11839         u32 nvcfg1, protect = 0;
11840
11841         nvcfg1 = tr32(NVRAM_CFG1);
11842
11843         /* NVRAM protection for TPM */
11844         if (nvcfg1 & (1 << 27)) {
11845                 tg3_flag_set(tp, PROTECTED_NVRAM);
11846                 protect = 1;
11847         }
11848
11849         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11850         switch (nvcfg1) {
11851         case FLASH_5761VENDOR_ATMEL_ADB021D:
11852         case FLASH_5761VENDOR_ATMEL_ADB041D:
11853         case FLASH_5761VENDOR_ATMEL_ADB081D:
11854         case FLASH_5761VENDOR_ATMEL_ADB161D:
11855         case FLASH_5761VENDOR_ATMEL_MDB021D:
11856         case FLASH_5761VENDOR_ATMEL_MDB041D:
11857         case FLASH_5761VENDOR_ATMEL_MDB081D:
11858         case FLASH_5761VENDOR_ATMEL_MDB161D:
11859                 tp->nvram_jedecnum = JEDEC_ATMEL;
11860                 tg3_flag_set(tp, NVRAM_BUFFERED);
11861                 tg3_flag_set(tp, FLASH);
11862                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11863                 tp->nvram_pagesize = 256;
11864                 break;
11865         case FLASH_5761VENDOR_ST_A_M45PE20:
11866         case FLASH_5761VENDOR_ST_A_M45PE40:
11867         case FLASH_5761VENDOR_ST_A_M45PE80:
11868         case FLASH_5761VENDOR_ST_A_M45PE16:
11869         case FLASH_5761VENDOR_ST_M_M45PE20:
11870         case FLASH_5761VENDOR_ST_M_M45PE40:
11871         case FLASH_5761VENDOR_ST_M_M45PE80:
11872         case FLASH_5761VENDOR_ST_M_M45PE16:
11873                 tp->nvram_jedecnum = JEDEC_ST;
11874                 tg3_flag_set(tp, NVRAM_BUFFERED);
11875                 tg3_flag_set(tp, FLASH);
11876                 tp->nvram_pagesize = 256;
11877                 break;
11878         }
11879
11880         if (protect) {
11881                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11882         } else {
11883                 switch (nvcfg1) {
11884                 case FLASH_5761VENDOR_ATMEL_ADB161D:
11885                 case FLASH_5761VENDOR_ATMEL_MDB161D:
11886                 case FLASH_5761VENDOR_ST_A_M45PE16:
11887                 case FLASH_5761VENDOR_ST_M_M45PE16:
11888                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11889                         break;
11890                 case FLASH_5761VENDOR_ATMEL_ADB081D:
11891                 case FLASH_5761VENDOR_ATMEL_MDB081D:
11892                 case FLASH_5761VENDOR_ST_A_M45PE80:
11893                 case FLASH_5761VENDOR_ST_M_M45PE80:
11894                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11895                         break;
11896                 case FLASH_5761VENDOR_ATMEL_ADB041D:
11897                 case FLASH_5761VENDOR_ATMEL_MDB041D:
11898                 case FLASH_5761VENDOR_ST_A_M45PE40:
11899                 case FLASH_5761VENDOR_ST_M_M45PE40:
11900                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11901                         break;
11902                 case FLASH_5761VENDOR_ATMEL_ADB021D:
11903                 case FLASH_5761VENDOR_ATMEL_MDB021D:
11904                 case FLASH_5761VENDOR_ST_A_M45PE20:
11905                 case FLASH_5761VENDOR_ST_M_M45PE20:
11906                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11907                         break;
11908                 }
11909         }
11910 }
11911
11912 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11913 {
11914         tp->nvram_jedecnum = JEDEC_ATMEL;
11915         tg3_flag_set(tp, NVRAM_BUFFERED);
11916         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11917 }
11918
11919 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11920 {
11921         u32 nvcfg1;
11922
11923         nvcfg1 = tr32(NVRAM_CFG1);
11924
11925         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11926         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11927         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11928                 tp->nvram_jedecnum = JEDEC_ATMEL;
11929                 tg3_flag_set(tp, NVRAM_BUFFERED);
11930                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11931
11932                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11933                 tw32(NVRAM_CFG1, nvcfg1);
11934                 return;
11935         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11936         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11937         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11938         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11939         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11940         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11941         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11942                 tp->nvram_jedecnum = JEDEC_ATMEL;
11943                 tg3_flag_set(tp, NVRAM_BUFFERED);
11944                 tg3_flag_set(tp, FLASH);
11945
11946                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11947                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11948                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11949                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11950                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11951                         break;
11952                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11953                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11954                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11955                         break;
11956                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11957                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11958                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11959                         break;
11960                 }
11961                 break;
11962         case FLASH_5752VENDOR_ST_M45PE10:
11963         case FLASH_5752VENDOR_ST_M45PE20:
11964         case FLASH_5752VENDOR_ST_M45PE40:
11965                 tp->nvram_jedecnum = JEDEC_ST;
11966                 tg3_flag_set(tp, NVRAM_BUFFERED);
11967                 tg3_flag_set(tp, FLASH);
11968
11969                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11970                 case FLASH_5752VENDOR_ST_M45PE10:
11971                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11972                         break;
11973                 case FLASH_5752VENDOR_ST_M45PE20:
11974                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11975                         break;
11976                 case FLASH_5752VENDOR_ST_M45PE40:
11977                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11978                         break;
11979                 }
11980                 break;
11981         default:
11982                 tg3_flag_set(tp, NO_NVRAM);
11983                 return;
11984         }
11985
11986         tg3_nvram_get_pagesize(tp, nvcfg1);
11987         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11988                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11989 }
11990
11991
11992 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11993 {
11994         u32 nvcfg1;
11995
11996         nvcfg1 = tr32(NVRAM_CFG1);
11997
11998         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11999         case FLASH_5717VENDOR_ATMEL_EEPROM:
12000         case FLASH_5717VENDOR_MICRO_EEPROM:
12001                 tp->nvram_jedecnum = JEDEC_ATMEL;
12002                 tg3_flag_set(tp, NVRAM_BUFFERED);
12003                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12004
12005                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12006                 tw32(NVRAM_CFG1, nvcfg1);
12007                 return;
12008         case FLASH_5717VENDOR_ATMEL_MDB011D:
12009         case FLASH_5717VENDOR_ATMEL_ADB011B:
12010         case FLASH_5717VENDOR_ATMEL_ADB011D:
12011         case FLASH_5717VENDOR_ATMEL_MDB021D:
12012         case FLASH_5717VENDOR_ATMEL_ADB021B:
12013         case FLASH_5717VENDOR_ATMEL_ADB021D:
12014         case FLASH_5717VENDOR_ATMEL_45USPT:
12015                 tp->nvram_jedecnum = JEDEC_ATMEL;
12016                 tg3_flag_set(tp, NVRAM_BUFFERED);
12017                 tg3_flag_set(tp, FLASH);
12018
12019                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12020                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12021                         /* Detect size with tg3_nvram_get_size() */
12022                         break;
12023                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12024                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12025                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12026                         break;
12027                 default:
12028                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12029                         break;
12030                 }
12031                 break;
12032         case FLASH_5717VENDOR_ST_M_M25PE10:
12033         case FLASH_5717VENDOR_ST_A_M25PE10:
12034         case FLASH_5717VENDOR_ST_M_M45PE10:
12035         case FLASH_5717VENDOR_ST_A_M45PE10:
12036         case FLASH_5717VENDOR_ST_M_M25PE20:
12037         case FLASH_5717VENDOR_ST_A_M25PE20:
12038         case FLASH_5717VENDOR_ST_M_M45PE20:
12039         case FLASH_5717VENDOR_ST_A_M45PE20:
12040         case FLASH_5717VENDOR_ST_25USPT:
12041         case FLASH_5717VENDOR_ST_45USPT:
12042                 tp->nvram_jedecnum = JEDEC_ST;
12043                 tg3_flag_set(tp, NVRAM_BUFFERED);
12044                 tg3_flag_set(tp, FLASH);
12045
12046                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12047                 case FLASH_5717VENDOR_ST_M_M25PE20:
12048                 case FLASH_5717VENDOR_ST_M_M45PE20:
12049                         /* Detect size with tg3_nvram_get_size() */
12050                         break;
12051                 case FLASH_5717VENDOR_ST_A_M25PE20:
12052                 case FLASH_5717VENDOR_ST_A_M45PE20:
12053                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12054                         break;
12055                 default:
12056                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12057                         break;
12058                 }
12059                 break;
12060         default:
12061                 tg3_flag_set(tp, NO_NVRAM);
12062                 return;
12063         }
12064
12065         tg3_nvram_get_pagesize(tp, nvcfg1);
12066         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12067                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12068 }
12069
12070 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12071 {
12072         u32 nvcfg1, nvmpinstrp;
12073
12074         nvcfg1 = tr32(NVRAM_CFG1);
12075         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12076
12077         switch (nvmpinstrp) {
12078         case FLASH_5720_EEPROM_HD:
12079         case FLASH_5720_EEPROM_LD:
12080                 tp->nvram_jedecnum = JEDEC_ATMEL;
12081                 tg3_flag_set(tp, NVRAM_BUFFERED);
12082
12083                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12084                 tw32(NVRAM_CFG1, nvcfg1);
12085                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12086                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12087                 else
12088                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12089                 return;
12090         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12091         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12092         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12093         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12094         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12095         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12096         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12097         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12098         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12099         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12100         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12101         case FLASH_5720VENDOR_ATMEL_45USPT:
12102                 tp->nvram_jedecnum = JEDEC_ATMEL;
12103                 tg3_flag_set(tp, NVRAM_BUFFERED);
12104                 tg3_flag_set(tp, FLASH);
12105
12106                 switch (nvmpinstrp) {
12107                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12108                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12109                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12110                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12111                         break;
12112                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12113                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12114                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12115                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12116                         break;
12117                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12118                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12119                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12120                         break;
12121                 default:
12122                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12123                         break;
12124                 }
12125                 break;
12126         case FLASH_5720VENDOR_M_ST_M25PE10:
12127         case FLASH_5720VENDOR_M_ST_M45PE10:
12128         case FLASH_5720VENDOR_A_ST_M25PE10:
12129         case FLASH_5720VENDOR_A_ST_M45PE10:
12130         case FLASH_5720VENDOR_M_ST_M25PE20:
12131         case FLASH_5720VENDOR_M_ST_M45PE20:
12132         case FLASH_5720VENDOR_A_ST_M25PE20:
12133         case FLASH_5720VENDOR_A_ST_M45PE20:
12134         case FLASH_5720VENDOR_M_ST_M25PE40:
12135         case FLASH_5720VENDOR_M_ST_M45PE40:
12136         case FLASH_5720VENDOR_A_ST_M25PE40:
12137         case FLASH_5720VENDOR_A_ST_M45PE40:
12138         case FLASH_5720VENDOR_M_ST_M25PE80:
12139         case FLASH_5720VENDOR_M_ST_M45PE80:
12140         case FLASH_5720VENDOR_A_ST_M25PE80:
12141         case FLASH_5720VENDOR_A_ST_M45PE80:
12142         case FLASH_5720VENDOR_ST_25USPT:
12143         case FLASH_5720VENDOR_ST_45USPT:
12144                 tp->nvram_jedecnum = JEDEC_ST;
12145                 tg3_flag_set(tp, NVRAM_BUFFERED);
12146                 tg3_flag_set(tp, FLASH);
12147
12148                 switch (nvmpinstrp) {
12149                 case FLASH_5720VENDOR_M_ST_M25PE20:
12150                 case FLASH_5720VENDOR_M_ST_M45PE20:
12151                 case FLASH_5720VENDOR_A_ST_M25PE20:
12152                 case FLASH_5720VENDOR_A_ST_M45PE20:
12153                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12154                         break;
12155                 case FLASH_5720VENDOR_M_ST_M25PE40:
12156                 case FLASH_5720VENDOR_M_ST_M45PE40:
12157                 case FLASH_5720VENDOR_A_ST_M25PE40:
12158                 case FLASH_5720VENDOR_A_ST_M45PE40:
12159                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12160                         break;
12161                 case FLASH_5720VENDOR_M_ST_M25PE80:
12162                 case FLASH_5720VENDOR_M_ST_M45PE80:
12163                 case FLASH_5720VENDOR_A_ST_M25PE80:
12164                 case FLASH_5720VENDOR_A_ST_M45PE80:
12165                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12166                         break;
12167                 default:
12168                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12169                         break;
12170                 }
12171                 break;
12172         default:
12173                 tg3_flag_set(tp, NO_NVRAM);
12174                 return;
12175         }
12176
12177         tg3_nvram_get_pagesize(tp, nvcfg1);
12178         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12179                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12180 }
12181
12182 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12183 static void __devinit tg3_nvram_init(struct tg3 *tp)
12184 {
12185         tw32_f(GRC_EEPROM_ADDR,
12186              (EEPROM_ADDR_FSM_RESET |
12187               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12188                EEPROM_ADDR_CLKPERD_SHIFT)));
12189
12190         msleep(1);
12191
12192         /* Enable seeprom accesses. */
12193         tw32_f(GRC_LOCAL_CTRL,
12194              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12195         udelay(100);
12196
12197         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12198             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12199                 tg3_flag_set(tp, NVRAM);
12200
12201                 if (tg3_nvram_lock(tp)) {
12202                         netdev_warn(tp->dev,
12203                                     "Cannot get nvram lock, %s failed\n",
12204                                     __func__);
12205                         return;
12206                 }
12207                 tg3_enable_nvram_access(tp);
12208
12209                 tp->nvram_size = 0;
12210
12211                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12212                         tg3_get_5752_nvram_info(tp);
12213                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12214                         tg3_get_5755_nvram_info(tp);
12215                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12216                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12217                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12218                         tg3_get_5787_nvram_info(tp);
12219                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12220                         tg3_get_5761_nvram_info(tp);
12221                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12222                         tg3_get_5906_nvram_info(tp);
12223                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12224                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12225                         tg3_get_57780_nvram_info(tp);
12226                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12227                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12228                         tg3_get_5717_nvram_info(tp);
12229                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12230                         tg3_get_5720_nvram_info(tp);
12231                 else
12232                         tg3_get_nvram_info(tp);
12233
12234                 if (tp->nvram_size == 0)
12235                         tg3_get_nvram_size(tp);
12236
12237                 tg3_disable_nvram_access(tp);
12238                 tg3_nvram_unlock(tp);
12239
12240         } else {
12241                 tg3_flag_clear(tp, NVRAM);
12242                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12243
12244                 tg3_get_eeprom_size(tp);
12245         }
12246 }
12247
12248 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12249                                     u32 offset, u32 len, u8 *buf)
12250 {
12251         int i, j, rc = 0;
12252         u32 val;
12253
12254         for (i = 0; i < len; i += 4) {
12255                 u32 addr;
12256                 __be32 data;
12257
12258                 addr = offset + i;
12259
12260                 memcpy(&data, buf + i, 4);
12261
12262                 /*
12263                  * The SEEPROM interface expects the data to always be opposite
12264                  * the native endian format.  We accomplish this by reversing
12265                  * all the operations that would have been performed on the
12266                  * data from a call to tg3_nvram_read_be32().
12267                  */
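                /* Concretely: if buf holds the bytes 12 34 56 78, the __be32
                 * load plus be32_to_cpu() yields 0x12345678 on any CPU, and
                 * swab32() then writes 0x78563412 to the data register --
                 * exactly the transformation a tg3_nvram_read_be32() of the
                 * same location would undo.
                 */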
12268                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12269
12270                 val = tr32(GRC_EEPROM_ADDR);
12271                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12272
12273                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12274                         EEPROM_ADDR_READ);
12275                 tw32(GRC_EEPROM_ADDR, val |
12276                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12277                         (addr & EEPROM_ADDR_ADDR_MASK) |
12278                         EEPROM_ADDR_START |
12279                         EEPROM_ADDR_WRITE);
12280
12281                 for (j = 0; j < 1000; j++) {
12282                         val = tr32(GRC_EEPROM_ADDR);
12283
12284                         if (val & EEPROM_ADDR_COMPLETE)
12285                                 break;
12286                         msleep(1);
12287                 }
12288                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12289                         rc = -EBUSY;
12290                         break;
12291                 }
12292         }
12293
12294         return rc;
12295 }
12296
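/* Unbuffered (true flash) parts can only be programmed a full page at a
 * time, so the loop below is a read-modify-write cycle: read the whole page
 * into a bounce buffer, merge in the caller's bytes, erase the page, then
 * stream the buffer back out with FIRST/LAST framing.  As a sketch, assuming
 * a 256-byte page size: writing 8 bytes at offset 0x104 reads page
 * 0x100-0x1ff into tmp, patches tmp[4..11], erases the page and rewrites
 * all 256 bytes.
 */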
12297 /* offset and length are dword aligned */
12298 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12299                 u8 *buf)
12300 {
12301         int ret = 0;
12302         u32 pagesize = tp->nvram_pagesize;
12303         u32 pagemask = pagesize - 1;
12304         u32 nvram_cmd;
12305         u8 *tmp;
12306
12307         tmp = kmalloc(pagesize, GFP_KERNEL);
12308         if (tmp == NULL)
12309                 return -ENOMEM;
12310
12311         while (len) {
12312                 int j;
12313                 u32 phy_addr, page_off, size;
12314
12315                 phy_addr = offset & ~pagemask;
12316
12317                 for (j = 0; j < pagesize; j += 4) {
12318                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12319                                                   (__be32 *) (tmp + j));
12320                         if (ret)
12321                                 break;
12322                 }
12323                 if (ret)
12324                         break;
12325
12326                 page_off = offset & pagemask;
12327                 size = pagesize;
12328                 if (len < size)
12329                         size = len;
12330
12331                 len -= size;
12332
12333                 memcpy(tmp + page_off, buf, size);
12334
12335                 offset = offset + (pagesize - page_off);
12336
12337                 tg3_enable_nvram_access(tp);
12338
12339                 /*
12340                  * Before we can erase the flash page, we need
12341                  * to issue a special "write enable" command.
12342                  */
12343                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12344
12345                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12346                         break;
12347
12348                 /* Erase the target page */
12349                 tw32(NVRAM_ADDR, phy_addr);
12350
12351                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12352                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12353
12354                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12355                         break;
12356
12357                 /* Issue another write enable to start the write. */
12358                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12359
12360                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12361                         break;
12362
12363                 for (j = 0; j < pagesize; j += 4) {
12364                         __be32 data;
12365
12366                         data = *((__be32 *) (tmp + j));
12367
12368                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12369
12370                         tw32(NVRAM_ADDR, phy_addr + j);
12371
12372                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12373                                 NVRAM_CMD_WR;
12374
12375                         if (j == 0)
12376                                 nvram_cmd |= NVRAM_CMD_FIRST;
12377                         else if (j == (pagesize - 4))
12378                                 nvram_cmd |= NVRAM_CMD_LAST;
12379
12380                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12381                                 break;
12382                 }
12383                 if (ret)
12384                         break;
12385         }
12386
12387         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12388         tg3_nvram_exec_cmd(tp, nvram_cmd);
12389
12390         kfree(tmp);
12391
12392         return ret;
12393 }
12394
12395 /* offset and length are dword aligned */
12396 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12397                 u8 *buf)
12398 {
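              /* Buffered flash parts accept streaming writes: each dword
               * is written individually, with NVRAM_CMD_FIRST marking the
               * start of a page and NVRAM_CMD_LAST the end of a page or
               * of the whole transfer.
               */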
12399         int i, ret = 0;
12400
12401         for (i = 0; i < len; i += 4, offset += 4) {
12402                 u32 page_off, phy_addr, nvram_cmd;
12403                 __be32 data;
12404
12405                 memcpy(&data, buf + i, 4);
12406                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12407
12408                 page_off = offset % tp->nvram_pagesize;
12409
12410                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12411
12412                 tw32(NVRAM_ADDR, phy_addr);
12413
12414                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12415
12416                 if (page_off == 0 || i == 0)
12417                         nvram_cmd |= NVRAM_CMD_FIRST;
12418                 if (page_off == (tp->nvram_pagesize - 4))
12419                         nvram_cmd |= NVRAM_CMD_LAST;
12420
12421                 if (i == (len - 4))
12422                         nvram_cmd |= NVRAM_CMD_LAST;
12423
12424                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12425                     !tg3_flag(tp, 5755_PLUS) &&
12426                     (tp->nvram_jedecnum == JEDEC_ST) &&
12427                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12428
12429                         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_WREN |
12430                                                      NVRAM_CMD_GO |
12431                                                      NVRAM_CMD_DONE);
12432                         if (ret)
12433                                 break;
12434                 }
12435                 if (!tg3_flag(tp, FLASH)) {
12436                         /* We always do complete word writes to eeprom. */
12437                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12438                 }
12439
12440                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12441                         break;
12442         }
12443         return ret;
12444 }
12445
12446 /* offset and length are dword aligned */
12447 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12448 {
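              /* Deassert the external write-protect GPIO if one is in use,
               * dispatch to the EEPROM or NVRAM write path, then restore
               * write protection.
               */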
12449         int ret;
12450
12451         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12452                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12453                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12454                 udelay(40);
12455         }
12456
12457         if (!tg3_flag(tp, NVRAM)) {
12458                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12459         } else {
12460                 u32 grc_mode;
12461
12462                 ret = tg3_nvram_lock(tp);
12463                 if (ret)
12464                         return ret;
12465
12466                 tg3_enable_nvram_access(tp);
12467                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12468                         tw32(NVRAM_WRITE1, 0x406);
12469
12470                 grc_mode = tr32(GRC_MODE);
12471                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12472
12473                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12474                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12475                                 buf);
12476                 } else {
12477                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12478                                 buf);
12479                 }
12480
12481                 grc_mode = tr32(GRC_MODE);
12482                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12483
12484                 tg3_disable_nvram_access(tp);
12485                 tg3_nvram_unlock(tp);
12486         }
12487
12488         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12489                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12490                 udelay(40);
12491         }
12492
12493         return ret;
12494 }
12495
12496 struct subsys_tbl_ent {
12497         u16 subsys_vendor, subsys_devid;
12498         u32 phy_id;
12499 };
12500
12501 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12502         /* Broadcom boards. */
12503         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12504           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12505         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12506           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12507         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12508           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12509         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12510           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12511         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12512           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12513         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12514           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12515         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12516           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12517         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12518           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12519         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12520           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12521         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12522           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12523         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12524           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12525
12526         /* 3com boards. */
12527         { TG3PCI_SUBVENDOR_ID_3COM,
12528           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12529         { TG3PCI_SUBVENDOR_ID_3COM,
12530           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12531         { TG3PCI_SUBVENDOR_ID_3COM,
12532           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12533         { TG3PCI_SUBVENDOR_ID_3COM,
12534           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12535         { TG3PCI_SUBVENDOR_ID_3COM,
12536           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12537
12538         /* DELL boards. */
12539         { TG3PCI_SUBVENDOR_ID_DELL,
12540           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12541         { TG3PCI_SUBVENDOR_ID_DELL,
12542           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12543         { TG3PCI_SUBVENDOR_ID_DELL,
12544           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12545         { TG3PCI_SUBVENDOR_ID_DELL,
12546           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12547
12548         /* Compaq boards. */
12549         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12550           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12551         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12552           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12553         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12554           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12555         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12556           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12557         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12558           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12559
12560         /* IBM boards. */
12561         { TG3PCI_SUBVENDOR_ID_IBM,
12562           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12563 };
12564
12565 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12566 {
12567         int i;
12568
12569         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12570                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12571                      tp->pdev->subsystem_vendor) &&
12572                     (subsys_id_to_phy_id[i].subsys_devid ==
12573                      tp->pdev->subsystem_device))
12574                         return &subsys_id_to_phy_id[i];
12575         }
12576         return NULL;
12577 }
12578
12579 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12580 {
12581         u32 val;
12582         u16 pmcsr;
12583
12584         /* On some early chips the SRAM cannot be accessed in D3hot state,
12585          * so we need to make sure we're in D0.
12586          */
12587         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12588         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12589         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12590         msleep(1);
12591
12592         /* Make sure register accesses (indirect or otherwise)
12593          * will function correctly.
12594          */
12595         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12596                                tp->misc_host_ctrl);
12597
12598         /* The memory arbiter has to be enabled in order for SRAM accesses
12599          * to succeed.  Normally on powerup the tg3 chip firmware will make
12600          * sure it is enabled, but other entities such as system netboot
12601          * code might disable it.
12602          */
12603         val = tr32(MEMARB_MODE);
12604         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12605
12606         tp->phy_id = TG3_PHY_ID_INVALID;
12607         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12608
12609         /* Assume an onboard device and WOL capable by default.  */
12610         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12611         tg3_flag_set(tp, WOL_CAP);
12612
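              /* The 5906 exposes its configuration through the VCPU shadow
               * register rather than NIC SRAM, so handle it here and skip
               * the SRAM-based parsing below.
               */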
12613         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12614                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12615                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12616                         tg3_flag_set(tp, IS_NIC);
12617                 }
12618                 val = tr32(VCPU_CFGSHDW);
12619                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12620                         tg3_flag_set(tp, ASPM_WORKAROUND);
12621                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12622                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12623                         tg3_flag_set(tp, WOL_ENABLE);
12624                         device_set_wakeup_enable(&tp->pdev->dev, true);
12625                 }
12626                 goto done;
12627         }
12628
12629         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12630         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12631                 u32 nic_cfg, led_cfg;
12632                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12633                 int eeprom_phy_serdes = 0;
12634
12635                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12636                 tp->nic_sram_data_cfg = nic_cfg;
12637
12638                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12639                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12640                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12641                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12642                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12643                     (ver > 0) && (ver < 0x100))
12644                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12645
12646                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12647                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12648
12649                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12650                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12651                         eeprom_phy_serdes = 1;
12652
12653                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12654                 if (nic_phy_id != 0) {
12655                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12656                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12657
12658                         eeprom_phy_id  = (id1 >> 16) << 10;
12659                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
12660                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
12661                 } else
12662                         eeprom_phy_id = 0;
12663
12664                 tp->phy_id = eeprom_phy_id;
12665                 if (eeprom_phy_serdes) {
12666                         if (!tg3_flag(tp, 5705_PLUS))
12667                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12668                         else
12669                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12670                 }
12671
12672                 if (tg3_flag(tp, 5750_PLUS))
12673                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12674                                     SHASTA_EXT_LED_MODE_MASK);
12675                 else
12676                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12677
12678                 switch (led_cfg) {
12679                 default:
12680                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12681                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12682                         break;
12683
12684                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12685                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12686                         break;
12687
12688                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12689                         tp->led_ctrl = LED_CTRL_MODE_MAC;
12690
12691                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12692                          * read, as with some older 5700/5701 bootcode.
12693                          */
12694                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12695                             ASIC_REV_5700 ||
12696                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
12697                             ASIC_REV_5701)
12698                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12699
12700                         break;
12701
12702                 case SHASTA_EXT_LED_SHARED:
12703                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
12704                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12705                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12706                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12707                                                  LED_CTRL_MODE_PHY_2);
12708                         break;
12709
12710                 case SHASTA_EXT_LED_MAC:
12711                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12712                         break;
12713
12714                 case SHASTA_EXT_LED_COMBO:
12715                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
12716                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12717                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12718                                                  LED_CTRL_MODE_PHY_2);
12719                         break;
12720
12721                 }
12722
12723                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12724                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12725                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12726                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12727
12728                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12729                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12730
12731                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12732                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12733                         if ((tp->pdev->subsystem_vendor ==
12734                              PCI_VENDOR_ID_ARIMA) &&
12735                             (tp->pdev->subsystem_device == 0x205a ||
12736                              tp->pdev->subsystem_device == 0x2063))
12737                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12738                 } else {
12739                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12740                         tg3_flag_set(tp, IS_NIC);
12741                 }
12742
12743                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12744                         tg3_flag_set(tp, ENABLE_ASF);
12745                         if (tg3_flag(tp, 5750_PLUS))
12746                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12747                 }
12748
12749                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12750                     tg3_flag(tp, 5750_PLUS))
12751                         tg3_flag_set(tp, ENABLE_APE);
12752
12753                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12754                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12755                         tg3_flag_clear(tp, WOL_CAP);
12756
12757                 if (tg3_flag(tp, WOL_CAP) &&
12758                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12759                         tg3_flag_set(tp, WOL_ENABLE);
12760                         device_set_wakeup_enable(&tp->pdev->dev, true);
12761                 }
12762
12763                 if (cfg2 & (1 << 17))
12764                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12765
12766                 /* serdes signal pre-emphasis in register 0x590 is set by
12767                  * the bootcode if bit 18 is set. */
12768                 if (cfg2 & (1 << 18))
12769                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12770
12771                 if ((tg3_flag(tp, 57765_PLUS) ||
12772                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12773                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12774                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12775                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12776
12777                 if (tg3_flag(tp, PCI_EXPRESS) &&
12778                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12779                     !tg3_flag(tp, 57765_PLUS)) {
12780                         u32 cfg3;
12781
12782                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12783                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12784                                 tg3_flag_set(tp, ASPM_WORKAROUND);
12785                 }
12786
12787                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12788                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12789                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12790                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12791                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12792                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12793         }
12794 done:
12795         if (tg3_flag(tp, WOL_CAP))
12796                 device_set_wakeup_enable(&tp->pdev->dev,
12797                                          tg3_flag(tp, WOL_ENABLE));
12798         else
12799                 device_set_wakeup_capable(&tp->pdev->dev, false);
12800 }
12801
12802 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12803 {
12804         int i;
12805         u32 val;
12806
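              /* Kick off the command by pulsing OTP_CTRL_OTP_CMD_START:
               * write the command with START set, then with START clear.
               */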
12807         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12808         tw32(OTP_CTRL, cmd);
12809
12810         /* Wait for up to 1 ms for command to execute. */
12811         for (i = 0; i < 100; i++) {
12812                 val = tr32(OTP_STATUS);
12813                 if (val & OTP_STATUS_CMD_DONE)
12814                         break;
12815                 udelay(10);
12816         }
12817
12818         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12819 }
12820
12821 /* Read the gphy configuration from the OTP region of the chip.  The gphy
12822  * configuration is a 32-bit value that straddles the alignment boundary.
12823  * We do two 32-bit reads and then shift and merge the results.
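       * For example, reads of 0xAAAABBBB then 0xCCCCDDDD merge to 0xBBBBCCCC.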
12824  */
12825 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12826 {
12827         u32 bhalf_otp, thalf_otp;
12828
12829         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12830
12831         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12832                 return 0;
12833
12834         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12835
12836         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12837                 return 0;
12838
12839         thalf_otp = tr32(OTP_READ_DATA);
12840
12841         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12842
12843         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12844                 return 0;
12845
12846         bhalf_otp = tr32(OTP_READ_DATA);
12847
12848         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12849 }
12850
12851 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12852 {
12853         u32 adv = ADVERTISED_Autoneg |
12854                   ADVERTISED_Pause;
12855
12856         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12857                 adv |= ADVERTISED_1000baseT_Half |
12858                        ADVERTISED_1000baseT_Full;
12859
12860         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12861                 adv |= ADVERTISED_100baseT_Half |
12862                        ADVERTISED_100baseT_Full |
12863                        ADVERTISED_10baseT_Half |
12864                        ADVERTISED_10baseT_Full |
12865                        ADVERTISED_TP;
12866         else
12867                 adv |= ADVERTISED_FIBRE;
12868
12869         tp->link_config.advertising = adv;
12870         tp->link_config.speed = SPEED_INVALID;
12871         tp->link_config.duplex = DUPLEX_INVALID;
12872         tp->link_config.autoneg = AUTONEG_ENABLE;
12873         tp->link_config.active_speed = SPEED_INVALID;
12874         tp->link_config.active_duplex = DUPLEX_INVALID;
12875         tp->link_config.orig_speed = SPEED_INVALID;
12876         tp->link_config.orig_duplex = DUPLEX_INVALID;
12877         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12878 }
12879
12880 static int __devinit tg3_phy_probe(struct tg3 *tp)
12881 {
12882         u32 hw_phy_id_1, hw_phy_id_2;
12883         u32 hw_phy_id, hw_phy_id_masked;
12884         int err;
12885
12886         /* flow control autonegotiation is default behavior */
12887         tg3_flag_set(tp, PAUSE_AUTONEG);
12888         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12889
12890         if (tg3_flag(tp, USE_PHYLIB))
12891                 return tg3_phy_init(tp);
12892
12893         /* Reading the PHY ID register can conflict with ASF
12894          * firmware access to the PHY hardware.
12895          */
12896         err = 0;
12897         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
12898                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12899         } else {
12900                 /* Now read the physical PHY_ID from the chip and verify
12901                  * that it is sane.  If it doesn't look good, we fall back
12902                  * to the PHY ID found in the eeprom area and, failing
12903                  * that, the hard-coded subsystem-ID table.
12904                  */
12905                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12906                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12907
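                      /* Pack the raw MII PHYSID1/PHYSID2 words into the
                       * internal format used by the TG3_PHY_ID_* constants.
                       */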
12908                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
12909                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12910                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
12911
12912                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12913         }
12914
12915         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12916                 tp->phy_id = hw_phy_id;
12917                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12918                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12919                 else
12920                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12921         } else {
12922                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12923                         /* Do nothing, phy ID already set up in
12924                          * tg3_get_eeprom_hw_cfg().
12925                          */
12926                 } else {
12927                         struct subsys_tbl_ent *p;
12928
12929                         /* No eeprom signature?  Try the hardcoded
12930                          * subsys device table.
12931                          */
12932                         p = tg3_lookup_by_subsys(tp);
12933                         if (!p)
12934                                 return -ENODEV;
12935
12936                         tp->phy_id = p->phy_id;
12937                         if (!tp->phy_id ||
12938                             tp->phy_id == TG3_PHY_ID_BCM8002)
12939                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12940                 }
12941         }
12942
12943         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12944             ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
12945               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
12946              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12947               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12948                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12949
12950         tg3_phy_init_link_config(tp);
12951
12952         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12953             !tg3_flag(tp, ENABLE_APE) &&
12954             !tg3_flag(tp, ENABLE_ASF)) {
12955                 u32 bmsr, adv_reg, tg3_ctrl, mask;
12956
12957                 tg3_readphy(tp, MII_BMSR, &bmsr);
12958                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12959                     (bmsr & BMSR_LSTATUS))
12960                         goto skip_phy_reset;
12961
12962                 err = tg3_phy_reset(tp);
12963                 if (err)
12964                         return err;
12965
12966                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
12967                            ADVERTISE_100HALF | ADVERTISE_100FULL |
12968                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
12969                 tg3_ctrl = 0;
12970                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
12971                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
12972                                     MII_TG3_CTRL_ADV_1000_FULL);
12973                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12974                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
12975                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
12976                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
12977                 }
12978
12979                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12980                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12981                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12982                 if (!tg3_copper_is_advertising_all(tp, mask)) {
12983                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12984
12985                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12986                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12987
12988                         tg3_writephy(tp, MII_BMCR,
12989                                      BMCR_ANENABLE | BMCR_ANRESTART);
12990                 }
12991                 tg3_phy_set_wirespeed(tp);
12992
12993                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12994                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12995                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12996         }
12997
12998 skip_phy_reset:
12999         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13000                 err = tg3_init_5401phy_dsp(tp);
13001                 if (err)
13002                         return err;
13003
13004                 err = tg3_init_5401phy_dsp(tp);
13005         }
13006
13007         return err;
13008 }
13009
13010 static void __devinit tg3_read_vpd(struct tg3 *tp)
13011 {
13012         u8 *vpd_data;
13013         unsigned int block_end, rosize, len;
13014         int j, i = 0;
13015
13016         vpd_data = (u8 *)tg3_vpd_readblock(tp);
13017         if (!vpd_data)
13018                 goto out_no_vpd;
13019
13020         i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13021                              PCI_VPD_LRDT_RO_DATA);
13022         if (i < 0)
13023                 goto out_not_found;
13024
13025         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13026         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13027         i += PCI_VPD_LRDT_TAG_SIZE;
13028
13029         if (block_end > TG3_NVM_VPD_LEN)
13030                 goto out_not_found;
13031
13032         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13033                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13034         if (j > 0) {
13035                 len = pci_vpd_info_field_size(&vpd_data[j]);
13036
13037                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
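                      /* "1028" is Dell's PCI vendor ID in ASCII; only Dell
                       * boards are expected to carry a bootcode version in
                       * the VENDOR0 keyword.
                       */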
13038                 if (j + len > block_end || len != 4 ||
13039                     memcmp(&vpd_data[j], "1028", 4))
13040                         goto partno;
13041
13042                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13043                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13044                 if (j < 0)
13045                         goto partno;
13046
13047                 len = pci_vpd_info_field_size(&vpd_data[j]);
13048
13049                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13050                 if (j + len > block_end)
13051                         goto partno;
13052
13053                 memcpy(tp->fw_ver, &vpd_data[j], len);
13054                 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13055         }
13056
13057 partno:
13058         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13059                                       PCI_VPD_RO_KEYWORD_PARTNO);
13060         if (i < 0)
13061                 goto out_not_found;
13062
13063         len = pci_vpd_info_field_size(&vpd_data[i]);
13064
13065         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13066         if (len > TG3_BPN_SIZE ||
13067             (len + i) > TG3_NVM_VPD_LEN)
13068                 goto out_not_found;
13069
13070         memcpy(tp->board_part_number, &vpd_data[i], len);
13071
13072 out_not_found:
13073         kfree(vpd_data);
13074         if (tp->board_part_number[0])
13075                 return;
13076
13077 out_no_vpd:
13078         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13079                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13080                         strcpy(tp->board_part_number, "BCM5717");
13081                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13082                         strcpy(tp->board_part_number, "BCM5718");
13083                 else
13084                         goto nomatch;
13085         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13086                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13087                         strcpy(tp->board_part_number, "BCM57780");
13088                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13089                         strcpy(tp->board_part_number, "BCM57760");
13090                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13091                         strcpy(tp->board_part_number, "BCM57790");
13092                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13093                         strcpy(tp->board_part_number, "BCM57788");
13094                 else
13095                         goto nomatch;
13096         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13097                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13098                         strcpy(tp->board_part_number, "BCM57761");
13099                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13100                         strcpy(tp->board_part_number, "BCM57765");
13101                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13102                         strcpy(tp->board_part_number, "BCM57781");
13103                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13104                         strcpy(tp->board_part_number, "BCM57785");
13105                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13106                         strcpy(tp->board_part_number, "BCM57791");
13107                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13108                         strcpy(tp->board_part_number, "BCM57795");
13109                 else
13110                         goto nomatch;
13111         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13112                 strcpy(tp->board_part_number, "BCM95906");
13113         } else {
13114 nomatch:
13115                 strcpy(tp->board_part_number, "none");
13116         }
13117 }
13118
13119 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13120 {
13121         u32 val;
13122
13123         if (tg3_nvram_read(tp, offset, &val) ||
13124             (val & 0xfc000000) != 0x0c000000 ||
13125             tg3_nvram_read(tp, offset + 4, &val) ||
13126             val != 0)
13127                 return 0;
13128
13129         return 1;
13130 }
13131
13132 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13133 {
13134         u32 val, offset, start, ver_offset;
13135         int i, dst_off;
13136         bool newver = false;
13137
13138         if (tg3_nvram_read(tp, 0xc, &offset) ||
13139             tg3_nvram_read(tp, 0x4, &start))
13140                 return;
13141
13142         offset = tg3_nvram_logical_addr(tp, offset);
13143
13144         if (tg3_nvram_read(tp, offset, &val))
13145                 return;
13146
13147         if ((val & 0xfc000000) == 0x0c000000) {
13148                 if (tg3_nvram_read(tp, offset + 4, &val))
13149                         return;
13150
13151                 if (val == 0)
13152                         newver = true;
13153         }
13154
13155         dst_off = strlen(tp->fw_ver);
13156
13157         if (newver) {
13158                 if (TG3_VER_SIZE - dst_off < 16 ||
13159                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13160                         return;
13161
13162                 offset = offset + ver_offset - start;
13163                 for (i = 0; i < 16; i += 4) {
13164                         __be32 v;
13165                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13166                                 return;
13167
13168                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13169                 }
13170         } else {
13171                 u32 major, minor;
13172
13173                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13174                         return;
13175
13176                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13177                         TG3_NVM_BCVER_MAJSFT;
13178                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13179                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13180                          "v%d.%02d", major, minor);
13181         }
13182 }
13183
13184 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13185 {
13186         u32 val, major, minor;
13187
13188         /* Use native endian representation */
13189         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13190                 return;
13191
13192         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13193                 TG3_NVM_HWSB_CFG1_MAJSFT;
13194         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13195                 TG3_NVM_HWSB_CFG1_MINSFT;
13196
13197         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13198 }
13199
13200 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13201 {
13202         u32 offset, major, minor, build;
13203
13204         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13205
13206         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13207                 return;
13208
13209         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13210         case TG3_EEPROM_SB_REVISION_0:
13211                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13212                 break;
13213         case TG3_EEPROM_SB_REVISION_2:
13214                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13215                 break;
13216         case TG3_EEPROM_SB_REVISION_3:
13217                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13218                 break;
13219         case TG3_EEPROM_SB_REVISION_4:
13220                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13221                 break;
13222         case TG3_EEPROM_SB_REVISION_5:
13223                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13224                 break;
13225         case TG3_EEPROM_SB_REVISION_6:
13226                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13227                 break;
13228         default:
13229                 return;
13230         }
13231
13232         if (tg3_nvram_read(tp, offset, &val))
13233                 return;
13234
13235         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13236                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13237         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13238                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13239         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13240
13241         if (minor > 99 || build > 26)
13242                 return;
13243
13244         offset = strlen(tp->fw_ver);
13245         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13246                  " v%d.%02d", major, minor);
13247
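              /* A non-zero build number becomes a letter suffix, so e.g.
               * major 1, minor 4, build 1 renders as "sb v1.04a".
               */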
13248         if (build > 0) {
13249                 offset = strlen(tp->fw_ver);
13250                 if (offset < TG3_VER_SIZE - 1)
13251                         tp->fw_ver[offset] = 'a' + build - 1;
13252         }
13253 }
13254
13255 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13256 {
13257         u32 val, offset, start;
13258         int i, vlen;
13259
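              /* Scan the NVRAM directory for the ASF init entry, which
               * leads to the management firmware image and, from its
               * header, the firmware version string.
               */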
13260         for (offset = TG3_NVM_DIR_START;
13261              offset < TG3_NVM_DIR_END;
13262              offset += TG3_NVM_DIRENT_SIZE) {
13263                 if (tg3_nvram_read(tp, offset, &val))
13264                         return;
13265
13266                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13267                         break;
13268         }
13269
13270         if (offset == TG3_NVM_DIR_END)
13271                 return;
13272
13273         if (!tg3_flag(tp, 5705_PLUS))
13274                 start = 0x08000000;
13275         else if (tg3_nvram_read(tp, offset - 4, &start))
13276                 return;
13277
13278         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13279             !tg3_fw_img_is_valid(tp, offset) ||
13280             tg3_nvram_read(tp, offset + 8, &val))
13281                 return;
13282
13283         offset += val - start;
13284
13285         vlen = strlen(tp->fw_ver);
13286
13287         tp->fw_ver[vlen++] = ',';
13288         tp->fw_ver[vlen++] = ' ';
13289
13290         for (i = 0; i < 4; i++) {
13291                 __be32 v;
13292                 if (tg3_nvram_read_be32(tp, offset, &v))
13293                         return;
13294
13295                 offset += sizeof(v);
13296
13297                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13298                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13299                         break;
13300                 }
13301
13302                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13303                 vlen += sizeof(v);
13304         }
13305 }
13306
13307 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13308 {
13309         int vlen;
13310         u32 apedata;
13311         char *fwtype;
13312
13313         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13314                 return;
13315
13316         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13317         if (apedata != APE_SEG_SIG_MAGIC)
13318                 return;
13319
13320         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13321         if (!(apedata & APE_FW_STATUS_READY))
13322                 return;
13323
13324         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13325
13326         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13327                 tg3_flag_set(tp, APE_HAS_NCSI);
13328                 fwtype = "NCSI";
13329         } else {
13330                 fwtype = "DASH";
13331         }
13332
13333         vlen = strlen(tp->fw_ver);
13334
13335         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13336                  fwtype,
13337                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13338                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13339                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13340                  (apedata & APE_FW_VERSION_BLDMSK));
13341 }
13342
13343 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13344 {
13345         u32 val;
13346         bool vpd_vers = false;
13347
13348         if (tp->fw_ver[0] != 0)
13349                 vpd_vers = true;
13350
13351         if (tg3_flag(tp, NO_NVRAM)) {
13352                 strcat(tp->fw_ver, "sb");
13353                 return;
13354         }
13355
13356         if (tg3_nvram_read(tp, 0, &val))
13357                 return;
13358
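              /* The first NVRAM word identifies the image type: a full
               * bootcode image, a self-boot (sb) image, or a hardware
               * self-boot (hwsb) image.
               */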
13359         if (val == TG3_EEPROM_MAGIC)
13360                 tg3_read_bc_ver(tp);
13361         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13362                 tg3_read_sb_ver(tp, val);
13363         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13364                 tg3_read_hwsb_ver(tp);
13365         else
13366                 return;
13367
13368         if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
13369                 goto done;
13370
13371         tg3_read_mgmtfw_ver(tp);
13372
13373 done:
13374         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13375 }
13376
13377 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13378
13379 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13380 {
13381         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13382                 return TG3_RX_RET_MAX_SIZE_5717;
13383         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13384                 return TG3_RX_RET_MAX_SIZE_5700;
13385         else
13386                 return TG3_RX_RET_MAX_SIZE_5705;
13387 }
13388
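      /* Host bridges known to reorder posted PCI writes; when one is
       * present the driver must apply a mailbox write-reorder workaround.
       */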
13389 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13390         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13391         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13392         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13393         { },
13394 };
13395
13396 static int __devinit tg3_get_invariants(struct tg3 *tp)
13397 {
13398         u32 misc_ctrl_reg;
13399         u32 pci_state_reg, grc_misc_cfg;
13400         u32 val;
13401         u16 pci_cmd;
13402         int err;
13403
13404         /* Force memory write invalidate off.  If we leave it on,
13405          * then on 5700_BX chips we have to enable a workaround.
13406          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13407          * to match the cacheline size.  The Broadcom driver has this
13408          * workaround but turns MWI off at all times, so it never uses
13409          * it.  This seems to suggest that the workaround is insufficient.
13410          */
13411         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13412         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13413         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13414
13415         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13416          * has the register indirect write enable bit set before
13417          * we try to access any of the MMIO registers.  It is also
13418          * critical that the PCI-X hw workaround situation is decided
13419          * before that as well.
13420          */
13421         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13422                               &misc_ctrl_reg);
13423
13424         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13425                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13426         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13427                 u32 prod_id_asic_rev;
13428
13429                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13430                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13431                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13432                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13433                         pci_read_config_dword(tp->pdev,
13434                                               TG3PCI_GEN2_PRODID_ASICREV,
13435                                               &prod_id_asic_rev);
13436                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13437                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13438                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13439                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13440                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13441                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13442                         pci_read_config_dword(tp->pdev,
13443                                               TG3PCI_GEN15_PRODID_ASICREV,
13444                                               &prod_id_asic_rev);
13445                 else
13446                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13447                                               &prod_id_asic_rev);
13448
13449                 tp->pci_chip_rev_id = prod_id_asic_rev;
13450         }
13451
13452         /* Wrong chip ID in 5752 A0. This code can be removed later
13453          * as A0 is not in production.
13454          */
13455         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13456                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13457
13458         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13459          * we need to disable memory and use config. cycles
13460          * only to access all registers. The 5702/03 chips
13461          * can mistakenly decode the special cycles from the
13462          * ICH chipsets as memory write cycles, causing corruption
13463          * of register and memory space. Only certain ICH bridges
13464          * will drive special cycles with non-zero data during the
13465          * address phase which can fall within the 5703's address
13466          * range. This is not an ICH bug as the PCI spec allows
13467          * non-zero address during special cycles. However, only
13468          * these ICH bridges are known to drive non-zero addresses
13469          * during special cycles.
13470          *
13471          * Since special cycles do not cross PCI bridges, we only
13472          * enable this workaround if the 5703 is on the secondary
13473          * bus of these ICH bridges.
13474          */
13475         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13476             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13477                 static struct tg3_dev_id {
13478                         u32     vendor;
13479                         u32     device;
13480                         u32     rev;
13481                 } ich_chipsets[] = {
13482                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13483                           PCI_ANY_ID },
13484                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13485                           PCI_ANY_ID },
13486                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13487                           0xa },
13488                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13489                           PCI_ANY_ID },
13490                         { },
13491                 };
13492                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13493                 struct pci_dev *bridge = NULL;
13494
13495                 while (pci_id->vendor != 0) {
13496                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13497                                                 bridge);
13498                         if (!bridge) {
13499                                 pci_id++;
13500                                 continue;
13501                         }
13502                         if (pci_id->rev != PCI_ANY_ID) {
13503                                 if (bridge->revision > pci_id->rev)
13504                                         continue;
13505                         }
13506                         if (bridge->subordinate &&
13507                             (bridge->subordinate->number ==
13508                              tp->pdev->bus->number)) {
13509                                 tg3_flag_set(tp, ICH_WORKAROUND);
13510                                 pci_dev_put(bridge);
13511                                 break;
13512                         }
13513                 }
13514         }
13515
13516         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13517                 static struct tg3_dev_id {
13518                         u32     vendor;
13519                         u32     device;
13520                 } bridge_chipsets[] = {
13521                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13522                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13523                         { },
13524                 };
13525                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13526                 struct pci_dev *bridge = NULL;
13527
13528                 while (pci_id->vendor != 0) {
13529                         bridge = pci_get_device(pci_id->vendor,
13530                                                 pci_id->device,
13531                                                 bridge);
13532                         if (!bridge) {
13533                                 pci_id++;
13534                                 continue;
13535                         }
13536                         if (bridge->subordinate &&
13537                             (bridge->subordinate->number <=
13538                              tp->pdev->bus->number) &&
13539                             (bridge->subordinate->subordinate >=
13540                              tp->pdev->bus->number)) {
13541                                 tg3_flag_set(tp, 5701_DMA_BUG);
13542                                 pci_dev_put(bridge);
13543                                 break;
13544                         }
13545                 }
13546         }
13547
13548         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13549          * DMA addresses > 40-bit. This bridge may have additional
13550          * 57xx devices behind it, as in some 4-port NIC designs.
13551          * Any tg3 device found behind the bridge will also need the 40-bit
13552          * DMA workaround.
13553          */
13554         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13555             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13556                 tg3_flag_set(tp, 5780_CLASS);
13557                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13558                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13559         } else {
13560                 struct pci_dev *bridge = NULL;
13561
13562                 do {
13563                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13564                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13565                                                 bridge);
13566                         if (bridge && bridge->subordinate &&
13567                             (bridge->subordinate->number <=
13568                              tp->pdev->bus->number) &&
13569                             (bridge->subordinate->subordinate >=
13570                              tp->pdev->bus->number)) {
13571                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13572                                 pci_dev_put(bridge);
13573                                 break;
13574                         }
13575                 } while (bridge);
13576         }
13577
13578         /* Initialize misc host control in PCI block. */
13579         tp->misc_host_ctrl |= (misc_ctrl_reg &
13580                                MISC_HOST_CTRL_CHIPREV);
13581         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13582                                tp->misc_host_ctrl);
13583
13584         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13585             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13586             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13587             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13588                 tp->pdev_peer = tg3_find_peer(tp);
13589
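              /* Establish the chip-family hierarchy: newer chips inherit
               * every older generation's *_PLUS flag, so e.g. a 5717-class
               * chip is also 57765_PLUS, 5755_PLUS, 5750_PLUS and 5705_PLUS.
               */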
13590         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13591             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13592             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13593                 tg3_flag_set(tp, 5717_PLUS);
13594
13595         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13596             tg3_flag(tp, 5717_PLUS))
13597                 tg3_flag_set(tp, 57765_PLUS);
13598
13599         /* Intentionally exclude ASIC_REV_5906 */
13600         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13601             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13602             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13603             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13604             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13605             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13606             tg3_flag(tp, 57765_PLUS))
13607                 tg3_flag_set(tp, 5755_PLUS);
13608
13609         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13610             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13611             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13612             tg3_flag(tp, 5755_PLUS) ||
13613             tg3_flag(tp, 5780_CLASS))
13614                 tg3_flag_set(tp, 5750_PLUS);
13615
13616         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13617             tg3_flag(tp, 5750_PLUS))
13618                 tg3_flag_set(tp, 5705_PLUS);
13619
13620         /* 5700 B0 chips do not support checksumming correctly due
13621          * to hardware bugs.
13622          */
13623         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
13624                 u32 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
13625
13626                 if (tg3_flag(tp, 5755_PLUS))
13627                         features |= NETIF_F_IPV6_CSUM;
13628                 tp->dev->features |= features;
13629                 tp->dev->hw_features |= features;
13630                 tp->dev->vlan_features |= features;
13631         }
13632
13633         /* Determine TSO capabilities */
13634         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13635                 ; /* Do nothing. HW bug. */
13636         else if (tg3_flag(tp, 57765_PLUS))
13637                 tg3_flag_set(tp, HW_TSO_3);
13638         else if (tg3_flag(tp, 5755_PLUS) ||
13639                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13640                 tg3_flag_set(tp, HW_TSO_2);
13641         else if (tg3_flag(tp, 5750_PLUS)) {
13642                 tg3_flag_set(tp, HW_TSO_1);
13643                 tg3_flag_set(tp, TSO_BUG);
13644                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13645                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13646                         tg3_flag_clear(tp, TSO_BUG);
13647         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13648                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13649                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13650                 tg3_flag_set(tp, TSO_BUG);
13651                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13652                         tp->fw_needed = FIRMWARE_TG3TSO5;
13653                 else
13654                         tp->fw_needed = FIRMWARE_TG3TSO;
13655         }
13656
13657         tp->irq_max = 1;
13658
13659         if (tg3_flag(tp, 5750_PLUS)) {
13660                 tg3_flag_set(tp, SUPPORT_MSI);
13661                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13662                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13663                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13664                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13665                      tp->pdev_peer == tp->pdev))
13666                         tg3_flag_clear(tp, SUPPORT_MSI);
13667
13668                 if (tg3_flag(tp, 5755_PLUS) ||
13669                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13670                         tg3_flag_set(tp, 1SHOT_MSI);
13671                 }
13672
13673                 if (tg3_flag(tp, 57765_PLUS)) {
13674                         tg3_flag_set(tp, SUPPORT_MSIX);
13675                         tp->irq_max = TG3_IRQ_MAX_VECS;
13676                 }
13677         }
13678
13679         /* All chips can get confused if TX buffers
13680          * straddle the 4GB address boundary.
13681          */
13682         tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
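        /* Illustration only -- a minimal sketch, not the driver's actual
         * helper: a buffer mapped at DMA address 'map' (hypothetical name)
         * with length 'len' straddles the 4GB boundary exactly when the
         * upper 32 address bits of its first and last bytes differ.
         */
#if 0
        bool crosses_4g = ((u64)map >> 32) != (((u64)map + len - 1) >> 32);
#endif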
13683
13684         if (tg3_flag(tp, 5755_PLUS))
13685                 tg3_flag_set(tp, SHORT_DMA_BUG);
13686         else
13687                 tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
13688
13689         if (tg3_flag(tp, 5717_PLUS))
13690                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13691
13692         if (tg3_flag(tp, 57765_PLUS) &&
13693             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13694                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13695
13696         if (!tg3_flag(tp, 5705_PLUS) ||
13697             tg3_flag(tp, 5780_CLASS) ||
13698             tg3_flag(tp, USE_JUMBO_BDFLAG))
13699                 tg3_flag_set(tp, JUMBO_CAPABLE);
13700
13701         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13702                               &pci_state_reg);
13703
13704         tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13705         if (tp->pcie_cap != 0) {
13706                 u16 lnkctl;
13707
13708                 tg3_flag_set(tp, PCI_EXPRESS);
13709
13710                 tp->pcie_readrq = 4096;
13711                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13712                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13713                         tp->pcie_readrq = 2048;
13714
13715                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13716
13717                 pci_read_config_word(tp->pdev,
13718                                      tp->pcie_cap + PCI_EXP_LNKCTL,
13719                                      &lnkctl);
13720                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13721                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13722                                 tg3_flag_clear(tp, HW_TSO_2);
13723                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13724                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13725                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13726                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13727                                 tg3_flag_set(tp, CLKREQ_BUG);
13728                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13729                         tg3_flag_set(tp, L1PLLPD_EN);
13730                 }
13731         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13732                 tg3_flag_set(tp, PCI_EXPRESS);
13733         } else if (!tg3_flag(tp, 5705_PLUS) ||
13734                    tg3_flag(tp, 5780_CLASS)) {
13735                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13736                 if (!tp->pcix_cap) {
13737                         dev_err(&tp->pdev->dev,
13738                                 "Cannot find PCI-X capability, aborting\n");
13739                         return -EIO;
13740                 }
13741
13742                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13743                         tg3_flag_set(tp, PCIX_MODE);
13744         }
13745
13746         /* If we have an AMD 762 or VIA K8T800 chipset, write
13747          * reordering of mailbox register writes done by the host
13748          * controller can cause major trouble.  We read back from
13749          * every mailbox register write to force the writes to be
13750          * posted to the chip in order.
13751          */
13752         if (pci_dev_present(tg3_write_reorder_chipsets) &&
13753             !tg3_flag(tp, PCI_EXPRESS))
13754                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
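        /* Illustration only: with MBOX_WRITE_REORDER set, mailbox writes
         * are forced out in order by reading the register straight back,
         * roughly as sketched below (cf. tg3_write_flush_reg32()); 'off'
         * and 'val' are hypothetical names here.
         */
#if 0
        writel(val, tp->regs + off);    /* post the mailbox write */
        readl(tp->regs + off);          /* read back to flush it to the chip */
#endif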
13755
13756         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13757                              &tp->pci_cacheline_sz);
13758         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13759                              &tp->pci_lat_timer);
13760         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13761             tp->pci_lat_timer < 64) {
13762                 tp->pci_lat_timer = 64;
13763                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13764                                       tp->pci_lat_timer);
13765         }
13766
13767         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13768                 /* 5700 BX chips need to have their TX producer index
13769                  * mailboxes written twice to work around a bug.
13770                  */
13771                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
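                /* Illustration only: conceptually, the TX mailbox helper
                 * installed for this workaround behaves like the sketch
                 * below ('mbox' and 'val' are hypothetical names).
                 */
#if 0
                writel(val, mbox);
                writel(val, mbox);      /* second write for the 5700 BX bug */
#endif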
13772
13773                 /* If we are in PCI-X mode, enable register write workaround.
13774                  *
13775                  * The workaround is to use indirect register accesses
13776                  * for all chip writes not to mailbox registers.
13777                  */
13778                 if (tg3_flag(tp, PCIX_MODE)) {
13779                         u32 pm_reg;
13780
13781                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13782
13783                         /* The chip can have its power management PCI config
13784                          * space registers clobbered due to this bug.
13785                          * So explicitly force the chip into D0 here.
13786                          */
13787                         pci_read_config_dword(tp->pdev,
13788                                               tp->pm_cap + PCI_PM_CTRL,
13789                                               &pm_reg);
13790                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13791                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13792                         pci_write_config_dword(tp->pdev,
13793                                                tp->pm_cap + PCI_PM_CTRL,
13794                                                pm_reg);
13795
13796                         /* Also, force SERR#/PERR# in PCI command. */
13797                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13798                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13799                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13800                 }
13801         }
13802
13803         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13804                 tg3_flag_set(tp, PCI_HIGH_SPEED);
13805         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13806                 tg3_flag_set(tp, PCI_32BIT);
13807
13808         /* Chip-specific fixup from Broadcom driver */
13809         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13810             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13811                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13812                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13813         }
13814
13815         /* Default fast path register access methods */
13816         tp->read32 = tg3_read32;
13817         tp->write32 = tg3_write32;
13818         tp->read32_mbox = tg3_read32;
13819         tp->write32_mbox = tg3_write32;
13820         tp->write32_tx_mbox = tg3_write32;
13821         tp->write32_rx_mbox = tg3_write32;
13822
13823         /* Various workaround register access methods */
13824         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13825                 tp->write32 = tg3_write_indirect_reg32;
13826         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13827                  (tg3_flag(tp, PCI_EXPRESS) &&
13828                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13829                 /*
13830                  * Back-to-back register writes can cause problems on these
13831                  * chips; the workaround is to read back all register writes
13832                  * except those to mailbox regs.
13833                  *
13834                  * See tg3_write_indirect_reg32().
13835                  */
13836                 tp->write32 = tg3_write_flush_reg32;
13837         }
13838
13839         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13840                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13841                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13842                         tp->write32_rx_mbox = tg3_write_flush_reg32;
13843         }
13844
13845         if (tg3_flag(tp, ICH_WORKAROUND)) {
13846                 tp->read32 = tg3_read_indirect_reg32;
13847                 tp->write32 = tg3_write_indirect_reg32;
13848                 tp->read32_mbox = tg3_read_indirect_mbox;
13849                 tp->write32_mbox = tg3_write_indirect_mbox;
13850                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13851                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13852
13853                 iounmap(tp->regs);
13854                 tp->regs = NULL;
13855
13856                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13857                 pci_cmd &= ~PCI_COMMAND_MEMORY;
13858                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13859         }
13860         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13861                 tp->read32_mbox = tg3_read32_mbox_5906;
13862                 tp->write32_mbox = tg3_write32_mbox_5906;
13863                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13864                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13865         }
13866
13867         if (tp->write32 == tg3_write_indirect_reg32 ||
13868             (tg3_flag(tp, PCIX_MODE) &&
13869              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13870               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13871                 tg3_flag_set(tp, SRAM_USE_CONFIG);
13872
13873         /* Get eeprom hw config before calling tg3_power_up().
13874          * In particular, the TG3_FLAG_IS_NIC flag must be
13875          * determined before calling tg3_power_up() so that
13876          * we know whether or not to switch out of Vaux power.
13877          * When the flag is set, it means that GPIO1 is used for eeprom
13878          * write protect and also implies that it is a LOM where GPIOs
13879          * are not used to switch power.
13880          */
13881         tg3_get_eeprom_hw_cfg(tp);
13882
13883         if (tg3_flag(tp, ENABLE_APE)) {
13884                 /* Allow reads and writes to the
13885                  * APE register and memory space.
13886                  */
13887                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13888                                  PCISTATE_ALLOW_APE_SHMEM_WR |
13889                                  PCISTATE_ALLOW_APE_PSPACE_WR;
13890                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13891                                        pci_state_reg);
13892         }
13893
13894         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13895             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13896             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13897             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13898             tg3_flag(tp, 57765_PLUS))
13899                 tg3_flag_set(tp, CPMU_PRESENT);
13900
13901         /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13902          * GPIO1 driven high will bring 5700's external PHY out of reset.
13903          * It is also used as eeprom write protect on LOMs.
13904          */
13905         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13906         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13907             tg3_flag(tp, EEPROM_WRITE_PROT))
13908                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13909                                        GRC_LCLCTRL_GPIO_OUTPUT1);
13910         /* Unused GPIO3 must be driven as output on 5752 because there
13911          * are no pull-up resistors on unused GPIO pins.
13912          */
13913         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13914                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13915
13916         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13917             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13918             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13919                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13920
13921         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13922             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13923                 /* Turn off the debug UART. */
13924                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13925                 if (tg3_flag(tp, IS_NIC))
13926                         /* Keep VMain power. */
13927                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13928                                               GRC_LCLCTRL_GPIO_OUTPUT0;
13929         }
13930
13931         /* Force the chip into D0. */
13932         err = tg3_power_up(tp);
13933         if (err) {
13934                 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13935                 return err;
13936         }
13937
13938         /* Derive initial jumbo mode from MTU assigned in
13939          * ether_setup() via the alloc_etherdev() call
13940          */
13941         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
13942                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13943
13944         /* Determine WakeOnLan speed to use. */
13945         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13946             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13947             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13948             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13949                 tg3_flag_clear(tp, WOL_SPEED_100MB);
13950         } else {
13951                 tg3_flag_set(tp, WOL_SPEED_100MB);
13952         }
13953
13954         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13955                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13956
13957         /* A few boards don't want the Ethernet@WireSpeed phy feature */
13958         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13959             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
13960              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13961              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13962             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13963             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13964                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13965
13966         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13967             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13968                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13969         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13970                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13971
13972         if (tg3_flag(tp, 5705_PLUS) &&
13973             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13974             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13975             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13976             !tg3_flag(tp, 57765_PLUS)) {
13977                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13978                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13979                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13980                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13981                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13982                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13983                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
13984                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13985                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
13986                 } else
13987                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
13988         }
13989
13990         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13991             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13992                 tp->phy_otp = tg3_read_otp_phycfg(tp);
13993                 if (tp->phy_otp == 0)
13994                         tp->phy_otp = TG3_OTP_DEFAULT;
13995         }
13996
13997         if (tg3_flag(tp, CPMU_PRESENT))
13998                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13999         else
14000                 tp->mi_mode = MAC_MI_MODE_BASE;
14001
14002         tp->coalesce_mode = 0;
14003         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14004             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14005                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14006
14007         /* Set these bits to enable statistics workaround. */
14008         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14009             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14010             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14011                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14012                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14013         }
14014
14015         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14016             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14017                 tg3_flag_set(tp, USE_PHYLIB);
14018
14019         err = tg3_mdio_init(tp);
14020         if (err)
14021                 return err;
14022
14023         /* Initialize data/descriptor byte/word swapping. */
14024         val = tr32(GRC_MODE);
14025         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14026                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14027                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14028                         GRC_MODE_B2HRX_ENABLE |
14029                         GRC_MODE_HTX2B_ENABLE |
14030                         GRC_MODE_HOST_STACKUP);
14031         else
14032                 val &= GRC_MODE_HOST_STACKUP;
14033
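        /* Merge the bits preserved from the current GRC_MODE value with
         * the mode bits accumulated in tp->grc_mode.
         */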
14034         tw32(GRC_MODE, val | tp->grc_mode);
14035
14036         tg3_switch_clocks(tp);
14037
14038         /* Clear this out for sanity. */
14039         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14040
14041         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14042                               &pci_state_reg);
14043         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14044             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14045                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14046
14047                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14048                     chiprevid == CHIPREV_ID_5701_B0 ||
14049                     chiprevid == CHIPREV_ID_5701_B2 ||
14050                     chiprevid == CHIPREV_ID_5701_B5) {
14051                         void __iomem *sram_base;
14052
14053                         /* Write some dummy words into the SRAM status block
14054                          * area and see if they read back correctly.  If the
14055                          * readback is bad, force-enable the PCIX workaround.
14056                          */
14057                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14058
14059                         writel(0x00000000, sram_base);
14060                         writel(0x00000000, sram_base + 4);
14061                         writel(0xffffffff, sram_base + 4);
14062                         if (readl(sram_base) != 0x00000000)
14063                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14064                 }
14065         }
14066
14067         udelay(50);
14068         tg3_nvram_init(tp);
14069
14070         grc_misc_cfg = tr32(GRC_MISC_CFG);
14071         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14072
14073         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14074             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14075              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14076                 tg3_flag_set(tp, IS_5788);
14077
14078         if (!tg3_flag(tp, IS_5788) &&
14079             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
14080                 tg3_flag_set(tp, TAGGED_STATUS);
14081         if (tg3_flag(tp, TAGGED_STATUS)) {
14082                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14083                                       HOSTCC_MODE_CLRTICK_TXBD);
14084
14085                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14086                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14087                                        tp->misc_host_ctrl);
14088         }
14089
14090         /* Preserve the APE MAC_MODE bits */
14091         if (tg3_flag(tp, ENABLE_APE))
14092                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14093         else
14094                 tp->mac_mode = TG3_DEF_MAC_MODE;
14095
14096         /* these are limited to 10/100 only */
14097         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14098              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14099             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14100              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14101              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14102               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14103               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14104             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14105              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14106               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14107               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14108             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14109             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14110             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14111             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14112                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14113
14114         err = tg3_phy_probe(tp);
14115         if (err) {
14116                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14117                 /* ... but do not return immediately ... */
14118                 tg3_mdio_fini(tp);
14119         }
14120
14121         tg3_read_vpd(tp);
14122         tg3_read_fw_ver(tp);
14123
14124         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14125                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14126         } else {
14127                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14128                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14129                 else
14130                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14131         }
14132
14133         /* 5700 {AX,BX} chips have a broken status block link
14134          * change bit implementation, so we must use the
14135          * status register in those cases.
14136          */
14137         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14138                 tg3_flag_set(tp, USE_LINKCHG_REG);
14139         else
14140                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14141
14142         /* The led_ctrl is set during tg3_phy_probe(); here we might
14143          * have to force the link status polling mechanism based
14144          * upon subsystem IDs.
14145          */
14146         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14147             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14148             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14149                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14150                 tg3_flag_set(tp, USE_LINKCHG_REG);
14151         }
14152
14153         /* For all SERDES we poll the MAC status register. */
14154         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14155                 tg3_flag_set(tp, POLL_SERDES);
14156         else
14157                 tg3_flag_clear(tp, POLL_SERDES);
14158
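        /* The 5701 in PCI-X mode apparently cannot DMA into buffers at a
         * 2-byte offset, so leave the payload unaligned there and, on
         * hosts without efficient unaligned access, copy every packet.
         */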
14159         tp->rx_offset = NET_IP_ALIGN;
14160         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14161         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14162             tg3_flag(tp, PCIX_MODE)) {
14163                 tp->rx_offset = 0;
14164 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14165                 tp->rx_copy_thresh = ~(u16)0;
14166 #endif
14167         }
14168
14169         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14170         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14171         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14172
14173         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14174
14175         /* Increment the rx prod index on the rx std ring by at most
14176          * 8 for these chips to work around hw errata.
14177          */
14178         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14179             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14180             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14181                 tp->rx_std_max_post = 8;
14182
14183         if (tg3_flag(tp, ASPM_WORKAROUND))
14184                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14185                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14186
14187         return err;
14188 }
14189
14190 #ifdef CONFIG_SPARC
14191 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14192 {
14193         struct net_device *dev = tp->dev;
14194         struct pci_dev *pdev = tp->pdev;
14195         struct device_node *dp = pci_device_to_OF_node(pdev);
14196         const unsigned char *addr;
14197         int len;
14198
14199         addr = of_get_property(dp, "local-mac-address", &len);
14200         if (addr && len == 6) {
14201                 memcpy(dev->dev_addr, addr, 6);
14202                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14203                 return 0;
14204         }
14205         return -ENODEV;
14206 }
14207
14208 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14209 {
14210         struct net_device *dev = tp->dev;
14211
14212         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14213         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14214         return 0;
14215 }
14216 #endif
14217
14218 static int __devinit tg3_get_device_address(struct tg3 *tp)
14219 {
14220         struct net_device *dev = tp->dev;
14221         u32 hi, lo, mac_offset;
14222         int addr_ok = 0;
14223
14224 #ifdef CONFIG_SPARC
14225         if (!tg3_get_macaddr_sparc(tp))
14226                 return 0;
14227 #endif
14228
14229         mac_offset = 0x7c;
14230         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
14231             tg3_flag(tp, 5780_CLASS)) {
14232                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14233                         mac_offset = 0xcc;
14234                 if (tg3_nvram_lock(tp))
14235                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14236                 else
14237                         tg3_nvram_unlock(tp);
14238         } else if (tg3_flag(tp, 5717_PLUS)) {
14239                 if (PCI_FUNC(tp->pdev->devfn) & 1)
14240                         mac_offset = 0xcc;
14241                 if (PCI_FUNC(tp->pdev->devfn) > 1)
14242                         mac_offset += 0x18c;
14243         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14244                 mac_offset = 0x10;
14245
14246         /* First try to get it from MAC address mailbox. */
14247         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
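        /* 0x484b is ASCII "HK", apparently the bootcode's signature for
         * a valid address in the mailbox.
         */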
14248         if ((hi >> 16) == 0x484b) {
14249                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14250                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14251
14252                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14253                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14254                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14255                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14256                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14257
14258                 /* Some old bootcode may report a 0 MAC address in SRAM */
14259                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14260         }
14261         if (!addr_ok) {
14262                 /* Next, try NVRAM. */
14263                 if (!tg3_flag(tp, NO_NVRAM) &&
14264                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14265                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
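                        /* The be32 words keep the address in network byte
                         * order: the first two MAC octets are the trailing
                         * two bytes of 'hi', the remaining four fill 'lo'.
                         */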
14266                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14267                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14268                 }
14269                 /* Finally just fetch it out of the MAC control regs. */
14270                 else {
14271                         hi = tr32(MAC_ADDR_0_HIGH);
14272                         lo = tr32(MAC_ADDR_0_LOW);
14273
14274                         dev->dev_addr[5] = lo & 0xff;
14275                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14276                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14277                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14278                         dev->dev_addr[1] = hi & 0xff;
14279                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14280                 }
14281         }
14282
14283         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14284 #ifdef CONFIG_SPARC
14285                 if (!tg3_get_default_macaddr_sparc(tp))
14286                         return 0;
14287 #endif
14288                 return -EINVAL;
14289         }
14290         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14291         return 0;
14292 }
14293
14294 #define BOUNDARY_SINGLE_CACHELINE       1
14295 #define BOUNDARY_MULTI_CACHELINE        2
14296
14297 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14298 {
14299         int cacheline_size;
14300         u8 byte;
14301         int goal;
14302
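        /* PCI_CACHE_LINE_SIZE is specified in units of 32-bit words,
         * hence the multiply by 4; a value of 0 is treated here as a
         * 1024-byte line.
         */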
14303         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14304         if (byte == 0)
14305                 cacheline_size = 1024;
14306         else
14307                 cacheline_size = (int) byte * 4;
14308
14309         /* On 5703 and later chips, the boundary bits have no
14310          * effect.
14311          */
14312         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14313             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14314             !tg3_flag(tp, PCI_EXPRESS))
14315                 goto out;
14316
14317 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14318         goal = BOUNDARY_MULTI_CACHELINE;
14319 #else
14320 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14321         goal = BOUNDARY_SINGLE_CACHELINE;
14322 #else
14323         goal = 0;
14324 #endif
14325 #endif
14326
14327         if (tg3_flag(tp, 57765_PLUS)) {
14328                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14329                 goto out;
14330         }
14331
14332         if (!goal)
14333                 goto out;
14334
14335         /* PCI controllers on most RISC systems tend to disconnect
14336          * when a device tries to burst across a cache-line boundary.
14337          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14338          *
14339          * Unfortunately, for PCI-E there are only limited
14340          * write-side controls for this, and thus for reads
14341          * we will still get the disconnects.  We'll also waste
14342          * these PCI cycles for both read and write for chips
14343          * other than the 5700 and 5701, which do not implement
14344          * the boundary bits.
14345          */
14346         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14347                 switch (cacheline_size) {
14348                 case 16:
14349                 case 32:
14350                 case 64:
14351                 case 128:
14352                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14353                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14354                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14355                         } else {
14356                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14357                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14358                         }
14359                         break;
14360
14361                 case 256:
14362                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14363                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14364                         break;
14365
14366                 default:
14367                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14368                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14369                         break;
14370                 }
14371         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14372                 switch (cacheline_size) {
14373                 case 16:
14374                 case 32:
14375                 case 64:
14376                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14377                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14378                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14379                                 break;
14380                         }
14381                         /* fallthrough */
14382                 case 128:
14383                 default:
14384                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14385                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14386                         break;
14387                 }
14388         } else {
14389                 switch (cacheline_size) {
14390                 case 16:
14391                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14392                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14393                                         DMA_RWCTRL_WRITE_BNDRY_16);
14394                                 break;
14395                         }
14396                         /* fallthrough */
14397                 case 32:
14398                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14399                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14400                                         DMA_RWCTRL_WRITE_BNDRY_32);
14401                                 break;
14402                         }
14403                         /* fallthrough */
14404                 case 64:
14405                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14406                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14407                                         DMA_RWCTRL_WRITE_BNDRY_64);
14408                                 break;
14409                         }
14410                         /* fallthrough */
14411                 case 128:
14412                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14413                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14414                                         DMA_RWCTRL_WRITE_BNDRY_128);
14415                                 break;
14416                         }
14417                         /* fallthrough */
14418                 case 256:
14419                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14420                                 DMA_RWCTRL_WRITE_BNDRY_256);
14421                         break;
14422                 case 512:
14423                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14424                                 DMA_RWCTRL_WRITE_BNDRY_512);
14425                         break;
14426                 case 1024:
14427                 default:
14428                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14429                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14430                         break;
14431                 }
14432         }
14433
14434 out:
14435         return val;
14436 }
14437
14438 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14439 {
14440         struct tg3_internal_buffer_desc test_desc;
14441         u32 sram_dma_descs;
14442         int i, ret;
14443
14444         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14445
14446         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14447         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14448         tw32(RDMAC_STATUS, 0);
14449         tw32(WDMAC_STATUS, 0);
14450
14451         tw32(BUFMGR_MODE, 0);
14452         tw32(FTQ_RESET, 0);
14453
14454         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14455         test_desc.addr_lo = buf_dma & 0xffffffff;
14456         test_desc.nic_mbuf = 0x00002100;
14457         test_desc.len = size;
14458
14459         /*
14460          * HP ZX1 systems were seeing test failures with 5701 cards
14461          * running at 33MHz the *second* time the tg3 driver was
14462          * loaded after an initial scan.
14463          *
14464          * Broadcom tells me:
14465          *   ...the DMA engine is connected to the GRC block and a DMA
14466          *   reset may affect the GRC block in some unpredictable way...
14467          *   The behavior of resets to individual blocks has not been tested.
14468          *
14469          * Broadcom noted the GRC reset will also reset all sub-components.
14470          */
14471         if (to_device) {
14472                 test_desc.cqid_sqid = (13 << 8) | 2;
14473
14474                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14475                 udelay(40);
14476         } else {
14477                 test_desc.cqid_sqid = (16 << 8) | 7;
14478
14479                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14480                 udelay(40);
14481         }
14482         test_desc.flags = 0x00000005;
14483
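        /* Copy the descriptor into NIC SRAM one 32-bit word at a time,
         * going through the PCI memory window in config space.
         */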
14484         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14485                 u32 val;
14486
14487                 val = *(((u32 *)&test_desc) + i);
14488                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14489                                        sram_dma_descs + (i * sizeof(u32)));
14490                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14491         }
14492         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14493
14494         if (to_device)
14495                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14496         else
14497                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14498
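        /* Poll the completion FIFO for up to ~4ms (40 iterations of
         * 100 usec) for the descriptor address to appear.
         */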
14499         ret = -ENODEV;
14500         for (i = 0; i < 40; i++) {
14501                 u32 val;
14502
14503                 if (to_device)
14504                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14505                 else
14506                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14507                 if ((val & 0xffff) == sram_dma_descs) {
14508                         ret = 0;
14509                         break;
14510                 }
14511
14512                 udelay(100);
14513         }
14514
14515         return ret;
14516 }
14517
14518 #define TEST_BUFFER_SIZE        0x2000
14519
14520 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14521         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14522         { },
14523 };
14524
14525 static int __devinit tg3_test_dma(struct tg3 *tp)
14526 {
14527         dma_addr_t buf_dma;
14528         u32 *buf, saved_dma_rwctrl;
14529         int ret = 0;
14530
14531         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14532                                  &buf_dma, GFP_KERNEL);
14533         if (!buf) {
14534                 ret = -ENOMEM;
14535                 goto out_nofree;
14536         }
14537
14538         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14539                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14540
14541         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14542
14543         if (tg3_flag(tp, 57765_PLUS))
14544                 goto out;
14545
14546         if (tg3_flag(tp, PCI_EXPRESS)) {
14547                 /* DMA read watermark not used on PCIE */
14548                 tp->dma_rwctrl |= 0x00180000;
14549         } else if (!tg3_flag(tp, PCIX_MODE)) {
14550                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14551                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14552                         tp->dma_rwctrl |= 0x003f0000;
14553                 else
14554                         tp->dma_rwctrl |= 0x003f000f;
14555         } else {
14556                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14557                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14558                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14559                         u32 read_water = 0x7;
14560
14561                         /* If the 5704 is behind the EPB bridge, we can
14562                          * do the less restrictive ONE_DMA workaround for
14563                          * better performance.
14564                          */
14565                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14566                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14567                                 tp->dma_rwctrl |= 0x8000;
14568                         else if (ccval == 0x6 || ccval == 0x7)
14569                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14570
14571                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14572                                 read_water = 4;
14573                         /* Set bit 23 to enable PCIX hw bug fix */
14574                         tp->dma_rwctrl |=
14575                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14576                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14577                                 (1 << 23);
14578                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14579                         /* 5780 always in PCIX mode */
14580                         tp->dma_rwctrl |= 0x00144000;
14581                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14582                         /* 5714 always in PCIX mode */
14583                         tp->dma_rwctrl |= 0x00148000;
14584                 } else {
14585                         tp->dma_rwctrl |= 0x001b000f;
14586                 }
14587         }
14588
14589         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14590             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14591                 tp->dma_rwctrl &= 0xfffffff0;
14592
14593         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14594             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14595                 /* Remove this if it causes problems for some boards. */
14596                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14597
14598                 /* On 5700/5701 chips, we need to set this bit.
14599                  * Otherwise the chip will issue cacheline transactions
14600                  * to streamable DMA memory without all of the byte
14601                  * enables turned on.  This is an error on several
14602                  * RISC PCI controllers, in particular sparc64.
14603                  *
14604                  * On 5703/5704 chips, this bit has been reassigned
14605                  * a different meaning.  In particular, it is used
14606                  * on those chips to enable a PCI-X workaround.
14607                  */
14608                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14609         }
14610
14611         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14612
14613 #if 0
14614         /* Unneeded, already done by tg3_get_invariants.  */
14615         tg3_switch_clocks(tp);
14616 #endif
14617
14618         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14619             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14620                 goto out;
14621
14622         /* It is best to perform the DMA test with the maximum write
14623          * burst size to expose the 5700/5701 write DMA bug.
14624          */
14625         saved_dma_rwctrl = tp->dma_rwctrl;
14626         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14627         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14628
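        /* Fill the buffer with a known pattern, DMA it to the chip and
         * back, then verify it; on corruption, retry with the DMA write
         * boundary clamped to 16 bytes before giving up.
         */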
14629         while (1) {
14630                 u32 *p = buf, i;
14631
14632                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14633                         p[i] = i;
14634
14635                 /* Send the buffer to the chip. */
14636                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14637                 if (ret) {
14638                         dev_err(&tp->pdev->dev,
14639                                 "%s: Buffer write failed. err = %d\n",
14640                                 __func__, ret);
14641                         break;
14642                 }
14643
14644 #if 0
14645                 /* validate data reached card RAM correctly. */
14646                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14647                         u32 val;
14648                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
14649                         if (le32_to_cpu(val) != p[i]) {
14650                                 dev_err(&tp->pdev->dev,
14651                                         "%s: Buffer corrupted on device! "
14652                                         "(%d != %d)\n", __func__, val, i);
14653                                 /* ret = -ENODEV here? */
14654                         }
14655                         p[i] = 0;
14656                 }
14657 #endif
14658                 /* Now read it back. */
14659                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14660                 if (ret) {
14661                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14662                                 "err = %d\n", __func__, ret);
14663                         break;
14664                 }
14665
14666                 /* Verify it. */
14667                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14668                         if (p[i] == i)
14669                                 continue;
14670
14671                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14672                             DMA_RWCTRL_WRITE_BNDRY_16) {
14673                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14674                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14675                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14676                                 break;
14677                         } else {
14678                                 dev_err(&tp->pdev->dev,
14679                                         "%s: Buffer corrupted on read back! "
14680                                         "(%d != %d)\n", __func__, p[i], i);
14681                                 ret = -ENODEV;
14682                                 goto out;
14683                         }
14684                 }
14685
14686                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14687                         /* Success. */
14688                         ret = 0;
14689                         break;
14690                 }
14691         }
14692         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14693             DMA_RWCTRL_WRITE_BNDRY_16) {
14694                 /* DMA test passed without adjusting the DMA boundary;
14695                  * now look for chipsets that are known to expose the
14696                  * DMA bug without failing the test.
14697                  */
14698                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14699                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14700                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14701                 } else {
14702                         /* Safe to use the calculated DMA boundary. */
14703                         tp->dma_rwctrl = saved_dma_rwctrl;
14704                 }
14705
14706                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14707         }
14708
14709 out:
14710         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14711 out_nofree:
14712         return ret;
14713 }
14714
14715 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14716 {
14717         if (tg3_flag(tp, 57765_PLUS)) {
14718                 tp->bufmgr_config.mbuf_read_dma_low_water =
14719                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14720                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14721                         DEFAULT_MB_MACRX_LOW_WATER_57765;
14722                 tp->bufmgr_config.mbuf_high_water =
14723                         DEFAULT_MB_HIGH_WATER_57765;
14724
14725                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14726                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14727                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14728                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14729                 tp->bufmgr_config.mbuf_high_water_jumbo =
14730                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14731         } else if (tg3_flag(tp, 5705_PLUS)) {
14732                 tp->bufmgr_config.mbuf_read_dma_low_water =
14733                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14734                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14735                         DEFAULT_MB_MACRX_LOW_WATER_5705;
14736                 tp->bufmgr_config.mbuf_high_water =
14737                         DEFAULT_MB_HIGH_WATER_5705;
14738                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14739                         tp->bufmgr_config.mbuf_mac_rx_low_water =
14740                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
14741                         tp->bufmgr_config.mbuf_high_water =
14742                                 DEFAULT_MB_HIGH_WATER_5906;
14743                 }
14744
14745                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14746                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14747                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14748                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14749                 tp->bufmgr_config.mbuf_high_water_jumbo =
14750                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14751         } else {
14752                 tp->bufmgr_config.mbuf_read_dma_low_water =
14753                         DEFAULT_MB_RDMA_LOW_WATER;
14754                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14755                         DEFAULT_MB_MACRX_LOW_WATER;
14756                 tp->bufmgr_config.mbuf_high_water =
14757                         DEFAULT_MB_HIGH_WATER;
14758
14759                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14760                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14761                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14762                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14763                 tp->bufmgr_config.mbuf_high_water_jumbo =
14764                         DEFAULT_MB_HIGH_WATER_JUMBO;
14765         }
14766
14767         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14768         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14769 }
14770
14771 static char * __devinit tg3_phy_string(struct tg3 *tp)
14772 {
14773         switch (tp->phy_id & TG3_PHY_ID_MASK) {
14774         case TG3_PHY_ID_BCM5400:        return "5400";
14775         case TG3_PHY_ID_BCM5401:        return "5401";
14776         case TG3_PHY_ID_BCM5411:        return "5411";
14777         case TG3_PHY_ID_BCM5701:        return "5701";
14778         case TG3_PHY_ID_BCM5703:        return "5703";
14779         case TG3_PHY_ID_BCM5704:        return "5704";
14780         case TG3_PHY_ID_BCM5705:        return "5705";
14781         case TG3_PHY_ID_BCM5750:        return "5750";
14782         case TG3_PHY_ID_BCM5752:        return "5752";
14783         case TG3_PHY_ID_BCM5714:        return "5714";
14784         case TG3_PHY_ID_BCM5780:        return "5780";
14785         case TG3_PHY_ID_BCM5755:        return "5755";
14786         case TG3_PHY_ID_BCM5787:        return "5787";
14787         case TG3_PHY_ID_BCM5784:        return "5784";
14788         case TG3_PHY_ID_BCM5756:        return "5722/5756";
14789         case TG3_PHY_ID_BCM5906:        return "5906";
14790         case TG3_PHY_ID_BCM5761:        return "5761";
14791         case TG3_PHY_ID_BCM5718C:       return "5718C";
14792         case TG3_PHY_ID_BCM5718S:       return "5718S";
14793         case TG3_PHY_ID_BCM57765:       return "57765";
14794         case TG3_PHY_ID_BCM5719C:       return "5719C";
14795         case TG3_PHY_ID_BCM5720C:       return "5720C";
14796         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
14797         case 0:                 return "serdes";
14798         default:                return "unknown";
14799         }
14800 }
14801
14802 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14803 {
14804         if (tg3_flag(tp, PCI_EXPRESS)) {
14805                 strcpy(str, "PCI Express");
14806                 return str;
14807         } else if (tg3_flag(tp, PCIX_MODE)) {
14808                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14809
14810                 strcpy(str, "PCIX:");
14811
14812                 if ((clock_ctrl == 7) ||
14813                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14814                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14815                         strcat(str, "133MHz");
14816                 else if (clock_ctrl == 0)
14817                         strcat(str, "33MHz");
14818                 else if (clock_ctrl == 2)
14819                         strcat(str, "50MHz");
14820                 else if (clock_ctrl == 4)
14821                         strcat(str, "66MHz");
14822                 else if (clock_ctrl == 6)
14823                         strcat(str, "100MHz");
14824         } else {
14825                 strcpy(str, "PCI:");
14826                 if (tg3_flag(tp, PCI_HIGH_SPEED))
14827                         strcat(str, "66MHz");
14828                 else
14829                         strcat(str, "33MHz");
14830         }
14831         if (tg3_flag(tp, PCI_32BIT))
14832                 strcat(str, ":32-bit");
14833         else
14834                 strcat(str, ":64-bit");
14835         return str;
14836 }
14837
14838 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14839 {
14840         struct pci_dev *peer;
14841         unsigned int func, devnr = tp->pdev->devfn & ~7;
14842
14843         for (func = 0; func < 8; func++) {
14844                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14845                 if (peer && peer != tp->pdev)
14846                         break;
14847                 pci_dev_put(peer);
14848         }
14849         /* The 5704 can be configured in single-port mode; set peer to
14850          * tp->pdev in that case.
14851          */
14852         if (!peer) {
14853                 peer = tp->pdev;
14854                 return peer;
14855         }
14856
14857         /*
14858          * We don't need to keep the refcount elevated; there's no way
14859          * to remove one half of this device without removing the other.
14860          */
14861         pci_dev_put(peer);
14862
14863         return peer;
14864 }
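/* Worked example for the devfn arithmetic above, using the standard PCI
 * encoding PCI_DEVFN(slot, fn) == ((slot) << 3) | (fn): a dual-port 5704
 * at devfn 0x10 (slot 2, function 0) gives devnr = 0x10, and the loop
 * probes functions 0-7 of that slot until it finds the sibling port at,
 * say, devfn 0x11 (slot 2, function 1).
 */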
14865
14866 static void __devinit tg3_init_coal(struct tg3 *tp)
14867 {
14868         struct ethtool_coalesce *ec = &tp->coal;
14869
14870         memset(ec, 0, sizeof(*ec));
14871         ec->cmd = ETHTOOL_GCOALESCE;
14872         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14873         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14874         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14875         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14876         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14877         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14878         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14879         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14880         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14881
14882         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14883                                  HOSTCC_MODE_CLRTICK_TXBD)) {
14884                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14885                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14886                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14887                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14888         }
14889
14890         if (tg3_flag(tp, 5705_PLUS)) {
14891                 ec->rx_coalesce_usecs_irq = 0;
14892                 ec->tx_coalesce_usecs_irq = 0;
14893                 ec->stats_block_coalesce_usecs = 0;
14894         }
14895 }
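/* These defaults are what "ethtool -c IFACE" reports after probe and can
 * be tuned at runtime with "ethtool -C", e.g.:
 *
 *      ethtool -C eth0 rx-usecs 60 rx-frames 15
 *
 * ("eth0" is a placeholder interface name; the request is serviced by the
 * driver's ethtool set_coalesce hook.)
 */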
14896
14897 static const struct net_device_ops tg3_netdev_ops = {
14898         .ndo_open               = tg3_open,
14899         .ndo_stop               = tg3_close,
14900         .ndo_start_xmit         = tg3_start_xmit,
14901         .ndo_get_stats64        = tg3_get_stats64,
14902         .ndo_validate_addr      = eth_validate_addr,
14903         .ndo_set_multicast_list = tg3_set_rx_mode,
14904         .ndo_set_mac_address    = tg3_set_mac_addr,
14905         .ndo_do_ioctl           = tg3_ioctl,
14906         .ndo_tx_timeout         = tg3_tx_timeout,
14907         .ndo_change_mtu         = tg3_change_mtu,
14908         .ndo_fix_features       = tg3_fix_features,
14909         .ndo_set_features       = tg3_set_features,
14910 #ifdef CONFIG_NET_POLL_CONTROLLER
14911         .ndo_poll_controller    = tg3_poll_controller,
14912 #endif
14913 };
14914
14915 static int __devinit tg3_init_one(struct pci_dev *pdev,
14916                                   const struct pci_device_id *ent)
14917 {
14918         struct net_device *dev;
14919         struct tg3 *tp;
14920         int i, err, pm_cap;
14921         u32 sndmbx, rcvmbx, intmbx;
14922         char str[40];
14923         u64 dma_mask, persist_dma_mask;
14924         u32 hw_features = 0;
14925
14926         printk_once(KERN_INFO "%s\n", version);
14927
14928         err = pci_enable_device(pdev);
14929         if (err) {
14930                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14931                 return err;
14932         }
14933
14934         err = pci_request_regions(pdev, DRV_MODULE_NAME);
14935         if (err) {
14936                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14937                 goto err_out_disable_pdev;
14938         }
14939
14940         pci_set_master(pdev);
14941
14942         /* Find power-management capability. */
14943         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14944         if (pm_cap == 0) {
14945                 dev_err(&pdev->dev,
14946                         "Cannot find Power Management capability, aborting\n");
14947                 err = -EIO;
14948                 goto err_out_free_res;
14949         }
14950
14951         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14952         if (!dev) {
14953                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14954                 err = -ENOMEM;
14955                 goto err_out_free_res;
14956         }
14957
14958         SET_NETDEV_DEV(dev, &pdev->dev);
14959
14960         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
14961
14962         tp = netdev_priv(dev);
14963         tp->pdev = pdev;
14964         tp->dev = dev;
14965         tp->pm_cap = pm_cap;
14966         tp->rx_mode = TG3_DEF_RX_MODE;
14967         tp->tx_mode = TG3_DEF_TX_MODE;
14968
14969         if (tg3_debug > 0)
14970                 tp->msg_enable = tg3_debug;
14971         else
14972                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14973
14974         /* The word/byte swap controls here control register access byte
14975          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
14976          * setting below.
14977          */
14978         tp->misc_host_ctrl =
14979                 MISC_HOST_CTRL_MASK_PCI_INT |
14980                 MISC_HOST_CTRL_WORD_SWAP |
14981                 MISC_HOST_CTRL_INDIR_ACCESS |
14982                 MISC_HOST_CTRL_PCISTATE_RW;
14983
14984         /* The NONFRM (non-frame) byte/word swap controls take effect
14985          * on descriptor entries, i.e. anything which isn't packet data.
14986          *
14987          * The StrongARM chips on the board (one for tx, one for rx)
14988          * are running in big-endian mode.
14989          */
14990         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14991                         GRC_MODE_WSWAP_NONFRM_DATA);
14992 #ifdef __BIG_ENDIAN
14993         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14994 #endif
14995         spin_lock_init(&tp->lock);
14996         spin_lock_init(&tp->indirect_lock);
14997         INIT_WORK(&tp->reset_task, tg3_reset_task);
14998
14999         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15000         if (!tp->regs) {
15001                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15002                 err = -ENOMEM;
15003                 goto err_out_free_dev;
15004         }
15005
15006         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15007         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15008
15009         dev->ethtool_ops = &tg3_ethtool_ops;
15010         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15011         dev->netdev_ops = &tg3_netdev_ops;
15012         dev->irq = pdev->irq;
15013
15014         err = tg3_get_invariants(tp);
15015         if (err) {
15016                 dev_err(&pdev->dev,
15017                         "Problem fetching invariants of chip, aborting\n");
15018                 goto err_out_iounmap;
15019         }
15020
15021         /* The EPB bridge inside the 5714, 5715, and 5780, and any
15022          * device behind the EPB, cannot support DMA addresses > 40 bits.
15023          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15024          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15025          * do DMA address check in tg3_start_xmit().
15026          */
15027         if (tg3_flag(tp, IS_5788))
15028                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15029         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15030                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15031 #ifdef CONFIG_HIGHMEM
15032                 dma_mask = DMA_BIT_MASK(64);
15033 #endif
15034         } else
15035                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
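        /* For reference, DMA_BIT_MASK(n) is
         * (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1)), so the masks selected
         * above are 0xffffffff (32-bit), 0xffffffffff (40-bit) and
         * 0xffffffffffffffff (64-bit).
         */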
15036
15037         /* Configure DMA attributes. */
15038         if (dma_mask > DMA_BIT_MASK(32)) {
15039                 err = pci_set_dma_mask(pdev, dma_mask);
15040                 if (!err) {
15041                         dev->features |= NETIF_F_HIGHDMA;
15042                         err = pci_set_consistent_dma_mask(pdev,
15043                                                           persist_dma_mask);
15044                         if (err < 0) {
15045                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15046                                         "DMA for consistent allocations\n");
15047                                 goto err_out_iounmap;
15048                         }
15049                 }
15050         }
15051         if (err || dma_mask == DMA_BIT_MASK(32)) {
15052                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15053                 if (err) {
15054                         dev_err(&pdev->dev,
15055                                 "No usable DMA configuration, aborting\n");
15056                         goto err_out_iounmap;
15057                 }
15058         }
15059
15060         tg3_init_bufmgr_config(tp);
15061
15062         /* Selectively allow TSO based on operating conditions */
15063         if ((tg3_flag(tp, HW_TSO_1) ||
15064              tg3_flag(tp, HW_TSO_2) ||
15065              tg3_flag(tp, HW_TSO_3)) ||
15066             (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
15067                 tg3_flag_set(tp, TSO_CAPABLE);
15068         else {
15069                 tg3_flag_clear(tp, TSO_CAPABLE);
15070                 tg3_flag_clear(tp, TSO_BUG);
15071                 tp->fw_needed = NULL;
15072         }
15073
15074         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
15075                 tp->fw_needed = FIRMWARE_TG3;
15076
15077         /* TSO is on by default on chips that support hardware TSO.
15078          * Firmware TSO on older chips gives lower performance, so it
15079          * is off by default, but can be enabled using ethtool.
15080          */
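        /* e.g. "ethtool -K eth0 tso on" enables firmware TSO on those older
         * chips ("eth0" is a placeholder interface name).
         */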
15081         if ((tg3_flag(tp, HW_TSO_1) ||
15082              tg3_flag(tp, HW_TSO_2) ||
15083              tg3_flag(tp, HW_TSO_3)) &&
15084             (dev->features & NETIF_F_IP_CSUM))
15085                 hw_features |= NETIF_F_TSO;
15086         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15087                 if (dev->features & NETIF_F_IPV6_CSUM)
15088                         hw_features |= NETIF_F_TSO6;
15089                 if (tg3_flag(tp, HW_TSO_3) ||
15090                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15091                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15092                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15093                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15094                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15095                         hw_features |= NETIF_F_TSO_ECN;
15096         }
15097
15098         dev->hw_features |= hw_features;
15099         dev->features |= hw_features;
15100         dev->vlan_features |= hw_features;
15101
15102         /*
15103          * Add loopback capability only for a subset of devices that support
15104          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15105          * loopback for the remaining devices.
15106          */
15107         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15108             !tg3_flag(tp, CPMU_PRESENT))
15109                 /* Add the loopback capability */
15110                 dev->hw_features |= NETIF_F_LOOPBACK;
15111
15112         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15113             !tg3_flag(tp, TSO_CAPABLE) &&
15114             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15115                 tg3_flag_set(tp, MAX_RXPEND_64);
15116                 tp->rx_pending = 63;
15117         }
15118
15119         err = tg3_get_device_address(tp);
15120         if (err) {
15121                 dev_err(&pdev->dev,
15122                         "Could not obtain valid ethernet address, aborting\n");
15123                 goto err_out_iounmap;
15124         }
15125
15126         if (tg3_flag(tp, ENABLE_APE)) {
15127                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15128                 if (!tp->aperegs) {
15129                         dev_err(&pdev->dev,
15130                                 "Cannot map APE registers, aborting\n");
15131                         err = -ENOMEM;
15132                         goto err_out_iounmap;
15133                 }
15134
15135                 tg3_ape_lock_init(tp);
15136
15137                 if (tg3_flag(tp, ENABLE_ASF))
15138                         tg3_read_dash_ver(tp);
15139         }
15140
15141         /*
15142          * Reset the chip in case the UNDI or EFI driver did not shut
15143          * down DMA; otherwise the DMA self test will enable WDMAC and
15144          * we'll see (spurious) pending DMA on the PCI bus at that point.
15145          */
15146         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15147             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15148                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15149                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15150         }
15151
15152         err = tg3_test_dma(tp);
15153         if (err) {
15154                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15155                 goto err_out_apeunmap;
15156         }
15157
15158         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15159         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15160         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15161         for (i = 0; i < tp->irq_max; i++) {
15162                 struct tg3_napi *tnapi = &tp->napi[i];
15163
15164                 tnapi->tp = tp;
15165                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15166
15167                 tnapi->int_mbox = intmbx;
15168                 if (i < 4)
15169                         intmbx += 0x8;
15170                 else
15171                         intmbx += 0x4;
15172
15173                 tnapi->consmbox = rcvmbx;
15174                 tnapi->prodmbox = sndmbx;
15175
15176                 if (i)
15177                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15178                 else
15179                         tnapi->coal_now = HOSTCC_MODE_NOW;
15180
15181                 if (!tg3_flag(tp, SUPPORT_MSIX))
15182                         break;
15183
15184                 /*
15185                  * If we support MSIX, we'll be using RSS.  If we're using
15186                  * RSS, the first vector only handles link interrupts and the
15187                  * remaining vectors handle rx and tx interrupts.  Reuse the
15188          * mailbox values for the next iteration.  The values we set up
15189          * above are still useful for the single-vector mode.
15190                  */
15191                 if (!i)
15192                         continue;
15193
15194                 rcvmbx += 0x8;
15195
15196                 if (sndmbx & 0x4)
15197                         sndmbx -= 0x4;
15198                 else
15199                         sndmbx += 0xc;
15200         }
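        /* Illustration of the walk above, assuming the usual tg3.h layout
         * (MAILBOX_SNDHOST_PROD_IDX_0 == 0x300, TG3_64BIT_REG_LOW == 0x4):
         * vectors 0 and 1 share the base sndmbx 0x304 (under RSS, vector 0
         * handles only link interrupts), after which sndmbx visits 0x300,
         * 0x30c, 0x308, 0x314, ... while rcvmbx advances by 8 per vector.
         */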
15201
15202         tg3_init_coal(tp);
15203
15204         pci_set_drvdata(pdev, dev);
15205
15206         err = register_netdev(dev);
15207         if (err) {
15208                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15209                 goto err_out_apeunmap;
15210         }
15211
15212         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15213                     tp->board_part_number,
15214                     tp->pci_chip_rev_id,
15215                     tg3_bus_string(tp, str),
15216                     dev->dev_addr);
15217
15218         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15219                 struct phy_device *phydev;
15220                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15221                 netdev_info(dev,
15222                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15223                             phydev->drv->name, dev_name(&phydev->dev));
15224         } else {
15225                 char *ethtype;
15226
15227                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15228                         ethtype = "10/100Base-TX";
15229                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15230                         ethtype = "1000Base-SX";
15231                 else
15232                         ethtype = "10/100/1000Base-T";
15233
15234                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15235                             "(WireSpeed[%d], EEE[%d])\n",
15236                             tg3_phy_string(tp), ethtype,
15237                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15238                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15239         }
15240
15241         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15242                     (dev->features & NETIF_F_RXCSUM) != 0,
15243                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15244                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15245                     tg3_flag(tp, ENABLE_ASF) != 0,
15246                     tg3_flag(tp, TSO_CAPABLE) != 0);
15247         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15248                     tp->dma_rwctrl,
15249                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15250                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15251
15252         pci_save_state(pdev);
15253
15254         return 0;
15255
15256 err_out_apeunmap:
15257         if (tp->aperegs) {
15258                 iounmap(tp->aperegs);
15259                 tp->aperegs = NULL;
15260         }
15261
15262 err_out_iounmap:
15263         if (tp->regs) {
15264                 iounmap(tp->regs);
15265                 tp->regs = NULL;
15266         }
15267
15268 err_out_free_dev:
15269         free_netdev(dev);
15270
15271 err_out_free_res:
15272         pci_release_regions(pdev);
15273
15274 err_out_disable_pdev:
15275         pci_disable_device(pdev);
15276         pci_set_drvdata(pdev, NULL);
15277         return err;
15278 }
15279
15280 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15281 {
15282         struct net_device *dev = pci_get_drvdata(pdev);
15283
15284         if (dev) {
15285                 struct tg3 *tp = netdev_priv(dev);
15286
15287                 if (tp->fw)
15288                         release_firmware(tp->fw);
15289
15290                 cancel_work_sync(&tp->reset_task);
15291
15292                 if (!tg3_flag(tp, USE_PHYLIB)) {
15293                         tg3_phy_fini(tp);
15294                         tg3_mdio_fini(tp);
15295                 }
15296
15297                 unregister_netdev(dev);
15298                 if (tp->aperegs) {
15299                         iounmap(tp->aperegs);
15300                         tp->aperegs = NULL;
15301                 }
15302                 if (tp->regs) {
15303                         iounmap(tp->regs);
15304                         tp->regs = NULL;
15305                 }
15306                 free_netdev(dev);
15307                 pci_release_regions(pdev);
15308                 pci_disable_device(pdev);
15309                 pci_set_drvdata(pdev, NULL);
15310         }
15311 }
15312
15313 #ifdef CONFIG_PM_SLEEP
15314 static int tg3_suspend(struct device *device)
15315 {
15316         struct pci_dev *pdev = to_pci_dev(device);
15317         struct net_device *dev = pci_get_drvdata(pdev);
15318         struct tg3 *tp = netdev_priv(dev);
15319         int err;
15320
15321         if (!netif_running(dev))
15322                 return 0;
15323
15324         flush_work_sync(&tp->reset_task);
15325         tg3_phy_stop(tp);
15326         tg3_netif_stop(tp);
15327
15328         del_timer_sync(&tp->timer);
15329
15330         tg3_full_lock(tp, 1);
15331         tg3_disable_ints(tp);
15332         tg3_full_unlock(tp);
15333
15334         netif_device_detach(dev);
15335
15336         tg3_full_lock(tp, 0);
15337         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15338         tg3_flag_clear(tp, INIT_COMPLETE);
15339         tg3_full_unlock(tp);
15340
15341         err = tg3_power_down_prepare(tp);
15342         if (err) {
15343                 int err2;
15344
15345                 tg3_full_lock(tp, 0);
15346
15347                 tg3_flag_set(tp, INIT_COMPLETE);
15348                 err2 = tg3_restart_hw(tp, 1);
15349                 if (err2)
15350                         goto out;
15351
15352                 tp->timer.expires = jiffies + tp->timer_offset;
15353                 add_timer(&tp->timer);
15354
15355                 netif_device_attach(dev);
15356                 tg3_netif_start(tp);
15357
15358 out:
15359                 tg3_full_unlock(tp);
15360
15361                 if (!err2)
15362                         tg3_phy_start(tp);
15363         }
15364
15365         return err;
15366 }
15367
15368 static int tg3_resume(struct device *device)
15369 {
15370         struct pci_dev *pdev = to_pci_dev(device);
15371         struct net_device *dev = pci_get_drvdata(pdev);
15372         struct tg3 *tp = netdev_priv(dev);
15373         int err;
15374
15375         if (!netif_running(dev))
15376                 return 0;
15377
15378         netif_device_attach(dev);
15379
15380         tg3_full_lock(tp, 0);
15381
15382         tg3_flag_set(tp, INIT_COMPLETE);
15383         err = tg3_restart_hw(tp, 1);
15384         if (err)
15385                 goto out;
15386
15387         tp->timer.expires = jiffies + tp->timer_offset;
15388         add_timer(&tp->timer);
15389
15390         tg3_netif_start(tp);
15391
15392 out:
15393         tg3_full_unlock(tp);
15394
15395         if (!err)
15396                 tg3_phy_start(tp);
15397
15398         return err;
15399 }
15400
15401 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15402 #define TG3_PM_OPS (&tg3_pm_ops)
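/* A rough sketch of what SIMPLE_DEV_PM_OPS expands to (the macro's effect,
 * not its literal definition):
 *
 *      static const struct dev_pm_ops tg3_pm_ops = {
 *              .suspend  = tg3_suspend,  .resume  = tg3_resume,
 *              .freeze   = tg3_suspend,  .thaw    = tg3_resume,
 *              .poweroff = tg3_suspend,  .restore = tg3_resume,
 *      };
 *
 * i.e. the same callback pair services suspend/resume and the hibernation
 * transitions.
 */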
15403
15404 #else
15405
15406 #define TG3_PM_OPS NULL
15407
15408 #endif /* CONFIG_PM_SLEEP */
15409
15410 /**
15411  * tg3_io_error_detected - called when PCI error is detected
15412  * @pdev: Pointer to PCI device
15413  * @state: The current pci connection state
15414  *
15415  * This function is called after a PCI bus error affecting
15416  * this device has been detected.
15417  */
15418 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15419                                               pci_channel_state_t state)
15420 {
15421         struct net_device *netdev = pci_get_drvdata(pdev);
15422         struct tg3 *tp = netdev_priv(netdev);
15423         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15424
15425         netdev_info(netdev, "PCI I/O error detected\n");
15426
15427         rtnl_lock();
15428
15429         if (!netif_running(netdev))
15430                 goto done;
15431
15432         tg3_phy_stop(tp);
15433
15434         tg3_netif_stop(tp);
15435
15436         del_timer_sync(&tp->timer);
15437         tg3_flag_clear(tp, RESTART_TIMER);
15438
15439         /* Want to make sure that the reset task doesn't run */
15440         cancel_work_sync(&tp->reset_task);
15441         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15442         tg3_flag_clear(tp, RESTART_TIMER);
15443
15444         netif_device_detach(netdev);
15445
15446         /* Clean up software state, even if MMIO is blocked */
15447         tg3_full_lock(tp, 0);
15448         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15449         tg3_full_unlock(tp);
15450
15451 done:
15452         if (state == pci_channel_io_perm_failure)
15453                 err = PCI_ERS_RESULT_DISCONNECT;
15454         else
15455                 pci_disable_device(pdev);
15456
15457         rtnl_unlock();
15458
15459         return err;
15460 }
15461
15462 /**
15463  * tg3_io_slot_reset - called after the PCI bus has been reset.
15464  * @pdev: Pointer to PCI device
15465  *
15466  * Restart the card from scratch, as if from a cold boot.
15467  * At this point, the card has experienced a hard reset,
15468  * followed by fixups by BIOS, and has its config space
15469  * set up identically to what it was at cold boot.
15470  */
15471 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15472 {
15473         struct net_device *netdev = pci_get_drvdata(pdev);
15474         struct tg3 *tp = netdev_priv(netdev);
15475         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15476         int err;
15477
15478         rtnl_lock();
15479
15480         if (pci_enable_device(pdev)) {
15481                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15482                 goto done;
15483         }
15484
15485         pci_set_master(pdev);
15486         pci_restore_state(pdev);
15487         pci_save_state(pdev);
15488
15489         if (!netif_running(netdev)) {
15490                 rc = PCI_ERS_RESULT_RECOVERED;
15491                 goto done;
15492         }
15493
15494         err = tg3_power_up(tp);
15495         if (err) {
15496                 netdev_err(netdev, "Failed to restore register access.\n");
15497                 goto done;
15498         }
15499
15500         rc = PCI_ERS_RESULT_RECOVERED;
15501
15502 done:
15503         rtnl_unlock();
15504
15505         return rc;
15506 }
15507
15508 /**
15509  * tg3_io_resume - called when traffic can start flowing again.
15510  * @pdev: Pointer to PCI device
15511  *
15512  * This callback is called when the error recovery driver tells
15513  * us that it's OK to resume normal operation.
15514  */
15515 static void tg3_io_resume(struct pci_dev *pdev)
15516 {
15517         struct net_device *netdev = pci_get_drvdata(pdev);
15518         struct tg3 *tp = netdev_priv(netdev);
15519         int err;
15520
15521         rtnl_lock();
15522
15523         if (!netif_running(netdev))
15524                 goto done;
15525
15526         tg3_full_lock(tp, 0);
15527         tg3_flag_set(tp, INIT_COMPLETE);
15528         err = tg3_restart_hw(tp, 1);
15529         tg3_full_unlock(tp);
15530         if (err) {
15531                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15532                 goto done;
15533         }
15534
15535         netif_device_attach(netdev);
15536
15537         tp->timer.expires = jiffies + tp->timer_offset;
15538         add_timer(&tp->timer);
15539
15540         tg3_netif_start(tp);
15541
15542         tg3_phy_start(tp);
15543
15544 done:
15545         rtnl_unlock();
15546 }
15547
15548 static struct pci_error_handlers tg3_err_handler = {
15549         .error_detected = tg3_io_error_detected,
15550         .slot_reset     = tg3_io_slot_reset,
15551         .resume         = tg3_io_resume
15552 };
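/* These three hooks implement the PCI core's error recovery sequence:
 * error_detected -> (bus/slot reset performed by the core's recovery
 * machinery) -> slot_reset -> resume.
 */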
15553
15554 static struct pci_driver tg3_driver = {
15555         .name           = DRV_MODULE_NAME,
15556         .id_table       = tg3_pci_tbl,
15557         .probe          = tg3_init_one,
15558         .remove         = __devexit_p(tg3_remove_one),
15559         .err_handler    = &tg3_err_handler,
15560         .driver.pm      = TG3_PM_OPS,
15561 };
15562
15563 static int __init tg3_init(void)
15564 {
15565         return pci_register_driver(&tg3_driver);
15566 }
15567
15568 static void __exit tg3_cleanup(void)
15569 {
15570         pci_unregister_driver(&tg3_driver);
15571 }
15572
15573 module_init(tg3_init);
15574 module_exit(tg3_cleanup);
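/* Once loaded (e.g. "modprobe tg3"), the driver name and version printed by
 * printk_once() in tg3_init_one() are also reported by "modinfo tg3" and
 * "ethtool -i IFACE" ("IFACE" is a placeholder).
 */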