/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
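
/* Illustrative use of the accessors above (a sketch, not driver code):
 * call sites test and modify feature bits by short name, e.g.
 *
 *	if (tg3_flag(tp, JUMBO_CAPABLE))
 *		tg3_flag_set(tp, JUMBO_RING_ENABLE);
 *
 * which the ## paste expands to atomic bitops on tp->tg3_flags using
 * the TG3_FLAG_* enum values from tg3.h.
 */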

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     118
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "April 22, 2011"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
#define TG3_RSS_INDIR_TBL_SIZE          128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
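
/* Because TG3_TX_RING_SIZE is a power of two, the mask form above is
 * equivalent to '((N) + 1) % TG3_TX_RING_SIZE' but compiles to a
 * single AND, which is the point of the comment above.  Illustrative
 * wrap-around (not part of the driver):
 *
 *	NEXT_TX(510) == 511
 *	NEXT_TX(511) == 0
 */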

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
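
/* Simplified sketch of how the receive handler applies the threshold
 * (illustrative only, not the actual rx code): small frames are copied
 * into a fresh skb so the mapped buffer can stay on the ring, larger
 * frames have their buffer handed up directly and the ring replenished.
 *
 *	if (len > TG3_RX_COPY_THRESH(tp))
 *		... unmap the dma buffer and pass it up ...
 *	else
 *		... memcpy into a freshly allocated skb ...
 */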

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "mbuf_lwm_thresh_hit" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
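
/* The two indirect helpers above implement a register window: the
 * target offset is written to TG3PCI_REG_BASE_ADDR in PCI config
 * space, after which TG3PCI_REG_DATA aliases that register.  The
 * spinlock keeps the address/data config cycles atomic with respect
 * to other indirect accesses.  Illustrative use (a sketch, not
 * driver code):
 *
 *	tg3_write_indirect_reg32(tp, GRC_MODE, val);
 *	val = tg3_read_indirect_reg32(tp, GRC_MODE);
 */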

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
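
/* Illustrative use of the flushing variants (a sketch, not driver code):
 *
 *	tw32_f(MAC_MODE, tp->mac_mode);
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * tw32_f() forces the posted write out by reading the register back;
 * tw32_wait_f() additionally delays for registers that are unsafe to
 * read back immediately (see the comment above _tw32_flush), as
 * tg3_switch_clocks() below does with a 40 usec wait.
 */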

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver isn't holding any stale locks. */
        for (i = 0; i < 8; i++)
                tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == APE_LOCK_GRANT_DRIVER)
                        break;
                udelay(10);
        }

        if (status != APE_LOCK_GRANT_DRIVER) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off,
                                APE_LOCK_GRANT_DRIVER);

                ret = -EBUSY;
        }

        return ret;
}
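
/* Typical pairing (illustrative only): the lock arbitrates hardware
 * shared with the APE management firmware, e.g.
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM) == 0) {
 *		... touch the shared resource ...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 */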

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
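
/* Summary of the mailbox interrupt protocol above (explanatory note,
 * not original driver text): writing 1 to a vector's interrupt mailbox
 * masks it (tg3_disable_ints), while writing last_tag << 24 both
 * unmasks it and acks status updates up to that tag.  With TAGGED_STATUS
 * the chip compares the written tag against its current one and
 * re-interrupts on its own if newer work exists, which is why the
 * tg3_has_work() check above is only needed in the non-tagged case.
 */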

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
1207          */
1208         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1209                 tg3_bmcr_reset(tp);
1210
1211         i = mdiobus_register(tp->mdio_bus);
1212         if (i) {
1213                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1214                 mdiobus_free(tp->mdio_bus);
1215                 return i;
1216         }
1217
1218         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1219
1220         if (!phydev || !phydev->drv) {
1221                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1222                 mdiobus_unregister(tp->mdio_bus);
1223                 mdiobus_free(tp->mdio_bus);
1224                 return -ENODEV;
1225         }
1226
1227         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1228         case PHY_ID_BCM57780:
1229                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1230                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1231                 break;
1232         case PHY_ID_BCM50610:
1233         case PHY_ID_BCM50610M:
1234                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1235                                      PHY_BRCM_RX_REFCLK_UNUSED |
1236                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1237                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1238                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1239                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1240                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1241                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1242                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1243                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1244                 /* fallthru */
1245         case PHY_ID_RTL8211C:
1246                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1247                 break;
1248         case PHY_ID_RTL8201E:
1249         case PHY_ID_BCMAC131:
1250                 phydev->interface = PHY_INTERFACE_MODE_MII;
1251                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1252                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1253                 break;
1254         }
1255
1256         tg3_flag_set(tp, MDIOBUS_INITED);
1257
1258         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1259                 tg3_mdio_config_5785(tp);
1260
1261         return 0;
1262 }
1263
1264 static void tg3_mdio_fini(struct tg3 *tp)
1265 {
1266         if (tg3_flag(tp, MDIOBUS_INITED)) {
1267                 tg3_flag_clear(tp, MDIOBUS_INITED);
1268                 mdiobus_unregister(tp->mdio_bus);
1269                 mdiobus_free(tp->mdio_bus);
1270         }
1271 }
1272
1273 /* tp->lock is held. */
1274 static inline void tg3_generate_fw_event(struct tg3 *tp)
1275 {
1276         u32 val;
1277
1278         val = tr32(GRC_RX_CPU_EVENT);
1279         val |= GRC_RX_CPU_DRIVER_EVENT;
1280         tw32_f(GRC_RX_CPU_EVENT, val);
1281
1282         tp->last_event_jiffies = jiffies;
1283 }
1284
1285 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1286
1287 /* tp->lock is held. */
1288 static void tg3_wait_for_event_ack(struct tg3 *tp)
1289 {
1290         int i;
1291         unsigned int delay_cnt;
1292         long time_remain;
1293
1294         /* If enough time has passed, no wait is necessary. */
1295         time_remain = (long)(tp->last_event_jiffies + 1 +
1296                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1297                       (long)jiffies;
1298         if (time_remain < 0)
1299                 return;
1300
1301         /* Check if we can shorten the wait time. */
1302         delay_cnt = jiffies_to_usecs(time_remain);
1303         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1304                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1305         delay_cnt = (delay_cnt >> 3) + 1;
1306
1307         for (i = 0; i < delay_cnt; i++) {
1308                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1309                         break;
1310                 udelay(8);
1311         }
1312 }
1313
1314 /* tp->lock is held. */
1315 static void tg3_ump_link_report(struct tg3 *tp)
1316 {
1317         u32 reg;
1318         u32 val;
1319
1320         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1321                 return;
1322
1323         tg3_wait_for_event_ack(tp);
1324
1325         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1326
1327         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1328
1329         val = 0;
1330         if (!tg3_readphy(tp, MII_BMCR, &reg))
1331                 val = reg << 16;
1332         if (!tg3_readphy(tp, MII_BMSR, &reg))
1333                 val |= (reg & 0xffff);
1334         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1335
1336         val = 0;
1337         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1338                 val = reg << 16;
1339         if (!tg3_readphy(tp, MII_LPA, &reg))
1340                 val |= (reg & 0xffff);
1341         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1342
1343         val = 0;
1344         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1345                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1346                         val = reg << 16;
1347                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1348                         val |= (reg & 0xffff);
1349         }
1350         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1351
1352         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1353                 val = reg << 16;
1354         else
1355                 val = 0;
1356         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1357
1358         tg3_generate_fw_event(tp);
1359 }
1360
1361 static void tg3_link_report(struct tg3 *tp)
1362 {
1363         if (!netif_carrier_ok(tp->dev)) {
1364                 netif_info(tp, link, tp->dev, "Link is down\n");
1365                 tg3_ump_link_report(tp);
1366         } else if (netif_msg_link(tp)) {
1367                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1368                             (tp->link_config.active_speed == SPEED_1000 ?
1369                              1000 :
1370                              (tp->link_config.active_speed == SPEED_100 ?
1371                               100 : 10)),
1372                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1373                              "full" : "half"));
1374
1375                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1376                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1377                             "on" : "off",
1378                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1379                             "on" : "off");
1380
1381                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1382                         netdev_info(tp->dev, "EEE is %s\n",
1383                                     tp->setlpicnt ? "enabled" : "disabled");
1384
1385                 tg3_ump_link_report(tp);
1386         }
1387 }
1388
1389 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1390 {
1391         u16 miireg;
1392
1393         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1394                 miireg = ADVERTISE_PAUSE_CAP;
1395         else if (flow_ctrl & FLOW_CTRL_TX)
1396                 miireg = ADVERTISE_PAUSE_ASYM;
1397         else if (flow_ctrl & FLOW_CTRL_RX)
1398                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1399         else
1400                 miireg = 0;
1401
1402         return miireg;
1403 }
1404
1405 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1406 {
1407         u16 miireg;
1408
1409         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1410                 miireg = ADVERTISE_1000XPAUSE;
1411         else if (flow_ctrl & FLOW_CTRL_TX)
1412                 miireg = ADVERTISE_1000XPSE_ASYM;
1413         else if (flow_ctrl & FLOW_CTRL_RX)
1414                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1415         else
1416                 miireg = 0;
1417
1418         return miireg;
1419 }
1420
1421 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1422 {
1423         u8 cap = 0;
1424
1425         if (lcladv & ADVERTISE_1000XPAUSE) {
1426                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1427                         if (rmtadv & LPA_1000XPAUSE)
1428                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1429                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1430                                 cap = FLOW_CTRL_RX;
1431                 } else {
1432                         if (rmtadv & LPA_1000XPAUSE)
1433                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1434                 }
1435         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1436                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1437                         cap = FLOW_CTRL_TX;
1438         }
1439
1440         return cap;
1441 }
1442
1443 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1444 {
1445         u8 autoneg;
1446         u8 flowctrl = 0;
1447         u32 old_rx_mode = tp->rx_mode;
1448         u32 old_tx_mode = tp->tx_mode;
1449
1450         if (tg3_flag(tp, USE_PHYLIB))
1451                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1452         else
1453                 autoneg = tp->link_config.autoneg;
1454
1455         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1456                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1457                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1458                 else
1459                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1460         } else
1461                 flowctrl = tp->link_config.flowctrl;
1462
1463         tp->link_config.active_flowctrl = flowctrl;
1464
1465         if (flowctrl & FLOW_CTRL_RX)
1466                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1467         else
1468                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1469
1470         if (old_rx_mode != tp->rx_mode)
1471                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1472
1473         if (flowctrl & FLOW_CTRL_TX)
1474                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1475         else
1476                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1477
1478         if (old_tx_mode != tp->tx_mode)
1479                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1480 }
1481
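     /* phylib link-change callback, registered through phy_connect() in
      * tg3_phy_init() below.  Re-syncs the MAC mode, flow control and
      * MI status/TX length registers with the PHY's current state.
      */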
1482 static void tg3_adjust_link(struct net_device *dev)
1483 {
1484         u8 oldflowctrl, linkmesg = 0;
1485         u32 mac_mode, lcl_adv, rmt_adv;
1486         struct tg3 *tp = netdev_priv(dev);
1487         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1488
1489         spin_lock_bh(&tp->lock);
1490
1491         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1492                                     MAC_MODE_HALF_DUPLEX);
1493
1494         oldflowctrl = tp->link_config.active_flowctrl;
1495
1496         if (phydev->link) {
1497                 lcl_adv = 0;
1498                 rmt_adv = 0;
1499
1500                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1501                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1502                 else if (phydev->speed == SPEED_1000 ||
1503                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1504                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1505                 else
1506                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1507
1508                 if (phydev->duplex == DUPLEX_HALF)
1509                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1510                 else {
1511                         lcl_adv = tg3_advert_flowctrl_1000T(
1512                                   tp->link_config.flowctrl);
1513
1514                         if (phydev->pause)
1515                                 rmt_adv = LPA_PAUSE_CAP;
1516                         if (phydev->asym_pause)
1517                                 rmt_adv |= LPA_PAUSE_ASYM;
1518                 }
1519
1520                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1521         } else
1522                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1523
1524         if (mac_mode != tp->mac_mode) {
1525                 tp->mac_mode = mac_mode;
1526                 tw32_f(MAC_MODE, tp->mac_mode);
1527                 udelay(40);
1528         }
1529
1530         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1531                 if (phydev->speed == SPEED_10)
1532                         tw32(MAC_MI_STAT,
1533                              MAC_MI_STAT_10MBPS_MODE |
1534                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1535                 else
1536                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1537         }
1538
1539         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1540                 tw32(MAC_TX_LENGTHS,
1541                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1542                       (6 << TX_LENGTHS_IPG_SHIFT) |
1543                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1544         else
1545                 tw32(MAC_TX_LENGTHS,
1546                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1547                       (6 << TX_LENGTHS_IPG_SHIFT) |
1548                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1549
1550         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1551             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1552             phydev->speed != tp->link_config.active_speed ||
1553             phydev->duplex != tp->link_config.active_duplex ||
1554             oldflowctrl != tp->link_config.active_flowctrl)
1555                 linkmesg = 1;
1556
1557         tp->link_config.active_speed = phydev->speed;
1558         tp->link_config.active_duplex = phydev->duplex;
1559
1560         spin_unlock_bh(&tp->lock);
1561
1562         if (linkmesg)
1563                 tg3_link_report(tp);
1564 }
1565
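     /* Attach the MAC to its PHY via phylib and trim the PHY's
      * supported/advertised feature masks down to what the MAC
      * and board actually support.
      */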
1566 static int tg3_phy_init(struct tg3 *tp)
1567 {
1568         struct phy_device *phydev;
1569
1570         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1571                 return 0;
1572
1573         /* Bring the PHY back to a known state. */
1574         tg3_bmcr_reset(tp);
1575
1576         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1577
1578         /* Attach the MAC to the PHY. */
1579         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1580                              phydev->dev_flags, phydev->interface);
1581         if (IS_ERR(phydev)) {
1582                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1583                 return PTR_ERR(phydev);
1584         }
1585
1586         /* Mask with MAC supported features. */
1587         switch (phydev->interface) {
1588         case PHY_INTERFACE_MODE_GMII:
1589         case PHY_INTERFACE_MODE_RGMII:
1590                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1591                         phydev->supported &= (PHY_GBIT_FEATURES |
1592                                               SUPPORTED_Pause |
1593                                               SUPPORTED_Asym_Pause);
1594                         break;
1595                 }
1596                 /* fall through */
1597         case PHY_INTERFACE_MODE_MII:
1598                 phydev->supported &= (PHY_BASIC_FEATURES |
1599                                       SUPPORTED_Pause |
1600                                       SUPPORTED_Asym_Pause);
1601                 break;
1602         default:
1603                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1604                 return -EINVAL;
1605         }
1606
1607         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1608
1609         phydev->advertising = phydev->supported;
1610
1611         return 0;
1612 }
1613
1614 static void tg3_phy_start(struct tg3 *tp)
1615 {
1616         struct phy_device *phydev;
1617
1618         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1619                 return;
1620
1621         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1622
1623         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1624                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1625                 phydev->speed = tp->link_config.orig_speed;
1626                 phydev->duplex = tp->link_config.orig_duplex;
1627                 phydev->autoneg = tp->link_config.orig_autoneg;
1628                 phydev->advertising = tp->link_config.orig_advertising;
1629         }
1630
1631         phy_start(phydev);
1632
1633         phy_start_aneg(phydev);
1634 }
1635
1636 static void tg3_phy_stop(struct tg3 *tp)
1637 {
1638         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1639                 return;
1640
1641         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1642 }
1643
1644 static void tg3_phy_fini(struct tg3 *tp)
1645 {
1646         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1647                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1648                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1649         }
1650 }
1651
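     /* Toggle auto power-down (APD) on FET-style PHYs, which keep the
      * APD control in the shadow register space behind MII_TG3_FET_TEST.
      */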
1652 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1653 {
1654         u32 phytest;
1655
1656         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1657                 u32 phy;
1658
1659                 tg3_writephy(tp, MII_TG3_FET_TEST,
1660                              phytest | MII_TG3_FET_SHADOW_EN);
1661                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1662                         if (enable)
1663                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1664                         else
1665                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1666                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1667                 }
1668                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1669         }
1670 }
1671
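     /* Toggle auto power-down for the PHY, using either the FET shadow
      * registers or the MISC shadow block depending on the PHY type.
      */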
1672 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1673 {
1674         u32 reg;
1675
1676         if (!tg3_flag(tp, 5705_PLUS) ||
1677             (tg3_flag(tp, 5717_PLUS) &&
1678              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1679                 return;
1680
1681         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1682                 tg3_phy_fet_toggle_apd(tp, enable);
1683                 return;
1684         }
1685
1686         reg = MII_TG3_MISC_SHDW_WREN |
1687               MII_TG3_MISC_SHDW_SCR5_SEL |
1688               MII_TG3_MISC_SHDW_SCR5_LPED |
1689               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1690               MII_TG3_MISC_SHDW_SCR5_SDTL |
1691               MII_TG3_MISC_SHDW_SCR5_C125OE;
1692         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1693                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1694
1695         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1696
1698         reg = MII_TG3_MISC_SHDW_WREN |
1699               MII_TG3_MISC_SHDW_APD_SEL |
1700               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1701         if (enable)
1702                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1703
1704         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1705 }
1706
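     /* Enable or disable automatic MDI/MDI-X crossover on copper PHYs.
      * The register used differs between FET-style and standard PHYs.
      */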
1707 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1708 {
1709         u32 phy;
1710
1711         if (!tg3_flag(tp, 5705_PLUS) ||
1712             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1713                 return;
1714
1715         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1716                 u32 ephy;
1717
1718                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1719                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1720
1721                         tg3_writephy(tp, MII_TG3_FET_TEST,
1722                                      ephy | MII_TG3_FET_SHADOW_EN);
1723                         if (!tg3_readphy(tp, reg, &phy)) {
1724                                 if (enable)
1725                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1726                                 else
1727                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1728                                 tg3_writephy(tp, reg, phy);
1729                         }
1730                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1731                 }
1732         } else {
1733                 int ret;
1734
1735                 ret = tg3_phy_auxctl_read(tp,
1736                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1737                 if (!ret) {
1738                         if (enable)
1739                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1740                         else
1741                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1742                         tg3_phy_auxctl_write(tp,
1743                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1744                 }
1745         }
1746 }
1747
1748 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1749 {
1750         int ret;
1751         u32 val;
1752
1753         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1754                 return;
1755
1756         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1757         if (!ret)
1758                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1759                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1760 }
1761
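     /* Program PHY DSP tuning coefficients from the values previously
      * cached from the chip's OTP (one-time-programmable) storage.
      */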
1762 static void tg3_phy_apply_otp(struct tg3 *tp)
1763 {
1764         u32 otp, phy;
1765
1766         if (!tp->phy_otp)
1767                 return;
1768
1769         otp = tp->phy_otp;
1770
1771         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1772                 return;
1773
1774         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1775         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1776         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1777
1778         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1779               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1780         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1781
1782         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1783         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1784         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1785
1786         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1787         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1788
1789         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1790         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1791
1792         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1793               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1794         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1795
1796         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1797 }
1798
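     /* Re-evaluate Energy Efficient Ethernet state after a link change:
      * select the LPI exit timer for the new speed and disable LPI when
      * EEE was not successfully negotiated with the link partner.
      */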
1799 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1800 {
1801         u32 val;
1802
1803         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1804                 return;
1805
1806         tp->setlpicnt = 0;
1807
1808         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1809             current_link_up == 1 &&
1810             tp->link_config.active_duplex == DUPLEX_FULL &&
1811             (tp->link_config.active_speed == SPEED_100 ||
1812              tp->link_config.active_speed == SPEED_1000)) {
1813                 u32 eeectl;
1814
1815                 if (tp->link_config.active_speed == SPEED_1000)
1816                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1817                 else
1818                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1819
1820                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1821
1822                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1823                                   TG3_CL45_D7_EEERES_STAT, &val);
1824
1825                 switch (val) {
1826                 case TG3_CL45_D7_EEERES_STAT_LP_1000T:
1827                         switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
1828                         case ASIC_REV_5717:
1829                         case ASIC_REV_5719:
1830                         case ASIC_REV_57765:
1831                                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1832                                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26,
1833                                                          0x0000);
1834                                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1835                                 }
1836                         }
1837                         /* fall through */
1838                 case TG3_CL45_D7_EEERES_STAT_LP_100TX:
1839                         tp->setlpicnt = 2;
1840                 }
1841         }
1842
1843         if (!tp->setlpicnt) {
1844                 val = tr32(TG3_CPMU_EEE_MODE);
1845                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1846         }
1847 }
1848
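     /* Poll MII_TG3_DSP_CONTROL until the DSP macro-busy bit (0x1000)
      * clears, or give up after 100 reads.
      */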
1849 static int tg3_wait_macro_done(struct tg3 *tp)
1850 {
1851         int limit = 100;
1852
1853         while (limit--) {
1854                 u32 tmp32;
1855
1856                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1857                         if ((tmp32 & 0x1000) == 0)
1858                                 break;
1859                 }
1860         }
1861         if (limit < 0)
1862                 return -EBUSY;
1863
1864         return 0;
1865 }
1866
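     /* Write a test pattern into each of the four DSP channels and read
      * it back.  Returns -EBUSY on a verification failure; *resetp is
      * set when a macro-done poll times out, requesting another reset.
      */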
1867 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1868 {
1869         static const u32 test_pat[4][6] = {
1870         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1871         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1872         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1873         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1874         };
1875         int chan;
1876
1877         for (chan = 0; chan < 4; chan++) {
1878                 int i;
1879
1880                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1881                              (chan * 0x2000) | 0x0200);
1882                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1883
1884                 for (i = 0; i < 6; i++)
1885                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1886                                      test_pat[chan][i]);
1887
1888                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1889                 if (tg3_wait_macro_done(tp)) {
1890                         *resetp = 1;
1891                         return -EBUSY;
1892                 }
1893
1894                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1895                              (chan * 0x2000) | 0x0200);
1896                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1897                 if (tg3_wait_macro_done(tp)) {
1898                         *resetp = 1;
1899                         return -EBUSY;
1900                 }
1901
1902                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1903                 if (tg3_wait_macro_done(tp)) {
1904                         *resetp = 1;
1905                         return -EBUSY;
1906                 }
1907
1908                 for (i = 0; i < 6; i += 2) {
1909                         u32 low, high;
1910
1911                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1912                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1913                             tg3_wait_macro_done(tp)) {
1914                                 *resetp = 1;
1915                                 return -EBUSY;
1916                         }
1917                         low &= 0x7fff;
1918                         high &= 0x000f;
1919                         if (low != test_pat[chan][i] ||
1920                             high != test_pat[chan][i+1]) {
1921                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1922                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1923                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1924
1925                                 return -EBUSY;
1926                         }
1927                 }
1928         }
1929
1930         return 0;
1931 }
1932
1933 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1934 {
1935         int chan;
1936
1937         for (chan = 0; chan < 4; chan++) {
1938                 int i;
1939
1940                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1941                              (chan * 0x2000) | 0x0200);
1942                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1943                 for (i = 0; i < 6; i++)
1944                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1945                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1946                 if (tg3_wait_macro_done(tp))
1947                         return -EBUSY;
1948         }
1949
1950         return 0;
1951 }
1952
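     /* PHY DSP workaround for 5703/5704/5705: repeatedly reset the PHY
      * and rewrite the DSP test pattern until it verifies, then restore
      * the register state disturbed along the way.
      */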
1953 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1954 {
1955         u32 reg32, phy9_orig;
1956         int retries, do_phy_reset, err;
1957
1958         retries = 10;
1959         do_phy_reset = 1;
1960         do {
1961                 if (do_phy_reset) {
1962                         err = tg3_bmcr_reset(tp);
1963                         if (err)
1964                                 return err;
1965                         do_phy_reset = 0;
1966                 }
1967
1968                 /* Disable transmitter and interrupt.  */
1969                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1970                         continue;
1971
1972                 reg32 |= 0x3000;
1973                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1974
1975                 /* Set full-duplex, 1000 Mbps.  */
1976                 tg3_writephy(tp, MII_BMCR,
1977                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1978
1979                 /* Set to master mode.  */
1980                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1981                         continue;
1982
1983                 tg3_writephy(tp, MII_TG3_CTRL,
1984                              (MII_TG3_CTRL_AS_MASTER |
1985                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1986
1987                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1988                 if (err)
1989                         return err;
1990
1991                 /* Block the PHY control access.  */
1992                 tg3_phydsp_write(tp, 0x8005, 0x0800);
1993
1994                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1995                 if (!err)
1996                         break;
1997         } while (--retries);
1998
1999         err = tg3_phy_reset_chanpat(tp);
2000         if (err)
2001                 return err;
2002
2003         tg3_phydsp_write(tp, 0x8005, 0x0000);
2004
2005         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2006         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2007
2008         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2009
2010         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
2011
2012         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2013                 reg32 &= ~0x3000;
2014                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2015         } else if (!err)
2016                 err = -EBUSY;
2017
2018         return err;
2019 }
2020
2021 /* Unconditionally reset the tigon3 PHY and reapply the chip-specific
2022  * workarounds needed after any PHY reset.
2023  */
2024 static int tg3_phy_reset(struct tg3 *tp)
2025 {
2026         u32 val, cpmuctrl;
2027         int err;
2028
2029         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2030                 val = tr32(GRC_MISC_CFG);
2031                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2032                 udelay(40);
2033         }
2034         err  = tg3_readphy(tp, MII_BMSR, &val);
2035         err |= tg3_readphy(tp, MII_BMSR, &val);
2036         if (err != 0)
2037                 return -EBUSY;
2038
2039         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2040                 netif_carrier_off(tp->dev);
2041                 tg3_link_report(tp);
2042         }
2043
2044         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2045             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2046             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2047                 err = tg3_phy_reset_5703_4_5(tp);
2048                 if (err)
2049                         return err;
2050                 goto out;
2051         }
2052
2053         cpmuctrl = 0;
2054         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2055             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2056                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2057                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2058                         tw32(TG3_CPMU_CTRL,
2059                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2060         }
2061
2062         err = tg3_bmcr_reset(tp);
2063         if (err)
2064                 return err;
2065
2066         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2067                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2068                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2069
2070                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2071         }
2072
2073         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2074             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2075                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2076                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2077                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2078                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2079                         udelay(40);
2080                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2081                 }
2082         }
2083
2084         if (tg3_flag(tp, 5717_PLUS) &&
2085             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2086                 return 0;
2087
2088         tg3_phy_apply_otp(tp);
2089
2090         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2091                 tg3_phy_toggle_apd(tp, true);
2092         else
2093                 tg3_phy_toggle_apd(tp, false);
2094
2095 out:
2096         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2097             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2098                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2099                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2100                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2101         }
2102
2103         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2104                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2105                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2106         }
2107
2108         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2109                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2110                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2111                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2112                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2113                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2114                 }
2115         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2116                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2117                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2118                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2119                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2120                                 tg3_writephy(tp, MII_TG3_TEST1,
2121                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2122                         } else
2123                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2124
2125                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2126                 }
2127         }
2128
2129         /* Set the extended packet length bit (bit 14) on all chips
2130          * that support jumbo frames. */
2131         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2132                 /* Cannot do read-modify-write on 5401 */
2133                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2134         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2135                 /* Set bit 14 with read-modify-write to preserve other bits */
2136                 err = tg3_phy_auxctl_read(tp,
2137                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2138                 if (!err)
2139                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2140                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2141         }
2142
2143         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2144          * jumbo frame transmission.
2145          */
2146         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2147                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2148                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2149                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2150         }
2151
2152         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2153                 /* adjust output voltage */
2154                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2155         }
2156
2157         tg3_phy_toggle_automdix(tp, 1);
2158         tg3_phy_set_wirespeed(tp);
2159         return 0;
2160 }
2161
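     /* Decide whether this NIC (or, on two-port chips, its peer
      * function) still needs auxiliary power for WOL/ASF, and drive the
      * GPIOs that switch the board between Vmain and Vaux accordingly.
      */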
2162 static void tg3_frob_aux_power(struct tg3 *tp)
2163 {
2164         bool need_vaux = false;
2165
2166         /* The GPIOs do something completely different on 5719 and 57765. */
2167         if (!tg3_flag(tp, IS_NIC) ||
2168             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2169             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2170                 return;
2171
2172         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2173              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2174              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2175              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2176             tp->pdev_peer != tp->pdev) {
2177                 struct net_device *dev_peer;
2178
2179                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2180
2181                 /* remove_one() may have been run on the peer. */
2182                 if (dev_peer) {
2183                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2184
2185                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2186                                 return;
2187
2188                         if (tg3_flag(tp_peer, WOL_ENABLE) ||
2189                             tg3_flag(tp_peer, ENABLE_ASF))
2190                                 need_vaux = true;
2191                 }
2192         }
2193
2194         if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2195                 need_vaux = true;
2196
2197         if (need_vaux) {
2198                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2199                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2200                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2201                                     (GRC_LCLCTRL_GPIO_OE0 |
2202                                      GRC_LCLCTRL_GPIO_OE1 |
2203                                      GRC_LCLCTRL_GPIO_OE2 |
2204                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2205                                      GRC_LCLCTRL_GPIO_OUTPUT1),
2206                                     100);
2207                 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2208                            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2209                         /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2210                         u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2211                                              GRC_LCLCTRL_GPIO_OE1 |
2212                                              GRC_LCLCTRL_GPIO_OE2 |
2213                                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2214                                              GRC_LCLCTRL_GPIO_OUTPUT1 |
2215                                              tp->grc_local_ctrl;
2216                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2217
2218                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2219                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2220
2221                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2222                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2223                 } else {
2224                         u32 no_gpio2;
2225                         u32 grc_local_ctrl = 0;
2226
2227                         /* Workaround to avoid drawing too much supply current. */
2228                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2229                             ASIC_REV_5714) {
2230                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2231                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2232                                             grc_local_ctrl, 100);
2233                         }
2234
2235                         /* On 5753 and variants, GPIO2 cannot be used. */
2236                         no_gpio2 = tp->nic_sram_data_cfg &
2237                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
2238
2239                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2240                                          GRC_LCLCTRL_GPIO_OE1 |
2241                                          GRC_LCLCTRL_GPIO_OE2 |
2242                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
2243                                          GRC_LCLCTRL_GPIO_OUTPUT2;
2244                         if (no_gpio2) {
2245                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2246                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
2247                         }
2248                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2249                                                     grc_local_ctrl, 100);
2250
2251                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2252
2253                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2254                                                     grc_local_ctrl, 100);
2255
2256                         if (!no_gpio2) {
2257                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2258                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2259                                             grc_local_ctrl, 100);
2260                         }
2261                 }
2262         } else {
2263                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2264                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2265                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2266                                     (GRC_LCLCTRL_GPIO_OE1 |
2267                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2268
2269                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2270                                     GRC_LCLCTRL_GPIO_OE1, 100);
2271
2272                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2273                                     (GRC_LCLCTRL_GPIO_OE1 |
2274                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2275                 }
2276         }
2277 }
2278
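     /* Returns nonzero when MAC_MODE_LINK_POLARITY should be set for
      * the given link speed on 5700; depends on LED mode and PHY type.
      */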
2279 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2280 {
2281         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2282                 return 1;
2283         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2284                 if (speed != SPEED_10)
2285                         return 1;
2286         } else if (speed == SPEED_10)
2287                 return 1;
2288
2289         return 0;
2290 }
2291
2292 static int tg3_setup_phy(struct tg3 *, int);
2293
2294 #define RESET_KIND_SHUTDOWN     0
2295 #define RESET_KIND_INIT         1
2296 #define RESET_KIND_SUSPEND      2
2297
2298 static void tg3_write_sig_post_reset(struct tg3 *, int);
2299 static int tg3_halt_cpu(struct tg3 *, u32);
2300
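     /* Drop the PHY into its lowest usable power state.  Several chips
      * must never see BMCR_PDOWN because of hardware bugs, so this takes
      * chip-specific detours before (or instead of) the final write.
      */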
2301 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2302 {
2303         u32 val;
2304
2305         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2306                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2307                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2308                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2309
2310                         sg_dig_ctrl |=
2311                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2312                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2313                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2314                 }
2315                 return;
2316         }
2317
2318         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2319                 tg3_bmcr_reset(tp);
2320                 val = tr32(GRC_MISC_CFG);
2321                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2322                 udelay(40);
2323                 return;
2324         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2325                 u32 phytest;
2326                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2327                         u32 phy;
2328
2329                         tg3_writephy(tp, MII_ADVERTISE, 0);
2330                         tg3_writephy(tp, MII_BMCR,
2331                                      BMCR_ANENABLE | BMCR_ANRESTART);
2332
2333                         tg3_writephy(tp, MII_TG3_FET_TEST,
2334                                      phytest | MII_TG3_FET_SHADOW_EN);
2335                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2336                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2337                                 tg3_writephy(tp,
2338                                              MII_TG3_FET_SHDW_AUXMODE4,
2339                                              phy);
2340                         }
2341                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2342                 }
2343                 return;
2344         } else if (do_low_power) {
2345                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2346                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2347
2348                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2349                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2350                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2351                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2352         }
2353
2354         /* The PHY must not be powered down on the chips checked below
2355          * because of hardware bugs.
2356          */
2357         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2358             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2359             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2360              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2361                 return;
2362
2363         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2364             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2365                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2366                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2367                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2368                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2369         }
2370
2371         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2372 }
2373
2374 /* tp->lock is held. */
2375 static int tg3_nvram_lock(struct tg3 *tp)
2376 {
2377         if (tg3_flag(tp, NVRAM)) {
2378                 int i;
2379
2380                 if (tp->nvram_lock_cnt == 0) {
2381                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2382                         for (i = 0; i < 8000; i++) {
2383                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2384                                         break;
2385                                 udelay(20);
2386                         }
2387                         if (i == 8000) {
2388                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2389                                 return -ENODEV;
2390                         }
2391                 }
2392                 tp->nvram_lock_cnt++;
2393         }
2394         return 0;
2395 }
2396
2397 /* tp->lock is held. */
2398 static void tg3_nvram_unlock(struct tg3 *tp)
2399 {
2400         if (tg3_flag(tp, NVRAM)) {
2401                 if (tp->nvram_lock_cnt > 0)
2402                         tp->nvram_lock_cnt--;
2403                 if (tp->nvram_lock_cnt == 0)
2404                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2405         }
2406 }
2407
2408 /* tp->lock is held. */
2409 static void tg3_enable_nvram_access(struct tg3 *tp)
2410 {
2411         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2412                 u32 nvaccess = tr32(NVRAM_ACCESS);
2413
2414                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2415         }
2416 }
2417
2418 /* tp->lock is held. */
2419 static void tg3_disable_nvram_access(struct tg3 *tp)
2420 {
2421         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2422                 u32 nvaccess = tr32(NVRAM_ACCESS);
2423
2424                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2425         }
2426 }
2427
2428 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2429                                         u32 offset, u32 *val)
2430 {
2431         u32 tmp;
2432         int i;
2433
2434         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2435                 return -EINVAL;
2436
2437         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2438                                         EEPROM_ADDR_DEVID_MASK |
2439                                         EEPROM_ADDR_READ);
2440         tw32(GRC_EEPROM_ADDR,
2441              tmp |
2442              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2443              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2444               EEPROM_ADDR_ADDR_MASK) |
2445              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2446
2447         for (i = 0; i < 1000; i++) {
2448                 tmp = tr32(GRC_EEPROM_ADDR);
2449
2450                 if (tmp & EEPROM_ADDR_COMPLETE)
2451                         break;
2452                 msleep(1);
2453         }
2454         if (!(tmp & EEPROM_ADDR_COMPLETE))
2455                 return -EBUSY;
2456
2457         tmp = tr32(GRC_EEPROM_DATA);
2458
2459         /*
2460          * The data will always be opposite the native endian
2461          * format.  Perform a blind byteswap to compensate.
2462          */
2463         *val = swab32(tmp);
2464
2465         return 0;
2466 }
2467
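     /* Maximum number of 10 usec polls (about 100 ms total) to wait for
      * NVRAM_CMD_DONE.
      */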
2468 #define NVRAM_CMD_TIMEOUT 10000
2469
2470 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2471 {
2472         int i;
2473
2474         tw32(NVRAM_CMD, nvram_cmd);
2475         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2476                 udelay(10);
2477                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2478                         udelay(10);
2479                         break;
2480                 }
2481         }
2482
2483         if (i == NVRAM_CMD_TIMEOUT)
2484                 return -EBUSY;
2485
2486         return 0;
2487 }
2488
2489 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2490 {
2491         if (tg3_flag(tp, NVRAM) &&
2492             tg3_flag(tp, NVRAM_BUFFERED) &&
2493             tg3_flag(tp, FLASH) &&
2494             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2495             (tp->nvram_jedecnum == JEDEC_ATMEL))
2496
2497                 addr = ((addr / tp->nvram_pagesize) <<
2498                         ATMEL_AT45DB0X1B_PAGE_POS) +
2499                        (addr % tp->nvram_pagesize);
2500
2501         return addr;
2502 }
2503
2504 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2505 {
2506         if (tg3_flag(tp, NVRAM) &&
2507             tg3_flag(tp, NVRAM_BUFFERED) &&
2508             tg3_flag(tp, FLASH) &&
2509             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2510             (tp->nvram_jedecnum == JEDEC_ATMEL))
2511
2512                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2513                         tp->nvram_pagesize) +
2514                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2515
2516         return addr;
2517 }
2518
2519 /* NOTE: Data read in from NVRAM is byteswapped according to
2520  * the byteswapping settings for all other register accesses.
2521  * tg3 devices are BE devices, so on a BE machine, the data
2522  * returned will be exactly as it is seen in NVRAM.  On a LE
2523  * machine, the 32-bit value will be byteswapped.
2524  */
2525 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2526 {
2527         int ret;
2528
2529         if (!tg3_flag(tp, NVRAM))
2530                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2531
2532         offset = tg3_nvram_phys_addr(tp, offset);
2533
2534         if (offset > NVRAM_ADDR_MSK)
2535                 return -EINVAL;
2536
2537         ret = tg3_nvram_lock(tp);
2538         if (ret)
2539                 return ret;
2540
2541         tg3_enable_nvram_access(tp);
2542
2543         tw32(NVRAM_ADDR, offset);
2544         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2545                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2546
2547         if (ret == 0)
2548                 *val = tr32(NVRAM_RDDATA);
2549
2550         tg3_disable_nvram_access(tp);
2551
2552         tg3_nvram_unlock(tp);
2553
2554         return ret;
2555 }
2556
2557 /* Ensures NVRAM data is in bytestream format. */
2558 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2559 {
2560         u32 v;
2561         int res = tg3_nvram_read(tp, offset, &v);
2562         if (!res)
2563                 *val = cpu_to_be32(v);
2564         return res;
2565 }
2566
2567 /* tp->lock is held. */
2568 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2569 {
2570         u32 addr_high, addr_low;
2571         int i;
2572
2573         addr_high = ((tp->dev->dev_addr[0] << 8) |
2574                      tp->dev->dev_addr[1]);
2575         addr_low = ((tp->dev->dev_addr[2] << 24) |
2576                     (tp->dev->dev_addr[3] << 16) |
2577                     (tp->dev->dev_addr[4] <<  8) |
2578                     (tp->dev->dev_addr[5] <<  0));
2579         for (i = 0; i < 4; i++) {
2580                 if (i == 1 && skip_mac_1)
2581                         continue;
2582                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2583                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2584         }
2585
2586         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2587             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2588                 for (i = 0; i < 12; i++) {
2589                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2590                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2591                 }
2592         }
2593
2594         addr_high = (tp->dev->dev_addr[0] +
2595                      tp->dev->dev_addr[1] +
2596                      tp->dev->dev_addr[2] +
2597                      tp->dev->dev_addr[3] +
2598                      tp->dev->dev_addr[4] +
2599                      tp->dev->dev_addr[5]) &
2600                 TX_BACKOFF_SEED_MASK;
2601         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2602 }
2603
2604 static void tg3_enable_register_access(struct tg3 *tp)
2605 {
2606         /*
2607          * Make sure register accesses (indirect or otherwise) will function
2608          * correctly.
2609          */
2610         pci_write_config_dword(tp->pdev,
2611                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2612 }
2613
2614 static int tg3_power_up(struct tg3 *tp)
2615 {
2616         tg3_enable_register_access(tp);
2617
2618         pci_set_power_state(tp->pdev, PCI_D0);
2619
2620         /* Switch out of Vaux if it is a NIC */
2621         if (tg3_flag(tp, IS_NIC))
2622                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2623
2624         return 0;
2625 }
2626
2627 static int tg3_power_down_prepare(struct tg3 *tp)
2628 {
2629         u32 misc_host_ctrl;
2630         bool device_should_wake, do_low_power;
2631
2632         tg3_enable_register_access(tp);
2633
2634         /* Restore the CLKREQ setting. */
2635         if (tg3_flag(tp, CLKREQ_BUG)) {
2636                 u16 lnkctl;
2637
2638                 pci_read_config_word(tp->pdev,
2639                                      tp->pcie_cap + PCI_EXP_LNKCTL,
2640                                      &lnkctl);
2641                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2642                 pci_write_config_word(tp->pdev,
2643                                       tp->pcie_cap + PCI_EXP_LNKCTL,
2644                                       lnkctl);
2645         }
2646
2647         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2648         tw32(TG3PCI_MISC_HOST_CTRL,
2649              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2650
2651         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2652                              tg3_flag(tp, WOL_ENABLE);
2653
2654         if (tg3_flag(tp, USE_PHYLIB)) {
2655                 do_low_power = false;
2656                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2657                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2658                         struct phy_device *phydev;
2659                         u32 phyid, advertising;
2660
2661                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2662
2663                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2664
2665                         tp->link_config.orig_speed = phydev->speed;
2666                         tp->link_config.orig_duplex = phydev->duplex;
2667                         tp->link_config.orig_autoneg = phydev->autoneg;
2668                         tp->link_config.orig_advertising = phydev->advertising;
2669
2670                         advertising = ADVERTISED_TP |
2671                                       ADVERTISED_Pause |
2672                                       ADVERTISED_Autoneg |
2673                                       ADVERTISED_10baseT_Half;
2674
2675                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2676                                 if (tg3_flag(tp, WOL_SPEED_100MB))
2677                                         advertising |=
2678                                                 ADVERTISED_100baseT_Half |
2679                                                 ADVERTISED_100baseT_Full |
2680                                                 ADVERTISED_10baseT_Full;
2681                                 else
2682                                         advertising |= ADVERTISED_10baseT_Full;
2683                         }
2684
2685                         phydev->advertising = advertising;
2686
2687                         phy_start_aneg(phydev);
2688
2689                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2690                         if (phyid != PHY_ID_BCMAC131) {
2691                                 phyid &= PHY_BCM_OUI_MASK;
2692                                 if (phyid == PHY_BCM_OUI_1 ||
2693                                     phyid == PHY_BCM_OUI_2 ||
2694                                     phyid == PHY_BCM_OUI_3)
2695                                         do_low_power = true;
2696                         }
2697                 }
2698         } else {
2699                 do_low_power = true;
2700
2701                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2702                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2703                         tp->link_config.orig_speed = tp->link_config.speed;
2704                         tp->link_config.orig_duplex = tp->link_config.duplex;
2705                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2706                 }
2707
2708                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2709                         tp->link_config.speed = SPEED_10;
2710                         tp->link_config.duplex = DUPLEX_HALF;
2711                         tp->link_config.autoneg = AUTONEG_ENABLE;
2712                         tg3_setup_phy(tp, 0);
2713                 }
2714         }
2715
2716         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2717                 u32 val;
2718
2719                 val = tr32(GRC_VCPU_EXT_CTRL);
2720                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2721         } else if (!tg3_flag(tp, ENABLE_ASF)) {
2722                 int i;
2723                 u32 val;
2724
2725                 for (i = 0; i < 200; i++) {
2726                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2727                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2728                                 break;
2729                         msleep(1);
2730                 }
2731         }
2732         if (tg3_flag(tp, WOL_CAP))
2733                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2734                                                      WOL_DRV_STATE_SHUTDOWN |
2735                                                      WOL_DRV_WOL |
2736                                                      WOL_SET_MAGIC_PKT);
2737
2738         if (device_should_wake) {
2739                 u32 mac_mode;
2740
2741                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2742                         if (do_low_power &&
2743                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2744                                 tg3_phy_auxctl_write(tp,
2745                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2746                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
2747                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2748                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2749                                 udelay(40);
2750                         }
2751
2752                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2753                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2754                         else
2755                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2756
2757                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2758                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2759                             ASIC_REV_5700) {
2760                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2761                                              SPEED_100 : SPEED_10;
2762                                 if (tg3_5700_link_polarity(tp, speed))
2763                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2764                                 else
2765                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2766                         }
2767                 } else {
2768                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2769                 }
2770
2771                 if (!tg3_flag(tp, 5750_PLUS))
2772                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2773
2774                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2775                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2776                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2777                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2778
2779                 if (tg3_flag(tp, ENABLE_APE))
2780                         mac_mode |= MAC_MODE_APE_TX_EN |
2781                                     MAC_MODE_APE_RX_EN |
2782                                     MAC_MODE_TDE_ENABLE;
2783
2784                 tw32_f(MAC_MODE, mac_mode);
2785                 udelay(100);
2786
2787                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2788                 udelay(10);
2789         }
2790
2791         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2792             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2793              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2794                 u32 base_val;
2795
2796                 base_val = tp->pci_clock_ctrl;
2797                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2798                              CLOCK_CTRL_TXCLK_DISABLE);
2799
2800                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2801                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2802         } else if (tg3_flag(tp, 5780_CLASS) ||
2803                    tg3_flag(tp, CPMU_PRESENT) ||
2804                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2805                 /* do nothing */
2806         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2807                 u32 newbits1, newbits2;
2808
2809                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2810                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2811                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2812                                     CLOCK_CTRL_TXCLK_DISABLE |
2813                                     CLOCK_CTRL_ALTCLK);
2814                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2815                 } else if (tg3_flag(tp, 5705_PLUS)) {
2816                         newbits1 = CLOCK_CTRL_625_CORE;
2817                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2818                 } else {
2819                         newbits1 = CLOCK_CTRL_ALTCLK;
2820                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2821                 }
2822
2823                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2824                             40);
2825
2826                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2827                             40);
2828
2829                 if (!tg3_flag(tp, 5705_PLUS)) {
2830                         u32 newbits3;
2831
2832                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2833                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2834                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2835                                             CLOCK_CTRL_TXCLK_DISABLE |
2836                                             CLOCK_CTRL_44MHZ_CORE);
2837                         } else {
2838                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2839                         }
2840
2841                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2842                                     tp->pci_clock_ctrl | newbits3, 40);
2843                 }
2844         }
2845
2846         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
2847                 tg3_power_down_phy(tp, do_low_power);
2848
2849         tg3_frob_aux_power(tp);
2850
2851         /* Workaround for unstable PLL clock on 5750 AX/BX chips */
2852         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2853             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2854                 u32 val = tr32(0x7d00);
2855
2856                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2857                 tw32(0x7d00, val);
2858                 if (!tg3_flag(tp, ENABLE_ASF)) {
2859                         int err;
2860
2861                         err = tg3_nvram_lock(tp);
2862                         tg3_halt_cpu(tp, RX_CPU_BASE);
2863                         if (!err)
2864                                 tg3_nvram_unlock(tp);
2865                 }
2866         }
2867
2868         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2869
2870         return 0;
2871 }
2872
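     /* Final power-down: after preparing the chip, arm PCI wake-up
      * when WOL is enabled and drop the device into D3hot.
      */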
2873 static void tg3_power_down(struct tg3 *tp)
2874 {
2875         tg3_power_down_prepare(tp);
2876
2877         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2878         pci_set_power_state(tp->pdev, PCI_D3hot);
2879 }
2880
2881 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2882 {
2883         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2884         case MII_TG3_AUX_STAT_10HALF:
2885                 *speed = SPEED_10;
2886                 *duplex = DUPLEX_HALF;
2887                 break;
2888
2889         case MII_TG3_AUX_STAT_10FULL:
2890                 *speed = SPEED_10;
2891                 *duplex = DUPLEX_FULL;
2892                 break;
2893
2894         case MII_TG3_AUX_STAT_100HALF:
2895                 *speed = SPEED_100;
2896                 *duplex = DUPLEX_HALF;
2897                 break;
2898
2899         case MII_TG3_AUX_STAT_100FULL:
2900                 *speed = SPEED_100;
2901                 *duplex = DUPLEX_FULL;
2902                 break;
2903
2904         case MII_TG3_AUX_STAT_1000HALF:
2905                 *speed = SPEED_1000;
2906                 *duplex = DUPLEX_HALF;
2907                 break;
2908
2909         case MII_TG3_AUX_STAT_1000FULL:
2910                 *speed = SPEED_1000;
2911                 *duplex = DUPLEX_FULL;
2912                 break;
2913
2914         default:
2915                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2916                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2917                                  SPEED_10;
2918                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2919                                   DUPLEX_HALF;
2920                         break;
2921                 }
2922                 *speed = SPEED_INVALID;
2923                 *duplex = DUPLEX_INVALID;
2924                 break;
2925         }
2926 }
2927
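     /* Program the copper PHY for the requested link configuration:
      * set up the MII advertisement registers, then either force the
      * configured speed/duplex or (re)start autonegotiation.
      */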
2928 static void tg3_phy_copper_begin(struct tg3 *tp)
2929 {
2930         u32 new_adv;
2931         int i;
2932
2933         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2934                 /* Entering low power mode.  Disable gigabit and
2935                  * 100baseT advertisements.
2936                  */
2937                 tg3_writephy(tp, MII_TG3_CTRL, 0);
2938
2939                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2940                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2941                 if (tg3_flag(tp, WOL_SPEED_100MB))
2942                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2943
2944                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2945         } else if (tp->link_config.speed == SPEED_INVALID) {
2946                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2947                         tp->link_config.advertising &=
2948                                 ~(ADVERTISED_1000baseT_Half |
2949                                   ADVERTISED_1000baseT_Full);
2950
2951                 new_adv = ADVERTISE_CSMA;
2952                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2953                         new_adv |= ADVERTISE_10HALF;
2954                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2955                         new_adv |= ADVERTISE_10FULL;
2956                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2957                         new_adv |= ADVERTISE_100HALF;
2958                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2959                         new_adv |= ADVERTISE_100FULL;
2960
2961                 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2962
2963                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2964
2965                 if (tp->link_config.advertising &
2966                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2967                         new_adv = 0;
2968                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2969                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2970                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2971                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2972                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
2973                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2974                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2975                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2976                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2977                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2978                 } else {
2979                         tg3_writephy(tp, MII_TG3_CTRL, 0);
2980                 }
2981         } else {
2982                 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2983                 new_adv |= ADVERTISE_CSMA;
2984
2985                 /* Asking for a specific link mode. */
2986                 if (tp->link_config.speed == SPEED_1000) {
2987                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2988
2989                         if (tp->link_config.duplex == DUPLEX_FULL)
2990                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2991                         else
2992                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2993                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2994                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2995                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2996                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2997                 } else {
2998                         if (tp->link_config.speed == SPEED_100) {
2999                                 if (tp->link_config.duplex == DUPLEX_FULL)
3000                                         new_adv |= ADVERTISE_100FULL;
3001                                 else
3002                                         new_adv |= ADVERTISE_100HALF;
3003                         } else {
3004                                 if (tp->link_config.duplex == DUPLEX_FULL)
3005                                         new_adv |= ADVERTISE_10FULL;
3006                                 else
3007                                         new_adv |= ADVERTISE_10HALF;
3008                         }
3009                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
3010
3011                         new_adv = 0;
3012                 }
3013
3014                 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
3015         }
3016
3017         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
3018                 u32 val;
3019
3020                 tw32(TG3_CPMU_EEE_MODE,
3021                      tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3022
3023                 TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3024
3025                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3026                 case ASIC_REV_5717:
3027                 case ASIC_REV_57765:
3028                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3029                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3030                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3031                         /* Fall through */
3032                 case ASIC_REV_5719:
3033                         val = MII_TG3_DSP_TAP26_ALNOKO |
3034                               MII_TG3_DSP_TAP26_RMRXSTO |
3035                               MII_TG3_DSP_TAP26_OPCSINPT;
3036                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3037                 }
3038
3039                 val = 0;
3040                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3041                         /* Advertise 100BASE-TX EEE ability */
3042                         if (tp->link_config.advertising &
3043                             ADVERTISED_100baseT_Full)
3044                                 val |= MDIO_AN_EEE_ADV_100TX;
3045                         /* Advertise 1000BASE-T EEE ability */
3046                         if (tp->link_config.advertising &
3047                             ADVERTISED_1000baseT_Full)
3048                                 val |= MDIO_AN_EEE_ADV_1000T;
3049                 }
3050                 tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3051
3052                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3053         }
3054
3055         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3056             tp->link_config.speed != SPEED_INVALID) {
3057                 u32 bmcr, orig_bmcr;
3058
3059                 tp->link_config.active_speed = tp->link_config.speed;
3060                 tp->link_config.active_duplex = tp->link_config.duplex;
3061
3062                 bmcr = 0;
3063                 switch (tp->link_config.speed) {
3064                 default:
3065                 case SPEED_10:
3066                         break;
3067
3068                 case SPEED_100:
3069                         bmcr |= BMCR_SPEED100;
3070                         break;
3071
3072                 case SPEED_1000:
3073                         bmcr |= TG3_BMCR_SPEED1000;
3074                         break;
3075                 }
3076
3077                 if (tp->link_config.duplex == DUPLEX_FULL)
3078                         bmcr |= BMCR_FULLDPLX;
3079
3080                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3081                     (bmcr != orig_bmcr)) {
3082                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3083                         for (i = 0; i < 1500; i++) {
3084                                 u32 tmp;
3085
3086                                 udelay(10);
3087                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3088                                     tg3_readphy(tp, MII_BMSR, &tmp))
3089                                         continue;
3090                                 if (!(tmp & BMSR_LSTATUS)) {
3091                                         udelay(40);
3092                                         break;
3093                                 }
3094                         }
3095                         tg3_writephy(tp, MII_BMCR, bmcr);
3096                         udelay(40);
3097                 }
3098         } else {
3099                 tg3_writephy(tp, MII_BMCR,
3100                              BMCR_ANENABLE | BMCR_ANRESTART);
3101         }
3102 }
3103
3104 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3105 {
3106         int err;
3107
3108         /* Turn off tap power management and set the
3109          * extended packet length bit. */
3110         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3111
3112         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3113         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3114         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3115         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3116         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3117
3118         udelay(40);
3119
3120         return err;
3121 }
3122
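     /* Return 1 if the PHY currently advertises every mode requested
      * in @mask, 0 otherwise (or if the advertisement registers
      * cannot be read).
      */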
3123 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3124 {
3125         u32 adv_reg, all_mask = 0;
3126
3127         if (mask & ADVERTISED_10baseT_Half)
3128                 all_mask |= ADVERTISE_10HALF;
3129         if (mask & ADVERTISED_10baseT_Full)
3130                 all_mask |= ADVERTISE_10FULL;
3131         if (mask & ADVERTISED_100baseT_Half)
3132                 all_mask |= ADVERTISE_100HALF;
3133         if (mask & ADVERTISED_100baseT_Full)
3134                 all_mask |= ADVERTISE_100FULL;
3135
3136         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3137                 return 0;
3138
3139         if ((adv_reg & all_mask) != all_mask)
3140                 return 0;
3141         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3142                 u32 tg3_ctrl;
3143
3144                 all_mask = 0;
3145                 if (mask & ADVERTISED_1000baseT_Half)
3146                         all_mask |= ADVERTISE_1000HALF;
3147                 if (mask & ADVERTISED_1000baseT_Full)
3148                         all_mask |= ADVERTISE_1000FULL;
3149
3150                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3151                         return 0;
3152
3153                 if ((tg3_ctrl & all_mask) != all_mask)
3154                         return 0;
3155         }
3156         return 1;
3157 }
3158
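     /* Verify that the advertised pause bits match the requested flow
      * control.  Returns 0 only when the link is full duplex and the
      * advertisement must change (forcing a renegotiation); otherwise
      * returns 1, reprogramming the advertisement if needed.
      */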
3159 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3160 {
3161         u32 curadv, reqadv;
3162
3163         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3164                 return 1;
3165
3166         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3167         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3168
3169         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3170                 if (curadv != reqadv)
3171                         return 0;
3172
3173                 if (tg3_flag(tp, PAUSE_AUTONEG))
3174                         tg3_readphy(tp, MII_LPA, rmtadv);
3175         } else {
3176                 /* Reprogram the advertisement register, even if it
3177                  * does not affect the current link.  If the link
3178                  * gets renegotiated in the future, we can save an
3179                  * additional renegotiation cycle by advertising
3180                  * it correctly in the first place.
3181                  */
3182                 if (curadv != reqadv) {
3183                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3184                                      ADVERTISE_PAUSE_ASYM);
3185                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3186                 }
3187         }
3188
3189         return 1;
3190 }
3191
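     /* (Re)establish the copper link: clear stale MAC/PHY state, apply
      * PHY errata workarounds, read back the negotiated speed/duplex,
      * and program MAC_MODE to match.
      */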
3192 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3193 {
3194         int current_link_up;
3195         u32 bmsr, val;
3196         u32 lcl_adv, rmt_adv;
3197         u16 current_speed;
3198         u8 current_duplex;
3199         int i, err;
3200
3201         tw32(MAC_EVENT, 0);
3202
3203         tw32_f(MAC_STATUS,
3204              (MAC_STATUS_SYNC_CHANGED |
3205               MAC_STATUS_CFG_CHANGED |
3206               MAC_STATUS_MI_COMPLETION |
3207               MAC_STATUS_LNKSTATE_CHANGED));
3208         udelay(40);
3209
3210         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3211                 tw32_f(MAC_MI_MODE,
3212                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3213                 udelay(80);
3214         }
3215
3216         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3217
3218         /* Some third-party PHYs need to be reset on link going
3219          * down.
3220          */
3221         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3222              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3223              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3224             netif_carrier_ok(tp->dev)) {
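                 /* BMSR_LSTATUS is latched-low, so read BMSR twice;
                  * the second read reflects the current link state.
                  */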
3225                 tg3_readphy(tp, MII_BMSR, &bmsr);
3226                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3227                     !(bmsr & BMSR_LSTATUS))
3228                         force_reset = 1;
3229         }
3230         if (force_reset)
3231                 tg3_phy_reset(tp);
3232
3233         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3234                 tg3_readphy(tp, MII_BMSR, &bmsr);
3235                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3236                     !tg3_flag(tp, INIT_COMPLETE))
3237                         bmsr = 0;
3238
3239                 if (!(bmsr & BMSR_LSTATUS)) {
3240                         err = tg3_init_5401phy_dsp(tp);
3241                         if (err)
3242                                 return err;
3243
3244                         tg3_readphy(tp, MII_BMSR, &bmsr);
3245                         for (i = 0; i < 1000; i++) {
3246                                 udelay(10);
3247                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3248                                     (bmsr & BMSR_LSTATUS)) {
3249                                         udelay(40);
3250                                         break;
3251                                 }
3252                         }
3253
3254                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3255                             TG3_PHY_REV_BCM5401_B0 &&
3256                             !(bmsr & BMSR_LSTATUS) &&
3257                             tp->link_config.active_speed == SPEED_1000) {
3258                                 err = tg3_phy_reset(tp);
3259                                 if (!err)
3260                                         err = tg3_init_5401phy_dsp(tp);
3261                                 if (err)
3262                                         return err;
3263                         }
3264                 }
3265         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3266                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3267                 /* 5701 {A0,B0} CRC bug workaround */
3268                 tg3_writephy(tp, 0x15, 0x0a75);
3269                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3270                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3271                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3272         }
3273
3274         /* Clear pending interrupts... */
3275         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3276         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3277
3278         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3279                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3280         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3281                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3282
3283         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3284             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3285                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3286                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3287                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3288                 else
3289                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3290         }
3291
3292         current_link_up = 0;
3293         current_speed = SPEED_INVALID;
3294         current_duplex = DUPLEX_INVALID;
3295
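         /* Capacitively coupled link workaround: if the MISCTEST
          * shadow bit (1 << 10) is clear, set it and restart link
          * setup from scratch.
          */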
3296         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3297                 err = tg3_phy_auxctl_read(tp,
3298                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3299                                           &val);
3300                 if (!err && !(val & (1 << 10))) {
3301                         tg3_phy_auxctl_write(tp,
3302                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3303                                              val | (1 << 10));
3304                         goto relink;
3305                 }
3306         }
3307
3308         bmsr = 0;
3309         for (i = 0; i < 100; i++) {
3310                 tg3_readphy(tp, MII_BMSR, &bmsr);
3311                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3312                     (bmsr & BMSR_LSTATUS))
3313                         break;
3314                 udelay(40);
3315         }
3316
3317         if (bmsr & BMSR_LSTATUS) {
3318                 u32 aux_stat, bmcr;
3319
3320                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3321                 for (i = 0; i < 2000; i++) {
3322                         udelay(10);
3323                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3324                             aux_stat)
3325                                 break;
3326                 }
3327
3328                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3329                                              &current_speed,
3330                                              &current_duplex);
3331
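                 /* Poll BMCR until it returns something plausible;
                  * 0 and 0x7fff are treated as bogus reads.
                  */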
3332                 bmcr = 0;
3333                 for (i = 0; i < 200; i++) {
3334                         tg3_readphy(tp, MII_BMCR, &bmcr);
3335                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3336                                 continue;
3337                         if (bmcr && bmcr != 0x7fff)
3338                                 break;
3339                         udelay(10);
3340                 }
3341
3342                 lcl_adv = 0;
3343                 rmt_adv = 0;
3344
3345                 tp->link_config.active_speed = current_speed;
3346                 tp->link_config.active_duplex = current_duplex;
3347
3348                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3349                         if ((bmcr & BMCR_ANENABLE) &&
3350                             tg3_copper_is_advertising_all(tp,
3351                                                 tp->link_config.advertising)) {
3352                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3353                                                                   &rmt_adv))
3354                                         current_link_up = 1;
3355                         }
3356                 } else {
3357                         if (!(bmcr & BMCR_ANENABLE) &&
3358                             tp->link_config.speed == current_speed &&
3359                             tp->link_config.duplex == current_duplex &&
3360                             tp->link_config.flowctrl ==
3361                             tp->link_config.active_flowctrl) {
3362                                 current_link_up = 1;
3363                         }
3364                 }
3365
3366                 if (current_link_up == 1 &&
3367                     tp->link_config.active_duplex == DUPLEX_FULL)
3368                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3369         }
3370
3371 relink:
3372         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3373                 tg3_phy_copper_begin(tp);
3374
3375                 tg3_readphy(tp, MII_BMSR, &bmsr);
3376                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3377                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3378                         current_link_up = 1;
3379         }
3380
3381         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3382         if (current_link_up == 1) {
3383                 if (tp->link_config.active_speed == SPEED_100 ||
3384                     tp->link_config.active_speed == SPEED_10)
3385                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3386                 else
3387                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3388         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3389                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3390         else
3391                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3392
3393         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3394         if (tp->link_config.active_duplex == DUPLEX_HALF)
3395                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3396
3397         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3398                 if (current_link_up == 1 &&
3399                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3400                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3401                 else
3402                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3403         }
3404
3405         /* ??? Without this setting Netgear GA302T PHY does not
3406          * ??? send/receive packets...
3407          */
3408         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3409             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3410                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3411                 tw32_f(MAC_MI_MODE, tp->mi_mode);
3412                 udelay(80);
3413         }
3414
3415         tw32_f(MAC_MODE, tp->mac_mode);
3416         udelay(40);
3417
3418         tg3_phy_eee_adjust(tp, current_link_up);
3419
3420         if (tg3_flag(tp, USE_LINKCHG_REG)) {
3421                 /* Polled via timer. */
3422                 tw32_f(MAC_EVENT, 0);
3423         } else {
3424                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3425         }
3426         udelay(40);
3427
3428         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3429             current_link_up == 1 &&
3430             tp->link_config.active_speed == SPEED_1000 &&
3431             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3432                 udelay(120);
3433                 tw32_f(MAC_STATUS,
3434                      (MAC_STATUS_SYNC_CHANGED |
3435                       MAC_STATUS_CFG_CHANGED));
3436                 udelay(40);
3437                 tg3_write_mem(tp,
3438                               NIC_SRAM_FIRMWARE_MBOX,
3439                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3440         }
3441
3442         /* Prevent send BD corruption: disable CLKREQ at 10/100 speeds. */
3443         if (tg3_flag(tp, CLKREQ_BUG)) {
3444                 u16 oldlnkctl, newlnkctl;
3445
3446                 pci_read_config_word(tp->pdev,
3447                                      tp->pcie_cap + PCI_EXP_LNKCTL,
3448                                      &oldlnkctl);
3449                 if (tp->link_config.active_speed == SPEED_100 ||
3450                     tp->link_config.active_speed == SPEED_10)
3451                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3452                 else
3453                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3454                 if (newlnkctl != oldlnkctl)
3455                         pci_write_config_word(tp->pdev,
3456                                               tp->pcie_cap + PCI_EXP_LNKCTL,
3457                                               newlnkctl);
3458         }
3459
3460         if (current_link_up != netif_carrier_ok(tp->dev)) {
3461                 if (current_link_up)
3462                         netif_carrier_on(tp->dev);
3463                 else
3464                         netif_carrier_off(tp->dev);
3465                 tg3_link_report(tp);
3466         }
3467
3468         return 0;
3469 }
3470
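     /* Software state for the 1000BASE-X autonegotiation state machine
      * used when hardware autoneg (HW_AUTONEG) is not available.
      */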
3471 struct tg3_fiber_aneginfo {
3472         int state;
3473 #define ANEG_STATE_UNKNOWN              0
3474 #define ANEG_STATE_AN_ENABLE            1
3475 #define ANEG_STATE_RESTART_INIT         2
3476 #define ANEG_STATE_RESTART              3
3477 #define ANEG_STATE_DISABLE_LINK_OK      4
3478 #define ANEG_STATE_ABILITY_DETECT_INIT  5
3479 #define ANEG_STATE_ABILITY_DETECT       6
3480 #define ANEG_STATE_ACK_DETECT_INIT      7
3481 #define ANEG_STATE_ACK_DETECT           8
3482 #define ANEG_STATE_COMPLETE_ACK_INIT    9
3483 #define ANEG_STATE_COMPLETE_ACK         10
3484 #define ANEG_STATE_IDLE_DETECT_INIT     11
3485 #define ANEG_STATE_IDLE_DETECT          12
3486 #define ANEG_STATE_LINK_OK              13
3487 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
3488 #define ANEG_STATE_NEXT_PAGE_WAIT       15
3489
3490         u32 flags;
3491 #define MR_AN_ENABLE            0x00000001
3492 #define MR_RESTART_AN           0x00000002
3493 #define MR_AN_COMPLETE          0x00000004
3494 #define MR_PAGE_RX              0x00000008
3495 #define MR_NP_LOADED            0x00000010
3496 #define MR_TOGGLE_TX            0x00000020
3497 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
3498 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
3499 #define MR_LP_ADV_SYM_PAUSE     0x00000100
3500 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
3501 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3502 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3503 #define MR_LP_ADV_NEXT_PAGE     0x00001000
3504 #define MR_TOGGLE_RX            0x00002000
3505 #define MR_NP_RX                0x00004000
3506
3507 #define MR_LINK_OK              0x80000000
3508
3509         unsigned long link_time, cur_time;
3510
3511         u32 ability_match_cfg;
3512         int ability_match_count;
3513
3514         char ability_match, idle_match, ack_match;
3515
3516         u32 txconfig, rxconfig;
3517 #define ANEG_CFG_NP             0x00000080
3518 #define ANEG_CFG_ACK            0x00000040
3519 #define ANEG_CFG_RF2            0x00000020
3520 #define ANEG_CFG_RF1            0x00000010
3521 #define ANEG_CFG_PS2            0x00000001
3522 #define ANEG_CFG_PS1            0x00008000
3523 #define ANEG_CFG_HD             0x00004000
3524 #define ANEG_CFG_FD             0x00002000
3525 #define ANEG_CFG_INVAL          0x00001f06
3526
3527 };
3528 #define ANEG_OK         0
3529 #define ANEG_DONE       1
3530 #define ANEG_TIMER_ENAB 2
3531 #define ANEG_FAILED     -1
3532
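     /* The settle time is measured in state-machine ticks; fiber_autoneg()
      * below advances one tick per microsecond, so this is roughly 10 ms.
      */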
3533 #define ANEG_STATE_SETTLE_TIME  10000
3534
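     /* Advance the 1000BASE-X autoneg state machine by one tick.
      * Returns ANEG_OK or ANEG_TIMER_ENAB while negotiation is still
      * in progress, ANEG_DONE on completion, or ANEG_FAILED.
      */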
3535 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3536                                    struct tg3_fiber_aneginfo *ap)
3537 {
3538         u16 flowctrl;
3539         unsigned long delta;
3540         u32 rx_cfg_reg;
3541         int ret;
3542
3543         if (ap->state == ANEG_STATE_UNKNOWN) {
3544                 ap->rxconfig = 0;
3545                 ap->link_time = 0;
3546                 ap->cur_time = 0;
3547                 ap->ability_match_cfg = 0;
3548                 ap->ability_match_count = 0;
3549                 ap->ability_match = 0;
3550                 ap->idle_match = 0;
3551                 ap->ack_match = 0;
3552         }
3553         ap->cur_time++;
3554
3555         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3556                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3557
3558                 if (rx_cfg_reg != ap->ability_match_cfg) {
3559                         ap->ability_match_cfg = rx_cfg_reg;
3560                         ap->ability_match = 0;
3561                         ap->ability_match_count = 0;
3562                 } else {
3563                         if (++ap->ability_match_count > 1) {
3564                                 ap->ability_match = 1;
3565                                 ap->ability_match_cfg = rx_cfg_reg;
3566                         }
3567                 }
3568                 if (rx_cfg_reg & ANEG_CFG_ACK)
3569                         ap->ack_match = 1;
3570                 else
3571                         ap->ack_match = 0;
3572
3573                 ap->idle_match = 0;
3574         } else {
3575                 ap->idle_match = 1;
3576                 ap->ability_match_cfg = 0;
3577                 ap->ability_match_count = 0;
3578                 ap->ability_match = 0;
3579                 ap->ack_match = 0;
3580
3581                 rx_cfg_reg = 0;
3582         }
3583
3584         ap->rxconfig = rx_cfg_reg;
3585         ret = ANEG_OK;
3586
3587         switch (ap->state) {
3588         case ANEG_STATE_UNKNOWN:
3589                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3590                         ap->state = ANEG_STATE_AN_ENABLE;
3591
3592                 /* fall through */
3593         case ANEG_STATE_AN_ENABLE:
3594                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3595                 if (ap->flags & MR_AN_ENABLE) {
3596                         ap->link_time = 0;
3597                         ap->cur_time = 0;
3598                         ap->ability_match_cfg = 0;
3599                         ap->ability_match_count = 0;
3600                         ap->ability_match = 0;
3601                         ap->idle_match = 0;
3602                         ap->ack_match = 0;
3603
3604                         ap->state = ANEG_STATE_RESTART_INIT;
3605                 } else {
3606                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3607                 }
3608                 break;
3609
3610         case ANEG_STATE_RESTART_INIT:
3611                 ap->link_time = ap->cur_time;
3612                 ap->flags &= ~(MR_NP_LOADED);
3613                 ap->txconfig = 0;
3614                 tw32(MAC_TX_AUTO_NEG, 0);
3615                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3616                 tw32_f(MAC_MODE, tp->mac_mode);
3617                 udelay(40);
3618
3619                 ret = ANEG_TIMER_ENAB;
3620                 ap->state = ANEG_STATE_RESTART;
3621
3622                 /* fall through */
3623         case ANEG_STATE_RESTART:
3624                 delta = ap->cur_time - ap->link_time;
3625                 if (delta > ANEG_STATE_SETTLE_TIME)
3626                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3627                 else
3628                         ret = ANEG_TIMER_ENAB;
3629                 break;
3630
3631         case ANEG_STATE_DISABLE_LINK_OK:
3632                 ret = ANEG_DONE;
3633                 break;
3634
3635         case ANEG_STATE_ABILITY_DETECT_INIT:
3636                 ap->flags &= ~(MR_TOGGLE_TX);
3637                 ap->txconfig = ANEG_CFG_FD;
3638                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3639                 if (flowctrl & ADVERTISE_1000XPAUSE)
3640                         ap->txconfig |= ANEG_CFG_PS1;
3641                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3642                         ap->txconfig |= ANEG_CFG_PS2;
3643                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3644                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3645                 tw32_f(MAC_MODE, tp->mac_mode);
3646                 udelay(40);
3647
3648                 ap->state = ANEG_STATE_ABILITY_DETECT;
3649                 break;
3650
3651         case ANEG_STATE_ABILITY_DETECT:
3652                 if (ap->ability_match != 0 && ap->rxconfig != 0)
3653                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3654                 break;
3655
3656         case ANEG_STATE_ACK_DETECT_INIT:
3657                 ap->txconfig |= ANEG_CFG_ACK;
3658                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3659                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3660                 tw32_f(MAC_MODE, tp->mac_mode);
3661                 udelay(40);
3662
3663                 ap->state = ANEG_STATE_ACK_DETECT;
3664
3665                 /* fall through */
3666         case ANEG_STATE_ACK_DETECT:
3667                 if (ap->ack_match != 0) {
3668                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3669                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3670                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3671                         } else {
3672                                 ap->state = ANEG_STATE_AN_ENABLE;
3673                         }
3674                 } else if (ap->ability_match != 0 &&
3675                            ap->rxconfig == 0) {
3676                         ap->state = ANEG_STATE_AN_ENABLE;
3677                 }
3678                 break;
3679
3680         case ANEG_STATE_COMPLETE_ACK_INIT:
3681                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3682                         ret = ANEG_FAILED;
3683                         break;
3684                 }
3685                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3686                                MR_LP_ADV_HALF_DUPLEX |
3687                                MR_LP_ADV_SYM_PAUSE |
3688                                MR_LP_ADV_ASYM_PAUSE |
3689                                MR_LP_ADV_REMOTE_FAULT1 |
3690                                MR_LP_ADV_REMOTE_FAULT2 |
3691                                MR_LP_ADV_NEXT_PAGE |
3692                                MR_TOGGLE_RX |
3693                                MR_NP_RX);
3694                 if (ap->rxconfig & ANEG_CFG_FD)
3695                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3696                 if (ap->rxconfig & ANEG_CFG_HD)
3697                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3698                 if (ap->rxconfig & ANEG_CFG_PS1)
3699                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3700                 if (ap->rxconfig & ANEG_CFG_PS2)
3701                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3702                 if (ap->rxconfig & ANEG_CFG_RF1)
3703                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3704                 if (ap->rxconfig & ANEG_CFG_RF2)
3705                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3706                 if (ap->rxconfig & ANEG_CFG_NP)
3707                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3708
3709                 ap->link_time = ap->cur_time;
3710
3711                 ap->flags ^= (MR_TOGGLE_TX);
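                 /* Mirror bit 0x0008 of the received config word into
                  * MR_TOGGLE_RX; the state machine uses it as its RX
                  * toggle bit.
                  */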
3712                 if (ap->rxconfig & 0x0008)
3713                         ap->flags |= MR_TOGGLE_RX;
3714                 if (ap->rxconfig & ANEG_CFG_NP)
3715                         ap->flags |= MR_NP_RX;
3716                 ap->flags |= MR_PAGE_RX;
3717
3718                 ap->state = ANEG_STATE_COMPLETE_ACK;
3719                 ret = ANEG_TIMER_ENAB;
3720                 break;
3721
3722         case ANEG_STATE_COMPLETE_ACK:
3723                 if (ap->ability_match != 0 &&
3724                     ap->rxconfig == 0) {
3725                         ap->state = ANEG_STATE_AN_ENABLE;
3726                         break;
3727                 }
3728                 delta = ap->cur_time - ap->link_time;
3729                 if (delta > ANEG_STATE_SETTLE_TIME) {
3730                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3731                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3732                         } else {
3733                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3734                                     !(ap->flags & MR_NP_RX)) {
3735                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3736                                 } else {
3737                                         ret = ANEG_FAILED;
3738                                 }
3739                         }
3740                 }
3741                 break;
3742
3743         case ANEG_STATE_IDLE_DETECT_INIT:
3744                 ap->link_time = ap->cur_time;
3745                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3746                 tw32_f(MAC_MODE, tp->mac_mode);
3747                 udelay(40);
3748
3749                 ap->state = ANEG_STATE_IDLE_DETECT;
3750                 ret = ANEG_TIMER_ENAB;
3751                 break;
3752
3753         case ANEG_STATE_IDLE_DETECT:
3754                 if (ap->ability_match != 0 &&
3755                     ap->rxconfig == 0) {
3756                         ap->state = ANEG_STATE_AN_ENABLE;
3757                         break;
3758                 }
3759                 delta = ap->cur_time - ap->link_time;
3760                 if (delta > ANEG_STATE_SETTLE_TIME) {
3761                         /* XXX another gem from the Broadcom driver :( */
3762                         ap->state = ANEG_STATE_LINK_OK;
3763                 }
3764                 break;
3765
3766         case ANEG_STATE_LINK_OK:
3767                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3768                 ret = ANEG_DONE;
3769                 break;
3770
3771         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3772                 /* ??? unimplemented */
3773                 break;
3774
3775         case ANEG_STATE_NEXT_PAGE_WAIT:
3776                 /* ??? unimplemented */
3777                 break;
3778
3779         default:
3780                 ret = ANEG_FAILED;
3781                 break;
3782         }
3783
3784         return ret;
3785 }
3786
3787 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3788 {
3789         int res = 0;
3790         struct tg3_fiber_aneginfo aninfo;
3791         int status = ANEG_FAILED;
3792         unsigned int tick;
3793         u32 tmp;
3794
3795         tw32_f(MAC_TX_AUTO_NEG, 0);
3796
3797         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3798         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3799         udelay(40);
3800
3801         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3802         udelay(40);
3803
3804         memset(&aninfo, 0, sizeof(aninfo));
3805         aninfo.flags |= MR_AN_ENABLE;
3806         aninfo.state = ANEG_STATE_UNKNOWN;
3807         aninfo.cur_time = 0;
3808         tick = 0;
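         /* Run the state machine for at most ~195 ms
          * (195000 iterations at 1 us per tick).
          */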
3809         while (++tick < 195000) {
3810                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3811                 if (status == ANEG_DONE || status == ANEG_FAILED)
3812                         break;
3813
3814                 udelay(1);
3815         }
3816
3817         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3818         tw32_f(MAC_MODE, tp->mac_mode);
3819         udelay(40);
3820
3821         *txflags = aninfo.txconfig;
3822         *rxflags = aninfo.flags;
3823
3824         if (status == ANEG_DONE &&
3825             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3826                              MR_LP_ADV_FULL_DUPLEX)))
3827                 res = 1;
3828
3829         return res;
3830 }
3831
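     /* One-time init of the BCM8002 SerDes PHY via its vendor-specific
      * register sequence.
      */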
3832 static void tg3_init_bcm8002(struct tg3 *tp)
3833 {
3834         u32 mac_status = tr32(MAC_STATUS);
3835         int i;
3836
3837         /* Reset when initializing for the first time or when we have a link. */
3838         if (tg3_flag(tp, INIT_COMPLETE) &&
3839             !(mac_status & MAC_STATUS_PCS_SYNCED))
3840                 return;
3841
3842         /* Set PLL lock range. */
3843         tg3_writephy(tp, 0x16, 0x8007);
3844
3845         /* SW reset */
3846         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3847
3848         /* Wait for reset to complete. */
3849         /* XXX schedule_timeout() ... */
3850         for (i = 0; i < 500; i++)
3851                 udelay(10);
3852
3853         /* Config mode; select PMA/Ch 1 regs. */
3854         tg3_writephy(tp, 0x10, 0x8411);
3855
3856         /* Enable auto-lock and comdet, select txclk for tx. */
3857         tg3_writephy(tp, 0x11, 0x0a10);
3858
3859         tg3_writephy(tp, 0x18, 0x00a0);
3860         tg3_writephy(tp, 0x16, 0x41ff);
3861
3862         /* Assert and deassert POR. */
3863         tg3_writephy(tp, 0x13, 0x0400);
3864         udelay(40);
3865         tg3_writephy(tp, 0x13, 0x0000);
3866
3867         tg3_writephy(tp, 0x11, 0x0a50);
3868         udelay(40);
3869         tg3_writephy(tp, 0x11, 0x0a10);
3870
3871         /* Wait for signal to stabilize */
3872         /* XXX schedule_timeout() ... */
3873         for (i = 0; i < 15000; i++)
3874                 udelay(10);
3875
3876         /* Deselect the channel register so we can read the PHYID
3877          * later.
3878          */
3879         tg3_writephy(tp, 0x10, 0x8011);
3880 }
3881
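     /* Bring up the link using the hardware SG_DIG autonegotiation
      * block (e.g. on 5704S).  Returns nonzero once the link is up.
      */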
3882 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3883 {
3884         u16 flowctrl;
3885         u32 sg_dig_ctrl, sg_dig_status;
3886         u32 serdes_cfg, expected_sg_dig_ctrl;
3887         int workaround, port_a;
3888         int current_link_up;
3889
3890         serdes_cfg = 0;
3891         expected_sg_dig_ctrl = 0;
3892         workaround = 0;
3893         port_a = 1;
3894         current_link_up = 0;
3895
3896         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3897             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3898                 workaround = 1;
3899                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3900                         port_a = 0;
3901
3902                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3903                 /* preserve bits 20-23 for voltage regulator */
3904                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3905         }
3906
3907         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3908
3909         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3910                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3911                         if (workaround) {
3912                                 u32 val = serdes_cfg;
3913
3914                                 if (port_a)
3915                                         val |= 0xc010000;
3916                                 else
3917                                         val |= 0x4010000;
3918                                 tw32_f(MAC_SERDES_CFG, val);
3919                         }
3920
3921                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3922                 }
3923                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3924                         tg3_setup_flow_control(tp, 0, 0);
3925                         current_link_up = 1;
3926                 }
3927                 goto out;
3928         }
3929
3930         /* Want auto-negotiation. */
3931         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3932
3933         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3934         if (flowctrl & ADVERTISE_1000XPAUSE)
3935                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3936         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3937                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3938
3939         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3940                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3941                     tp->serdes_counter &&
3942                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3943                                     MAC_STATUS_RCVD_CFG)) ==
3944                      MAC_STATUS_PCS_SYNCED)) {
3945                         tp->serdes_counter--;
3946                         current_link_up = 1;
3947                         goto out;
3948                 }
3949 restart_autoneg:
3950                 if (workaround)
3951                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3952                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3953                 udelay(5);
3954                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3955
3956                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3957                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3958         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3959                                  MAC_STATUS_SIGNAL_DET)) {
3960                 sg_dig_status = tr32(SG_DIG_STATUS);
3961                 mac_status = tr32(MAC_STATUS);
3962
3963                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3964                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3965                         u32 local_adv = 0, remote_adv = 0;
3966
3967                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3968                                 local_adv |= ADVERTISE_1000XPAUSE;
3969                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3970                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3971
3972                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3973                                 remote_adv |= LPA_1000XPAUSE;
3974                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3975                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3976
3977                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3978                         current_link_up = 1;
3979                         tp->serdes_counter = 0;
3980                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3981                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3982                         if (tp->serdes_counter)
3983                                 tp->serdes_counter--;
3984                         else {
3985                                 if (workaround) {
3986                                         u32 val = serdes_cfg;
3987
3988                                         if (port_a)
3989                                                 val |= 0xc010000;
3990                                         else
3991                                                 val |= 0x4010000;
3992
3993                                         tw32_f(MAC_SERDES_CFG, val);
3994                                 }
3995
3996                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3997                                 udelay(40);
3998
3999                                 /* Link parallel detection: the link is up
4000                                  * only if we have PCS_SYNC and are not
4001                                  * receiving config code words. */
4002                                 mac_status = tr32(MAC_STATUS);
4003                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4004                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4005                                         tg3_setup_flow_control(tp, 0, 0);
4006                                         current_link_up = 1;
4007                                         tp->phy_flags |=
4008                                                 TG3_PHYFLG_PARALLEL_DETECT;
4009                                         tp->serdes_counter =
4010                                                 SERDES_PARALLEL_DET_TIMEOUT;
4011                                 } else
4012                                         goto restart_autoneg;
4013                         }
4014                 }
4015         } else {
4016                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4017                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4018         }
4019
4020 out:
4021         return current_link_up;
4022 }
4023
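     /* Software fallback for fiber devices without hardware autoneg:
      * run the autoneg state machine, or force a 1000FD link when
      * autonegotiation is disabled.  Returns nonzero on link up.
      */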
4024 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4025 {
4026         int current_link_up = 0;
4027
4028         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4029                 goto out;
4030
4031         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4032                 u32 txflags, rxflags;
4033                 int i;
4034
4035                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4036                         u32 local_adv = 0, remote_adv = 0;
4037
4038                         if (txflags & ANEG_CFG_PS1)
4039                                 local_adv |= ADVERTISE_1000XPAUSE;
4040                         if (txflags & ANEG_CFG_PS2)
4041                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4042
4043                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4044                                 remote_adv |= LPA_1000XPAUSE;
4045                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4046                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4047
4048                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4049
4050                         current_link_up = 1;
4051                 }
4052                 for (i = 0; i < 30; i++) {
4053                         udelay(20);
4054                         tw32_f(MAC_STATUS,
4055                                (MAC_STATUS_SYNC_CHANGED |
4056                                 MAC_STATUS_CFG_CHANGED));
4057                         udelay(40);
4058                         if ((tr32(MAC_STATUS) &
4059                              (MAC_STATUS_SYNC_CHANGED |
4060                               MAC_STATUS_CFG_CHANGED)) == 0)
4061                                 break;
4062                 }
4063
4064                 mac_status = tr32(MAC_STATUS);
4065                 if (current_link_up == 0 &&
4066                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4067                     !(mac_status & MAC_STATUS_RCVD_CFG))
4068                         current_link_up = 1;
4069         } else {
4070                 tg3_setup_flow_control(tp, 0, 0);
4071
4072                 /* Forcing 1000FD link up. */
4073                 current_link_up = 1;
4074
4075                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4076                 udelay(40);
4077
4078                 tw32_f(MAC_MODE, tp->mac_mode);
4079                 udelay(40);
4080         }
4081
4082 out:
4083         return current_link_up;
4084 }
4085
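     /* Top-level link setup for TBI (fiber) devices: choose hardware
      * or software autonegotiation, then report any link change.
      */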
4086 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4087 {
4088         u32 orig_pause_cfg;
4089         u16 orig_active_speed;
4090         u8 orig_active_duplex;
4091         u32 mac_status;
4092         int current_link_up;
4093         int i;
4094
4095         orig_pause_cfg = tp->link_config.active_flowctrl;
4096         orig_active_speed = tp->link_config.active_speed;
4097         orig_active_duplex = tp->link_config.active_duplex;
4098
4099         if (!tg3_flag(tp, HW_AUTONEG) &&
4100             netif_carrier_ok(tp->dev) &&
4101             tg3_flag(tp, INIT_COMPLETE)) {
4102                 mac_status = tr32(MAC_STATUS);
4103                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4104                                MAC_STATUS_SIGNAL_DET |
4105                                MAC_STATUS_CFG_CHANGED |
4106                                MAC_STATUS_RCVD_CFG);
4107                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4108                                    MAC_STATUS_SIGNAL_DET)) {
4109                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4110                                             MAC_STATUS_CFG_CHANGED));
4111                         return 0;
4112                 }
4113         }
4114
4115         tw32_f(MAC_TX_AUTO_NEG, 0);
4116
4117         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4118         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4119         tw32_f(MAC_MODE, tp->mac_mode);
4120         udelay(40);
4121
4122         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4123                 tg3_init_bcm8002(tp);
4124
4125         /* Enable link change event even when serdes polling.  */
4126         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4127         udelay(40);
4128
4129         current_link_up = 0;
4130         mac_status = tr32(MAC_STATUS);
4131
4132         if (tg3_flag(tp, HW_AUTONEG))
4133                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4134         else
4135                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4136
4137         tp->napi[0].hw_status->status =
4138                 (SD_STATUS_UPDATED |
4139                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4140
4141         for (i = 0; i < 100; i++) {
4142                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4143                                     MAC_STATUS_CFG_CHANGED));
4144                 udelay(5);
4145                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4146                                          MAC_STATUS_CFG_CHANGED |
4147                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4148                         break;
4149         }
4150
4151         mac_status = tr32(MAC_STATUS);
4152         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4153                 current_link_up = 0;
4154                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4155                     tp->serdes_counter == 0) {
4156                         tw32_f(MAC_MODE, (tp->mac_mode |
4157                                           MAC_MODE_SEND_CONFIGS));
4158                         udelay(1);
4159                         tw32_f(MAC_MODE, tp->mac_mode);
4160                 }
4161         }
4162
4163         if (current_link_up == 1) {
4164                 tp->link_config.active_speed = SPEED_1000;
4165                 tp->link_config.active_duplex = DUPLEX_FULL;
4166                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4167                                     LED_CTRL_LNKLED_OVERRIDE |
4168                                     LED_CTRL_1000MBPS_ON));
4169         } else {
4170                 tp->link_config.active_speed = SPEED_INVALID;
4171                 tp->link_config.active_duplex = DUPLEX_INVALID;
4172                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4173                                     LED_CTRL_LNKLED_OVERRIDE |
4174                                     LED_CTRL_TRAFFIC_OVERRIDE));
4175         }
4176
4177         if (current_link_up != netif_carrier_ok(tp->dev)) {
4178                 if (current_link_up)
4179                         netif_carrier_on(tp->dev);
4180                 else
4181                         netif_carrier_off(tp->dev);
4182                 tg3_link_report(tp);
4183         } else {
4184                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4185                 if (orig_pause_cfg != now_pause_cfg ||
4186                     orig_active_speed != tp->link_config.active_speed ||
4187                     orig_active_duplex != tp->link_config.active_duplex)
4188                         tg3_link_report(tp);
4189         }
4190
4191         return 0;
4192 }
4193
4194 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4195 {
4196         int current_link_up, err = 0;
4197         u32 bmsr, bmcr;
4198         u16 current_speed;
4199         u8 current_duplex;
4200         u32 local_adv, remote_adv;
4201
4202         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4203         tw32_f(MAC_MODE, tp->mac_mode);
4204         udelay(40);
4205
4206         tw32(MAC_EVENT, 0);
4207
4208         tw32_f(MAC_STATUS,
4209              (MAC_STATUS_SYNC_CHANGED |
4210               MAC_STATUS_CFG_CHANGED |
4211               MAC_STATUS_MI_COMPLETION |
4212               MAC_STATUS_LNKSTATE_CHANGED));
4213         udelay(40);
4214
4215         if (force_reset)
4216                 tg3_phy_reset(tp);
4217
4218         current_link_up = 0;
4219         current_speed = SPEED_INVALID;
4220         current_duplex = DUPLEX_INVALID;
4221
4222         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4223         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4224         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4225                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4226                         bmsr |= BMSR_LSTATUS;
4227                 else
4228                         bmsr &= ~BMSR_LSTATUS;
4229         }
4230
4231         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4232
4233         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4234             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4235                 /* do nothing, just check for link up at the end */
4236         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4237                 u32 adv, new_adv;
4238
4239                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4240                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4241                                   ADVERTISE_1000XPAUSE |
4242                                   ADVERTISE_1000XPSE_ASYM |
4243                                   ADVERTISE_SLCT);
4244
4245                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4246
4247                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4248                         new_adv |= ADVERTISE_1000XHALF;
4249                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4250                         new_adv |= ADVERTISE_1000XFULL;
4251
4252                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4253                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4254                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4255                         tg3_writephy(tp, MII_BMCR, bmcr);
4256
4257                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4258                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4259                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4260
4261                         return err;
4262                 }
4263         } else {
4264                 u32 new_bmcr;
4265
4266                 bmcr &= ~BMCR_SPEED1000;
4267                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4268
4269                 if (tp->link_config.duplex == DUPLEX_FULL)
4270                         new_bmcr |= BMCR_FULLDPLX;
4271
4272                 if (new_bmcr != bmcr) {
4273                         /* BMCR_SPEED1000 is a reserved bit that needs
4274                          * to be set on write.
4275                          */
4276                         new_bmcr |= BMCR_SPEED1000;
4277
4278                         /* Force a linkdown */
4279                         if (netif_carrier_ok(tp->dev)) {
4280                                 u32 adv;
4281
4282                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4283                                 adv &= ~(ADVERTISE_1000XFULL |
4284                                          ADVERTISE_1000XHALF |
4285                                          ADVERTISE_SLCT);
4286                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4287                                 tg3_writephy(tp, MII_BMCR, bmcr |
4288                                                            BMCR_ANRESTART |
4289                                                            BMCR_ANENABLE);
4290                                 udelay(10);
4291                                 netif_carrier_off(tp->dev);
4292                         }
4293                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4294                         bmcr = new_bmcr;
4295                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4296                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4297                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4298                             ASIC_REV_5714) {
4299                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4300                                         bmsr |= BMSR_LSTATUS;
4301                                 else
4302                                         bmsr &= ~BMSR_LSTATUS;
4303                         }
4304                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4305                 }
4306         }
4307
4308         if (bmsr & BMSR_LSTATUS) {
4309                 current_speed = SPEED_1000;
4310                 current_link_up = 1;
4311                 if (bmcr & BMCR_FULLDPLX)
4312                         current_duplex = DUPLEX_FULL;
4313                 else
4314                         current_duplex = DUPLEX_HALF;
4315
4316                 local_adv = 0;
4317                 remote_adv = 0;
4318
4319                 if (bmcr & BMCR_ANENABLE) {
4320                         u32 common;
4321
4322                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4323                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4324                         common = local_adv & remote_adv;
4325                         if (common & (ADVERTISE_1000XHALF |
4326                                       ADVERTISE_1000XFULL)) {
4327                                 if (common & ADVERTISE_1000XFULL)
4328                                         current_duplex = DUPLEX_FULL;
4329                                 else
4330                                         current_duplex = DUPLEX_HALF;
4331                         } else if (!tg3_flag(tp, 5780_CLASS)) {
4332                                 /* Link is up via parallel detect */
4333                         } else {
4334                                 current_link_up = 0;
4335                         }
4336                 }
4337         }
4338
4339         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4340                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4341
4342         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4343         if (tp->link_config.active_duplex == DUPLEX_HALF)
4344                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4345
4346         tw32_f(MAC_MODE, tp->mac_mode);
4347         udelay(40);
4348
4349         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4350
4351         tp->link_config.active_speed = current_speed;
4352         tp->link_config.active_duplex = current_duplex;
4353
4354         if (current_link_up != netif_carrier_ok(tp->dev)) {
4355                 if (current_link_up)
4356                         netif_carrier_on(tp->dev);
4357                 else {
4358                         netif_carrier_off(tp->dev);
4359                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4360                 }
4361                 tg3_link_report(tp);
4362         }
4363         return err;
4364 }
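
/* For reference: tg3_setup_flow_control(), called above once a
 * full-duplex link is up, resolves the pause configuration from the
 * local and remote 1000BASE-X advertisement words.  Condensed sketch
 * of that resolution (illustrative only; see
 * tg3_resolve_flowctrl_1000X() earlier in this file for the real
 * logic):
 *
 *      if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
 *              cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
 *      } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
 *              if (lcladv & ADVERTISE_1000XPAUSE)
 *                      cap = FLOW_CTRL_RX;
 *              if (rmtadv & ADVERTISE_1000XPAUSE)
 *                      cap = FLOW_CTRL_TX;
 *      }
 */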
4365
4366 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4367 {
4368         if (tp->serdes_counter) {
4369                 /* Give autoneg time to complete. */
4370                 tp->serdes_counter--;
4371                 return;
4372         }
4373
4374         if (!netif_carrier_ok(tp->dev) &&
4375             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4376                 u32 bmcr;
4377
4378                 tg3_readphy(tp, MII_BMCR, &bmcr);
4379                 if (bmcr & BMCR_ANENABLE) {
4380                         u32 phy1, phy2;
4381
4382                         /* Select shadow register 0x1f */
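                        /* (On these Broadcom PHYs, 0x7c00 places 0x1f
                         * in the shadow-select field, bits 14:10, with
                         * write-enable bit 15 clear, i.e. set up for a
                         * read.)
                         */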
4383                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4384                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4385
4386                         /* Select expansion interrupt status register */
4387                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4388                                          MII_TG3_DSP_EXP1_INT_STAT);
4389                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4390                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4391
4392                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4393                                 /* We have signal detect and are not
4394                                  * receiving config code words, so the
4395                                  * link is up via parallel detection.
4396                                  */
4397
4398                                 bmcr &= ~BMCR_ANENABLE;
4399                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4400                                 tg3_writephy(tp, MII_BMCR, bmcr);
4401                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4402                         }
4403                 }
4404         } else if (netif_carrier_ok(tp->dev) &&
4405                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4406                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4407                 u32 phy2;
4408
4409                 /* Select expansion interrupt status register */
4410                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4411                                  MII_TG3_DSP_EXP1_INT_STAT);
4412                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4413                 if (phy2 & 0x20) {
4414                         u32 bmcr;
4415
4416                         /* Config code words received, turn on autoneg. */
4417                         tg3_readphy(tp, MII_BMCR, &bmcr);
4418                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4419
4420                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4421
4422                 }
4423         }
4424 }
4425
4426 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4427 {
4428         u32 val;
4429         int err;
4430
4431         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4432                 err = tg3_setup_fiber_phy(tp, force_reset);
4433         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4434                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4435         else
4436                 err = tg3_setup_copper_phy(tp, force_reset);
4437
4438         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4439                 u32 scale;
4440
4441                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4442                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4443                         scale = 65;
4444                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4445                         scale = 6;
4446                 else
4447                         scale = 12;
4448
4449                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4450                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4451                 tw32(GRC_MISC_CFG, val);
4452         }
4453
4454         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4455               (6 << TX_LENGTHS_IPG_SHIFT);
4456         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4457                 val |= tr32(MAC_TX_LENGTHS) &
4458                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
4459                         TX_LENGTHS_CNT_DWN_VAL_MSK);
4460
4461         if (tp->link_config.active_speed == SPEED_1000 &&
4462             tp->link_config.active_duplex == DUPLEX_HALF)
4463                 tw32(MAC_TX_LENGTHS, val |
4464                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4465         else
4466                 tw32(MAC_TX_LENGTHS, val |
4467                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4468
4469         if (!tg3_flag(tp, 5705_PLUS)) {
4470                 if (netif_carrier_ok(tp->dev)) {
4471                         tw32(HOSTCC_STAT_COAL_TICKS,
4472                              tp->coal.stats_block_coalesce_usecs);
4473                 } else {
4474                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
4475                 }
4476         }
4477
4478         if (tg3_flag(tp, ASPM_WORKAROUND)) {
4479                 val = tr32(PCIE_PWR_MGMT_THRESH);
4480                 if (!netif_carrier_ok(tp->dev))
4481                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4482                               tp->pwrmgmt_thresh;
4483                 else
4484                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4485                 tw32(PCIE_PWR_MGMT_THRESH, val);
4486         }
4487
4488         return err;
4489 }
4490
4491 static inline int tg3_irq_sync(struct tg3 *tp)
4492 {
4493         return tp->irq_sync;
4494 }
4495
4496 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4497 {
4498         int i;
4499
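        /* Bias the destination pointer by the register offset so the
         * dump buffer mirrors the register address map; each tr32()
         * result below then lands in the slot matching its register.
         */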
4500         dst = (u32 *)((u8 *)dst + off);
4501         for (i = 0; i < len; i += sizeof(u32))
4502                 *dst++ = tr32(off + i);
4503 }
4504
4505 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4506 {
4507         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4508         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4509         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4510         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4511         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4512         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4513         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4514         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4515         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4516         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4517         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4518         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4519         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4520         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4521         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4522         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4523         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4524         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4525         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4526
4527         if (tg3_flag(tp, SUPPORT_MSIX))
4528                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4529
4530         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4531         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4532         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4533         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4534         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4535         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4536         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4537         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4538
4539         if (!tg3_flag(tp, 5705_PLUS)) {
4540                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4541                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4542                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4543         }
4544
4545         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4546         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4547         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4548         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4549         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4550
4551         if (tg3_flag(tp, NVRAM))
4552                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4553 }
4554
4555 static void tg3_dump_state(struct tg3 *tp)
4556 {
4557         int i;
4558         u32 *regs;
4559
4560         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4561         if (!regs) {
4562                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4563                 return;
4564         }
4565
4566         if (tg3_flag(tp, PCI_EXPRESS)) {
4567                 /* Read up to but not including private PCI registers */
4568                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4569                         regs[i / sizeof(u32)] = tr32(i);
4570         } else
4571                 tg3_dump_legacy_regs(tp, regs);
4572
4573         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4574                 if (!regs[i + 0] && !regs[i + 1] &&
4575                     !regs[i + 2] && !regs[i + 3])
4576                         continue;
4577
4578                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4579                            i * 4,
4580                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4581         }
4582
4583         kfree(regs);
4584
4585         for (i = 0; i < tp->irq_cnt; i++) {
4586                 struct tg3_napi *tnapi = &tp->napi[i];
4587
4588                 /* SW status block */
4589                 netdev_err(tp->dev,
4590                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4591                            i,
4592                            tnapi->hw_status->status,
4593                            tnapi->hw_status->status_tag,
4594                            tnapi->hw_status->rx_jumbo_consumer,
4595                            tnapi->hw_status->rx_consumer,
4596                            tnapi->hw_status->rx_mini_consumer,
4597                            tnapi->hw_status->idx[0].rx_producer,
4598                            tnapi->hw_status->idx[0].tx_consumer);
4599
4600                 netdev_err(tp->dev,
4601                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4602                            i,
4603                            tnapi->last_tag, tnapi->last_irq_tag,
4604                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4605                            tnapi->rx_rcb_ptr,
4606                            tnapi->prodring.rx_std_prod_idx,
4607                            tnapi->prodring.rx_std_cons_idx,
4608                            tnapi->prodring.rx_jmb_prod_idx,
4609                            tnapi->prodring.rx_jmb_cons_idx);
4610         }
4611 }
4612
4613 /* This is called whenever we suspect that the system chipset is re-
4614  * ordering the sequence of MMIO writes to the tx send mailbox.  The symptom
4615  * is bogus tx completions. We try to recover by setting the
4616  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4617  * in the workqueue.
4618  */
4619 static void tg3_tx_recover(struct tg3 *tp)
4620 {
4621         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4622                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4623
4624         netdev_warn(tp->dev,
4625                     "The system may be re-ordering memory-mapped I/O "
4626                     "cycles to the network device, attempting to recover. "
4627                     "Please report the problem to the driver maintainer "
4628                     "and include system chipset information.\n");
4629
4630         spin_lock(&tp->lock);
4631         tg3_flag_set(tp, TX_RECOVERY_PENDING);
4632         spin_unlock(&tp->lock);
4633 }
4634
4635 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4636 {
4637         /* Tell compiler to fetch tx indices from memory. */
4638         barrier();
4639         return tnapi->tx_pending -
4640                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4641 }
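
/* Worked example of the arithmetic above (illustrative only, not used
 * by the driver): with TG3_TX_RING_SIZE == 512 the mask is 511, and
 * the unsigned subtraction makes producer wraparound harmless.
 */
static inline u32 tg3_tx_in_flight_example(void)
{
        u32 prod = 2, cons = 510;       /* sample indices; prod has wrapped */

        /* (2 - 510) & 511 == 4 descriptors still in flight */
        return (prod - cons) & (TG3_TX_RING_SIZE - 1);
}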
4642
4643 /* Tigon3 never reports partial packet sends.  So we do not
4644  * need special logic to handle SKBs that have not had all
4645  * of their frags sent yet, like SunGEM does.
4646  */
4647 static void tg3_tx(struct tg3_napi *tnapi)
4648 {
4649         struct tg3 *tp = tnapi->tp;
4650         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4651         u32 sw_idx = tnapi->tx_cons;
4652         struct netdev_queue *txq;
4653         int index = tnapi - tp->napi;
4654
4655         if (tg3_flag(tp, ENABLE_TSS))
4656                 index--;
4657
4658         txq = netdev_get_tx_queue(tp->dev, index);
4659
4660         while (sw_idx != hw_idx) {
4661                 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4662                 struct sk_buff *skb = ri->skb;
4663                 int i, tx_bug = 0;
4664
4665                 if (unlikely(skb == NULL)) {
4666                         tg3_tx_recover(tp);
4667                         return;
4668                 }
4669
4670                 pci_unmap_single(tp->pdev,
4671                                  dma_unmap_addr(ri, mapping),
4672                                  skb_headlen(skb),
4673                                  PCI_DMA_TODEVICE);
4674
4675                 ri->skb = NULL;
4676
4677                 sw_idx = NEXT_TX(sw_idx);
4678
4679                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4680                         ri = &tnapi->tx_buffers[sw_idx];
4681                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4682                                 tx_bug = 1;
4683
4684                         pci_unmap_page(tp->pdev,
4685                                        dma_unmap_addr(ri, mapping),
4686                                        skb_shinfo(skb)->frags[i].size,
4687                                        PCI_DMA_TODEVICE);
4688                         sw_idx = NEXT_TX(sw_idx);
4689                 }
4690
4691                 dev_kfree_skb(skb);
4692
4693                 if (unlikely(tx_bug)) {
4694                         tg3_tx_recover(tp);
4695                         return;
4696                 }
4697         }
4698
4699         tnapi->tx_cons = sw_idx;
4700
4701         /* Need to make the tx_cons update visible to tg3_start_xmit()
4702          * before checking for netif_queue_stopped().  Without the
4703          * memory barrier, there is a small possibility that tg3_start_xmit()
4704          * will miss it and cause the queue to be stopped forever.
4705          */
4706         smp_mb();
4707
4708         if (unlikely(netif_tx_queue_stopped(txq) &&
4709                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4710                 __netif_tx_lock(txq, smp_processor_id());
4711                 if (netif_tx_queue_stopped(txq) &&
4712                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4713                         netif_tx_wake_queue(txq);
4714                 __netif_tx_unlock(txq);
4715         }
4716 }
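
/* The smp_mb() in tg3_tx() above pairs with a matching barrier on the
 * producer side.  Condensed sketch of the stop/wake handshake in
 * tg3_start_xmit() (illustrative; see the real function later in this
 * file):
 *
 *      if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
 *              netif_tx_stop_queue(txq);
 *              smp_mb();       pairs with the barrier in tg3_tx()
 *              if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 *                      netif_tx_wake_queue(txq);
 *      }
 *
 * Without the paired barriers, an update to tx_cons could be missed
 * and the queue left stopped forever.
 */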
4717
4718 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4719 {
4720         if (!ri->skb)
4721                 return;
4722
4723         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4724                          map_sz, PCI_DMA_FROMDEVICE);
4725         dev_kfree_skb_any(ri->skb);
4726         ri->skb = NULL;
4727 }
4728
4729 /* Returns size of skb allocated or < 0 on error.
4730  *
4731  * We only need to fill in the address because the other members
4732  * of the RX descriptor are invariant, see tg3_init_rings.
4733  *
4734  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4735  * posting buffers we only dirty the first cache line of the RX
4736  * descriptor (containing the address).  Whereas for the RX status
4737  * buffers the cpu only reads the last cache line of the RX descriptor
4738  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4739  */
4740 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4741                             u32 opaque_key, u32 dest_idx_unmasked)
4742 {
4743         struct tg3_rx_buffer_desc *desc;
4744         struct ring_info *map;
4745         struct sk_buff *skb;
4746         dma_addr_t mapping;
4747         int skb_size, dest_idx;
4748
4749         switch (opaque_key) {
4750         case RXD_OPAQUE_RING_STD:
4751                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4752                 desc = &tpr->rx_std[dest_idx];
4753                 map = &tpr->rx_std_buffers[dest_idx];
4754                 skb_size = tp->rx_pkt_map_sz;
4755                 break;
4756
4757         case RXD_OPAQUE_RING_JUMBO:
4758                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4759                 desc = &tpr->rx_jmb[dest_idx].std;
4760                 map = &tpr->rx_jmb_buffers[dest_idx];
4761                 skb_size = TG3_RX_JMB_MAP_SZ;
4762                 break;
4763
4764         default:
4765                 return -EINVAL;
4766         }
4767
4768         /* Do not overwrite any of the map or rp information
4769          * until we are sure we can commit to a new buffer.
4770          *
4771          * Callers depend upon this behavior and assume that
4772          * we leave everything unchanged if we fail.
4773          */
4774         skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4775         if (skb == NULL)
4776                 return -ENOMEM;
4777
4778         skb_reserve(skb, tp->rx_offset);
4779
4780         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4781                                  PCI_DMA_FROMDEVICE);
4782         if (pci_dma_mapping_error(tp->pdev, mapping)) {
4783                 dev_kfree_skb(skb);
4784                 return -EIO;
4785         }
4786
4787         map->skb = skb;
4788         dma_unmap_addr_set(map, mapping, mapping);
4789
4790         desc->addr_hi = ((u64)mapping >> 32);
4791         desc->addr_lo = ((u64)mapping & 0xffffffff);
4792
4793         return skb_size;
4794 }
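
/* Illustrative helper (not used by the driver): the 64-bit DMA address
 * is split across the two 32-bit descriptor words exactly as at the
 * end of tg3_alloc_rx_skb() above.  E.g. mapping == 0x123456780ULL
 * yields addr_hi == 0x00000001 and addr_lo == 0x23456780.
 */
static inline void tg3_example_set_rx_desc_addr(struct tg3_rx_buffer_desc *desc,
                                                dma_addr_t mapping)
{
        desc->addr_hi = ((u64)mapping >> 32);           /* upper 32 bits */
        desc->addr_lo = ((u64)mapping & 0xffffffff);    /* lower 32 bits */
}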
4795
4796 /* We only need to move over the address because the other
4797  * members of the RX descriptor are invariant.  See notes above
4798  * tg3_alloc_rx_skb for full details.
4799  */
4800 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4801                            struct tg3_rx_prodring_set *dpr,
4802                            u32 opaque_key, int src_idx,
4803                            u32 dest_idx_unmasked)
4804 {
4805         struct tg3 *tp = tnapi->tp;
4806         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4807         struct ring_info *src_map, *dest_map;
4808         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4809         int dest_idx;
4810
4811         switch (opaque_key) {
4812         case RXD_OPAQUE_RING_STD:
4813                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4814                 dest_desc = &dpr->rx_std[dest_idx];
4815                 dest_map = &dpr->rx_std_buffers[dest_idx];
4816                 src_desc = &spr->rx_std[src_idx];
4817                 src_map = &spr->rx_std_buffers[src_idx];
4818                 break;
4819
4820         case RXD_OPAQUE_RING_JUMBO:
4821                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4822                 dest_desc = &dpr->rx_jmb[dest_idx].std;
4823                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4824                 src_desc = &spr->rx_jmb[src_idx].std;
4825                 src_map = &spr->rx_jmb_buffers[src_idx];
4826                 break;
4827
4828         default:
4829                 return;
4830         }
4831
4832         dest_map->skb = src_map->skb;
4833         dma_unmap_addr_set(dest_map, mapping,
4834                            dma_unmap_addr(src_map, mapping));
4835         dest_desc->addr_hi = src_desc->addr_hi;
4836         dest_desc->addr_lo = src_desc->addr_lo;
4837
4838         /* Ensure that the update to the skb happens after the physical
4839          * addresses have been transferred to the new BD location.
4840          */
4841         smp_wmb();
4842
4843         src_map->skb = NULL;
4844 }
4845
4846 /* The RX ring scheme is composed of multiple rings which post fresh
4847  * buffers to the chip, and one special ring the chip uses to report
4848  * status back to the host.
4849  *
4850  * The special ring reports the status of received packets to the
4851  * host.  The chip does not write into the original descriptor the
4852  * RX buffer was obtained from.  The chip simply takes the original
4853  * descriptor as provided by the host, updates the status and length
4854  * field, then writes this into the next status ring entry.
4855  *
4856  * Each ring the host uses to post buffers to the chip is described
4857  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
4858  * it is first placed into the on-chip ram.  When the packet's length
4859  * is known, the chip walks down the TG3_BDINFO entries to select the
4860  * ring.  Each TG3_BDINFO specifies a MAXLEN field, and the first
4861  * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
4862  *
4863  * The "separate ring for rx status" scheme may sound odd, but it makes
4864  * sense from a cache coherency perspective.  If only the host writes
4865  * to the buffer post rings, and only the chip writes to the rx status
4866  * rings, then cache lines never move beyond shared-modified state.
4867  * If both the host and chip were to write into the same ring, cache line
4868  * eviction could occur since both entities want it in an exclusive state.
4869  */
4870 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4871 {
4872         struct tg3 *tp = tnapi->tp;
4873         u32 work_mask, rx_std_posted = 0;
4874         u32 std_prod_idx, jmb_prod_idx;
4875         u32 sw_idx = tnapi->rx_rcb_ptr;
4876         u16 hw_idx;
4877         int received;
4878         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4879
4880         hw_idx = *(tnapi->rx_rcb_prod_idx);
4881         /*
4882          * We need to order the read of hw_idx and the read of
4883          * the opaque cookie.
4884          */
4885         rmb();
4886         work_mask = 0;
4887         received = 0;
4888         std_prod_idx = tpr->rx_std_prod_idx;
4889         jmb_prod_idx = tpr->rx_jmb_prod_idx;
4890         while (sw_idx != hw_idx && budget > 0) {
4891                 struct ring_info *ri;
4892                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4893                 unsigned int len;
4894                 struct sk_buff *skb;
4895                 dma_addr_t dma_addr;
4896                 u32 opaque_key, desc_idx, *post_ptr;
4897
4898                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4899                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4900                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4901                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4902                         dma_addr = dma_unmap_addr(ri, mapping);
4903                         skb = ri->skb;
4904                         post_ptr = &std_prod_idx;
4905                         rx_std_posted++;
4906                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4907                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4908                         dma_addr = dma_unmap_addr(ri, mapping);
4909                         skb = ri->skb;
4910                         post_ptr = &jmb_prod_idx;
4911                 } else
4912                         goto next_pkt_nopost;
4913
4914                 work_mask |= opaque_key;
4915
4916                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4917                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4918                 drop_it:
4919                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4920                                        desc_idx, *post_ptr);
4921                 drop_it_no_recycle:
4922                         /* Other statistics are tracked by the card. */
4923                         tp->rx_dropped++;
4924                         goto next_pkt;
4925                 }
4926
4927                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4928                       ETH_FCS_LEN;
4929
4930                 if (len > TG3_RX_COPY_THRESH(tp)) {
4931                         int skb_size;
4932
4933                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4934                                                     *post_ptr);
4935                         if (skb_size < 0)
4936                                 goto drop_it;
4937
4938                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
4939                                          PCI_DMA_FROMDEVICE);
4940
4941                         /* Ensure that the update to the skb happens
4942                          * after the usage of the old DMA mapping.
4943                          */
4944                         smp_wmb();
4945
4946                         ri->skb = NULL;
4947
4948                         skb_put(skb, len);
4949                 } else {
4950                         struct sk_buff *copy_skb;
4951
4952                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4953                                        desc_idx, *post_ptr);
4954
4955                         copy_skb = netdev_alloc_skb(tp->dev, len +
4956                                                     TG3_RAW_IP_ALIGN);
4957                         if (copy_skb == NULL)
4958                                 goto drop_it_no_recycle;
4959
4960                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4961                         skb_put(copy_skb, len);
4962                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4963                         skb_copy_from_linear_data(skb, copy_skb->data, len);
4964                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4965
4966                         /* We'll reuse the original ring buffer. */
4967                         skb = copy_skb;
4968                 }
4969
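                /* A hardware-computed TCP/UDP checksum of 0xffff below
                 * indicates the packet checksummed correctly, so
                 * software checksum validation can be skipped.
                 */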
4970                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4971                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4972                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4973                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
4974                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4975                 else
4976                         skb_checksum_none_assert(skb);
4977
4978                 skb->protocol = eth_type_trans(skb, tp->dev);
4979
4980                 if (len > (tp->dev->mtu + ETH_HLEN) &&
4981                     skb->protocol != htons(ETH_P_8021Q)) {
4982                         dev_kfree_skb(skb);
4983                         goto drop_it_no_recycle;
4984                 }
4985
4986                 if (desc->type_flags & RXD_FLAG_VLAN &&
4987                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4988                         __vlan_hwaccel_put_tag(skb,
4989                                                desc->err_vlan & RXD_VLAN_MASK);
4990
4991                 napi_gro_receive(&tnapi->napi, skb);
4992
4993                 received++;
4994                 budget--;
4995
4996 next_pkt:
4997                 (*post_ptr)++;
4998
4999                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5000                         tpr->rx_std_prod_idx = std_prod_idx &
5001                                                tp->rx_std_ring_mask;
5002                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5003                                      tpr->rx_std_prod_idx);
5004                         work_mask &= ~RXD_OPAQUE_RING_STD;
5005                         rx_std_posted = 0;
5006                 }
5007 next_pkt_nopost:
5008                 sw_idx++;
5009                 sw_idx &= tp->rx_ret_ring_mask;
5010
5011                 /* Refresh hw_idx to see if there is new work */
5012                 if (sw_idx == hw_idx) {
5013                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5014                         rmb();
5015                 }
5016         }
5017
5018         /* ACK the status ring. */
5019         tnapi->rx_rcb_ptr = sw_idx;
5020         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5021
5022         /* Refill RX ring(s). */
5023         if (!tg3_flag(tp, ENABLE_RSS)) {
5024                 if (work_mask & RXD_OPAQUE_RING_STD) {
5025                         tpr->rx_std_prod_idx = std_prod_idx &
5026                                                tp->rx_std_ring_mask;
5027                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5028                                      tpr->rx_std_prod_idx);
5029                 }
5030                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5031                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5032                                                tp->rx_jmb_ring_mask;
5033                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5034                                      tpr->rx_jmb_prod_idx);
5035                 }
5036                 mmiowb();
5037         } else if (work_mask) {
5038                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5039                  * updated before the producer indices can be updated.
5040                  */
5041                 smp_wmb();
5042
5043                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5044                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5045
5046                 if (tnapi != &tp->napi[1])
5047                         napi_schedule(&tp->napi[1].napi);
5048         }
5049
5050         return received;
5051 }
5052
5053 static void tg3_poll_link(struct tg3 *tp)
5054 {
5055         /* handle link change and other phy events */
5056         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5057                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5058
5059                 if (sblk->status & SD_STATUS_LINK_CHG) {
5060                         sblk->status = SD_STATUS_UPDATED |
5061                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5062                         spin_lock(&tp->lock);
5063                         if (tg3_flag(tp, USE_PHYLIB)) {
5064                                 tw32_f(MAC_STATUS,
5065                                      (MAC_STATUS_SYNC_CHANGED |
5066                                       MAC_STATUS_CFG_CHANGED |
5067                                       MAC_STATUS_MI_COMPLETION |
5068                                       MAC_STATUS_LNKSTATE_CHANGED));
5069                                 udelay(40);
5070                         } else
5071                                 tg3_setup_phy(tp, 0);
5072                         spin_unlock(&tp->lock);
5073                 }
5074         }
5075 }
5076
5077 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5078                                 struct tg3_rx_prodring_set *dpr,
5079                                 struct tg3_rx_prodring_set *spr)
5080 {
5081         u32 si, di, cpycnt, src_prod_idx;
5082         int i, err = 0;
5083
5084         while (1) {
5085                 src_prod_idx = spr->rx_std_prod_idx;
5086
5087                 /* Make sure updates to the rx_std_buffers[] entries and the
5088                  * standard producer index are seen in the correct order.
5089                  */
5090                 smp_rmb();
5091
5092                 if (spr->rx_std_cons_idx == src_prod_idx)
5093                         break;
5094
5095                 if (spr->rx_std_cons_idx < src_prod_idx)
5096                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5097                 else
5098                         cpycnt = tp->rx_std_ring_mask + 1 -
5099                                  spr->rx_std_cons_idx;
5100
5101                 cpycnt = min(cpycnt,
5102                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5103
5104                 si = spr->rx_std_cons_idx;
5105                 di = dpr->rx_std_prod_idx;
5106
5107                 for (i = di; i < di + cpycnt; i++) {
5108                         if (dpr->rx_std_buffers[i].skb) {
5109                                 cpycnt = i - di;
5110                                 err = -ENOSPC;
5111                                 break;
5112                         }
5113                 }
5114
5115                 if (!cpycnt)
5116                         break;
5117
5118                 /* Ensure that updates to the rx_std_buffers ring and the
5119                  * shadowed hardware producer ring from tg3_recycle_skb() are
5120                  * ordered correctly WRT the skb check above.
5121                  */
5122                 smp_rmb();
5123
5124                 memcpy(&dpr->rx_std_buffers[di],
5125                        &spr->rx_std_buffers[si],
5126                        cpycnt * sizeof(struct ring_info));
5127
5128                 for (i = 0; i < cpycnt; i++, di++, si++) {
5129                         struct tg3_rx_buffer_desc *sbd, *dbd;
5130                         sbd = &spr->rx_std[si];
5131                         dbd = &dpr->rx_std[di];
5132                         dbd->addr_hi = sbd->addr_hi;
5133                         dbd->addr_lo = sbd->addr_lo;
5134                 }
5135
5136                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5137                                        tp->rx_std_ring_mask;
5138                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5139                                        tp->rx_std_ring_mask;
5140         }
5141
5142         while (1) {
5143                 src_prod_idx = spr->rx_jmb_prod_idx;
5144
5145                 /* Make sure updates to the rx_jmb_buffers[] entries and
5146                  * the jumbo producer index are seen in the correct order.
5147                  */
5148                 smp_rmb();
5149
5150                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5151                         break;
5152
5153                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5154                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5155                 else
5156                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5157                                  spr->rx_jmb_cons_idx;
5158
5159                 cpycnt = min(cpycnt,
5160                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5161
5162                 si = spr->rx_jmb_cons_idx;
5163                 di = dpr->rx_jmb_prod_idx;
5164
5165                 for (i = di; i < di + cpycnt; i++) {
5166                         if (dpr->rx_jmb_buffers[i].skb) {
5167                                 cpycnt = i - di;
5168                                 err = -ENOSPC;
5169                                 break;
5170                         }
5171                 }
5172
5173                 if (!cpycnt)
5174                         break;
5175
5176                 /* Ensure that updates to the rx_jmb_buffers ring and the
5177                  * shadowed hardware producer ring from tg3_recycle_skb() are
5178                  * ordered correctly WRT the skb check above.
5179                  */
5180                 smp_rmb();
5181
5182                 memcpy(&dpr->rx_jmb_buffers[di],
5183                        &spr->rx_jmb_buffers[si],
5184                        cpycnt * sizeof(struct ring_info));
5185
5186                 for (i = 0; i < cpycnt; i++, di++, si++) {
5187                         struct tg3_rx_buffer_desc *sbd, *dbd;
5188                         sbd = &spr->rx_jmb[si].std;
5189                         dbd = &dpr->rx_jmb[di].std;
5190                         dbd->addr_hi = sbd->addr_hi;
5191                         dbd->addr_lo = sbd->addr_lo;
5192                 }
5193
5194                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5195                                        tp->rx_jmb_ring_mask;
5196                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5197                                        tp->rx_jmb_ring_mask;
5198         }
5199
5200         return err;
5201 }
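
/* Worked example of the copy-count math above (illustrative only):
 * with a 512-entry standard ring (rx_std_ring_mask == 511),
 * rx_std_cons_idx == 508 and rx_std_prod_idx == 4, the producer has
 * wrapped, so the first pass copies 512 - 508 == 4 entries (508..511);
 * entries 0..3 are then picked up by the next loop iteration.
 */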
5202
5203 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5204 {
5205         struct tg3 *tp = tnapi->tp;
5206
5207         /* run TX completion thread */
5208         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5209                 tg3_tx(tnapi);
5210                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5211                         return work_done;
5212         }
5213
5214         /* run RX thread, within the bounds set by NAPI.
5215          * All RX "locking" is done by ensuring outside
5216          * code synchronizes with tg3->napi.poll()
5217          */
5218         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5219                 work_done += tg3_rx(tnapi, budget - work_done);
5220
5221         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5222                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5223                 int i, err = 0;
5224                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5225                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5226
5227                 for (i = 1; i < tp->irq_cnt; i++)
5228                         err |= tg3_rx_prodring_xfer(tp, dpr,
5229                                                     &tp->napi[i].prodring);
5230
5231                 wmb();
5232
5233                 if (std_prod_idx != dpr->rx_std_prod_idx)
5234                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5235                                      dpr->rx_std_prod_idx);
5236
5237                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5238                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5239                                      dpr->rx_jmb_prod_idx);
5240
5241                 mmiowb();
5242
5243                 if (err)
5244                         tw32_f(HOSTCC_MODE, tp->coal_now);
5245         }
5246
5247         return work_done;
5248 }
5249
5250 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5251 {
5252         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5253         struct tg3 *tp = tnapi->tp;
5254         int work_done = 0;
5255         struct tg3_hw_status *sblk = tnapi->hw_status;
5256
5257         while (1) {
5258                 work_done = tg3_poll_work(tnapi, work_done, budget);
5259
5260                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5261                         goto tx_recovery;
5262
5263                 if (unlikely(work_done >= budget))
5264                         break;
5265
5266                 /* tnapi->last_tag is written to the interrupt mailbox
5267                  * below to tell the hw how much work has been processed,
5268                  * so we must read it before checking for more work.
5269                  */
5270                 tnapi->last_tag = sblk->status_tag;
5271                 tnapi->last_irq_tag = tnapi->last_tag;
5272                 rmb();
5273
5274                 /* check for RX/TX work to do */
5275                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5276                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5277                         napi_complete(napi);
5278                         /* Reenable interrupts. */
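                        /* Writing the last processed status tag into
                         * bits 31:24 of the interrupt mailbox lets the
                         * hw re-assert the irq at once if it has since
                         * posted a newer tag.
                         */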
5279                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5280                         mmiowb();
5281                         break;
5282                 }
5283         }
5284
5285         return work_done;
5286
5287 tx_recovery:
5288         /* work_done is guaranteed to be less than budget. */
5289         napi_complete(napi);
5290         schedule_work(&tp->reset_task);
5291         return work_done;
5292 }
5293
5294 static void tg3_process_error(struct tg3 *tp)
5295 {
5296         u32 val;
5297         bool real_error = false;
5298
5299         if (tg3_flag(tp, ERROR_PROCESSED))
5300                 return;
5301
5302         /* Check Flow Attention register */
5303         val = tr32(HOSTCC_FLOW_ATTN);
5304         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5305                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5306                 real_error = true;
5307         }
5308
5309         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5310                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5311                 real_error = true;
5312         }
5313
5314         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5315                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5316                 real_error = true;
5317         }
5318
5319         if (!real_error)
5320                 return;
5321
5322         tg3_dump_state(tp);
5323
5324         tg3_flag_set(tp, ERROR_PROCESSED);
5325         schedule_work(&tp->reset_task);
5326 }
5327
5328 static int tg3_poll(struct napi_struct *napi, int budget)
5329 {
5330         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5331         struct tg3 *tp = tnapi->tp;
5332         int work_done = 0;
5333         struct tg3_hw_status *sblk = tnapi->hw_status;
5334
5335         while (1) {
5336                 if (sblk->status & SD_STATUS_ERROR)
5337                         tg3_process_error(tp);
5338
5339                 tg3_poll_link(tp);
5340
5341                 work_done = tg3_poll_work(tnapi, work_done, budget);
5342
5343                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5344                         goto tx_recovery;
5345
5346                 if (unlikely(work_done >= budget))
5347                         break;
5348
5349                 if (tg3_flag(tp, TAGGED_STATUS)) {
5350                         /* tnapi->last_tag is used in tg3_int_reenable() below
5351                          * to tell the hw how much work has been processed,
5352                          * so we must read it before checking for more work.
5353                          */
5354                         tnapi->last_tag = sblk->status_tag;
5355                         tnapi->last_irq_tag = tnapi->last_tag;
5356                         rmb();
5357                 } else
5358                         sblk->status &= ~SD_STATUS_UPDATED;
5359
5360                 if (likely(!tg3_has_work(tnapi))) {
5361                         napi_complete(napi);
5362                         tg3_int_reenable(tnapi);
5363                         break;
5364                 }
5365         }
5366
5367         return work_done;
5368
5369 tx_recovery:
5370         /* work_done is guaranteed to be less than budget. */
5371         napi_complete(napi);
5372         schedule_work(&tp->reset_task);
5373         return work_done;
5374 }
5375
5376 static void tg3_napi_disable(struct tg3 *tp)
5377 {
5378         int i;
5379
5380         for (i = tp->irq_cnt - 1; i >= 0; i--)
5381                 napi_disable(&tp->napi[i].napi);
5382 }
5383
5384 static void tg3_napi_enable(struct tg3 *tp)
5385 {
5386         int i;
5387
5388         for (i = 0; i < tp->irq_cnt; i++)
5389                 napi_enable(&tp->napi[i].napi);
5390 }
5391
5392 static void tg3_napi_init(struct tg3 *tp)
5393 {
5394         int i;
5395
5396         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5397         for (i = 1; i < tp->irq_cnt; i++)
5398                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5399 }
5400
5401 static void tg3_napi_fini(struct tg3 *tp)
5402 {
5403         int i;
5404
5405         for (i = 0; i < tp->irq_cnt; i++)
5406                 netif_napi_del(&tp->napi[i].napi);
5407 }
5408
5409 static inline void tg3_netif_stop(struct tg3 *tp)
5410 {
5411         tp->dev->trans_start = jiffies; /* prevent tx timeout */
5412         tg3_napi_disable(tp);
5413         netif_tx_disable(tp->dev);
5414 }
5415
5416 static inline void tg3_netif_start(struct tg3 *tp)
5417 {
5418         /* NOTE: unconditional netif_tx_wake_all_queues is only
5419          * appropriate so long as all callers are assured to
5420          * have free tx slots (such as after tg3_init_hw)
5421          */
5422         netif_tx_wake_all_queues(tp->dev);
5423
5424         tg3_napi_enable(tp);
5425         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5426         tg3_enable_ints(tp);
5427 }
5428
5429 static void tg3_irq_quiesce(struct tg3 *tp)
5430 {
5431         int i;
5432
5433         BUG_ON(tp->irq_sync);
5434
5435         tp->irq_sync = 1;
5436         smp_mb();
5437
5438         for (i = 0; i < tp->irq_cnt; i++)
5439                 synchronize_irq(tp->napi[i].irq_vec);
5440 }
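
/* The quiesce above works in two steps: irq_sync is set and made
 * visible with smp_mb(), so the tg3_irq_sync() check in each ISR now
 * refuses to schedule NAPI work, and synchronize_irq() then waits out
 * any handler instance that was already running.
 */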
5441
5442 /* Fully shut down all tg3 driver activity elsewhere in the system.
5443  * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
5444  * Most of the time this is not necessary, except when shutting
5445  * down the device.
5446  */
5447 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5448 {
5449         spin_lock_bh(&tp->lock);
5450         if (irq_sync)
5451                 tg3_irq_quiesce(tp);
5452 }
5453
5454 static inline void tg3_full_unlock(struct tg3 *tp)
5455 {
5456         spin_unlock_bh(&tp->lock);
5457 }
5458
5459 /* One-shot MSI handler - the chip automatically disables the
5460  * interrupt after sending the MSI, so the driver doesn't have to.
5461  */
5462 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5463 {
5464         struct tg3_napi *tnapi = dev_id;
5465         struct tg3 *tp = tnapi->tp;
5466
5467         prefetch(tnapi->hw_status);
5468         if (tnapi->rx_rcb)
5469                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5470
5471         if (likely(!tg3_irq_sync(tp)))
5472                 napi_schedule(&tnapi->napi);
5473
5474         return IRQ_HANDLED;
5475 }
5476
5477 /* MSI ISR - No need to check for interrupt sharing and no need to
5478  * flush the status block and interrupt mailbox.  PCI ordering rules
5479  * guarantee that MSI will arrive after the status block.
5480  */
5481 static irqreturn_t tg3_msi(int irq, void *dev_id)
5482 {
5483         struct tg3_napi *tnapi = dev_id;
5484         struct tg3 *tp = tnapi->tp;
5485
5486         prefetch(tnapi->hw_status);
5487         if (tnapi->rx_rcb)
5488                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5489         /*
5490          * Writing any value to intr-mbox-0 clears PCI INTA# and
5491          * chip-internal interrupt pending events.
5492          * Writing non-zero to intr-mbox-0 additionally tells the
5493          * NIC to stop sending us irqs, engaging "in-intr-handler"
5494          * event coalescing.
5495          */
5496         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5497         if (likely(!tg3_irq_sync(tp)))
5498                 napi_schedule(&tnapi->napi);
5499
5500         return IRQ_RETVAL(1);
5501 }
5502
5503 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5504 {
5505         struct tg3_napi *tnapi = dev_id;
5506         struct tg3 *tp = tnapi->tp;
5507         struct tg3_hw_status *sblk = tnapi->hw_status;
5508         unsigned int handled = 1;
5509
5510         /* In INTx mode, it is possible for the interrupt to arrive at the
5511          * CPU before the previously posted status block reaches host memory.
5512          * Reading the PCI State register will confirm whether the
5513          * interrupt is ours and will flush the status block.
5514          */
5515         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5516                 if (tg3_flag(tp, CHIP_RESETTING) ||
5517                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5518                         handled = 0;
5519                         goto out;
5520                 }
5521         }
5522
5523         /*
5524          * Writing any value to intr-mbox-0 clears PCI INTA# and
5525          * chip-internal interrupt pending events.
5526          * Writing non-zero to intr-mbox-0 additionally tells the
5527          * NIC to stop sending us irqs, engaging "in-intr-handler"
5528          * event coalescing.
5529          *
5530          * Flush the mailbox to de-assert the IRQ immediately to prevent
5531          * spurious interrupts.  The flush impacts performance but
5532          * excessive spurious interrupts can be worse in some cases.
5533          */
5534         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5535         if (tg3_irq_sync(tp))
5536                 goto out;
5537         sblk->status &= ~SD_STATUS_UPDATED;
5538         if (likely(tg3_has_work(tnapi))) {
5539                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5540                 napi_schedule(&tnapi->napi);
5541         } else {
5542                 /* No work; a shared interrupt, perhaps?  Re-enable
5543                  * interrupts and flush that PCI write.
5544                  */
5545                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5546                                0x00000000);
5547         }
5548 out:
5549         return IRQ_RETVAL(handled);
5550 }
5551
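     /* Tagged status variant of the INTx handler.  The chip bumps
      * status_tag each time it posts a new status block, so an interrupt
      * whose tag matches the one recorded at the previous interrupt
      * carries no new work for this device.
      */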
5552 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5553 {
5554         struct tg3_napi *tnapi = dev_id;
5555         struct tg3 *tp = tnapi->tp;
5556         struct tg3_hw_status *sblk = tnapi->hw_status;
5557         unsigned int handled = 1;
5558
5559         /* In INTx mode, the interrupt can arrive at the CPU before the
5560          * status block write posted just ahead of it becomes visible.
5561          * Reading the PCI State register will confirm whether the
5562          * interrupt is ours and will flush the status block.
5563          */
5564         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5565                 if (tg3_flag(tp, CHIP_RESETTING) ||
5566                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5567                         handled = 0;
5568                         goto out;
5569                 }
5570         }
5571
5572         /*
5573          * Writing any value to intr-mbox-0 clears PCI INTA# and
5574          * chip-internal interrupt pending events.
5575          * Writing non-zero to intr-mbox-0 additionally tells the
5576          * NIC to stop sending us irqs, engaging "in-intr-handler"
5577          * event coalescing.
5578          *
5579          * Flush the mailbox to de-assert the IRQ immediately to prevent
5580          * spurious interrupts.  The flush impacts performance but
5581          * excessive spurious interrupts can be worse in some cases.
5582          */
5583         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5584
5585         /*
5586          * In a shared interrupt configuration, sometimes other devices'
5587          * interrupts will scream.  We record the current status tag here
5588          * so that the above check can report that the screaming interrupts
5589          * are unhandled.  Eventually they will be silenced.
5590          */
5591         tnapi->last_irq_tag = sblk->status_tag;
5592
5593         if (tg3_irq_sync(tp))
5594                 goto out;
5595
5596         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5597
5598         napi_schedule(&tnapi->napi);
5599
5600 out:
5601         return IRQ_RETVAL(handled);
5602 }
5603
5604 /* ISR for interrupt test */
5605 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5606 {
5607         struct tg3_napi *tnapi = dev_id;
5608         struct tg3 *tp = tnapi->tp;
5609         struct tg3_hw_status *sblk = tnapi->hw_status;
5610
5611         if ((sblk->status & SD_STATUS_UPDATED) ||
5612             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5613                 tg3_disable_ints(tp);
5614                 return IRQ_RETVAL(1);
5615         }
5616         return IRQ_RETVAL(0);
5617 }
5618
5619 static int tg3_init_hw(struct tg3 *, int);
5620 static int tg3_halt(struct tg3 *, int, int);
5621
5622 /* Restart hardware after configuration changes, self-test, etc.
5623  * Invoked with tp->lock held.
5624  */
5625 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5626         __releases(tp->lock)
5627         __acquires(tp->lock)
5628 {
5629         int err;
5630
5631         err = tg3_init_hw(tp, reset_phy);
5632         if (err) {
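                     /* Tear everything down and close the device.  dev_close()
                      * must run without tp->lock held, hence the unlock/relock
                      * pair matching the __releases/__acquires annotations
                      * above.
                      */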
5633                 netdev_err(tp->dev,
5634                            "Failed to re-initialize device, aborting\n");
5635                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5636                 tg3_full_unlock(tp);
5637                 del_timer_sync(&tp->timer);
5638                 tp->irq_sync = 0;
5639                 tg3_napi_enable(tp);
5640                 dev_close(tp->dev);
5641                 tg3_full_lock(tp, 0);
5642         }
5643         return err;
5644 }
5645
5646 #ifdef CONFIG_NET_POLL_CONTROLLER
5647 static void tg3_poll_controller(struct net_device *dev)
5648 {
5649         int i;
5650         struct tg3 *tp = netdev_priv(dev);
5651
5652         for (i = 0; i < tp->irq_cnt; i++)
5653                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5654 }
5655 #endif
5656
5657 static void tg3_reset_task(struct work_struct *work)
5658 {
5659         struct tg3 *tp = container_of(work, struct tg3, reset_task);
5660         int err;
5661         unsigned int restart_timer;
5662
5663         tg3_full_lock(tp, 0);
5664
5665         if (!netif_running(tp->dev)) {
5666                 tg3_full_unlock(tp);
5667                 return;
5668         }
5669
5670         tg3_full_unlock(tp);
5671
5672         tg3_phy_stop(tp);
5673
5674         tg3_netif_stop(tp);
5675
5676         tg3_full_lock(tp, 1);
5677
5678         restart_timer = tg3_flag(tp, RESTART_TIMER);
5679         tg3_flag_clear(tp, RESTART_TIMER);
5680
5681         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5682                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5683                 tp->write32_rx_mbox = tg3_write_flush_reg32;
5684                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5685                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5686         }
5687
5688         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5689         err = tg3_init_hw(tp, 1);
5690         if (err)
5691                 goto out;
5692
5693         tg3_netif_start(tp);
5694
5695         if (restart_timer)
5696                 mod_timer(&tp->timer, jiffies + 1);
5697
5698 out:
5699         tg3_full_unlock(tp);
5700
5701         if (!err)
5702                 tg3_phy_start(tp);
5703 }
5704
5705 static void tg3_tx_timeout(struct net_device *dev)
5706 {
5707         struct tg3 *tp = netdev_priv(dev);
5708
5709         if (netif_msg_tx_err(tp)) {
5710                 netdev_err(dev, "transmit timed out, resetting\n");
5711                 tg3_dump_state(tp);
5712         }
5713
5714         schedule_work(&tp->reset_task);
5715 }
5716
5717 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5718 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5719 {
5720         u32 base = (u32) mapping & 0xffffffff;
5721
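             /* A buffer crosses a 4GB boundary exactly when its low 32 bits
              * wrap.  The first test is a cheap filter (only buffers starting
              * in the last ~9KB below a boundary can qualify); the second
              * detects the actual 32-bit wraparound, with 8 bytes of slack,
              * presumably for hardware that reads slightly past the buffer.
              */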
5722         return (base > 0xffffdcc0) && (base + len + 8 < base);
5723 }
5724
5725 /* Test for DMA addresses > 40-bit */
5726 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5727                                           int len)
5728 {
5729 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5730         if (tg3_flag(tp, 40BIT_DMA_BUG))
5731                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5732         return 0;
5733 #else
5734         return 0;
5735 #endif
5736 }
5737
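     /* Callers pack the last argument as (mss << 1) | is_end, so a single
      * u32 carries both the MSS and the end-of-packet marker.
      */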
5738 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5739                         dma_addr_t mapping, int len, u32 flags,
5740                         u32 mss_and_is_end)
5741 {
5742         struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5743         int is_end = (mss_and_is_end & 0x1);
5744         u32 mss = (mss_and_is_end >> 1);
5745         u32 vlan_tag = 0;
5746
5747         if (is_end)
5748                 flags |= TXD_FLAG_END;
5749         if (flags & TXD_FLAG_VLAN) {
5750                 vlan_tag = flags >> 16;
5751                 flags &= 0xffff;
5752         }
5753         vlan_tag |= (mss << TXD_MSS_SHIFT);
5754
5755         txd->addr_hi = ((u64) mapping >> 32);
5756         txd->addr_lo = ((u64) mapping & 0xffffffff);
5757         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5758         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5759 }
5760
5761 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5762 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5763                                        struct sk_buff *skb, u32 last_plus_one,
5764                                        u32 *start, u32 base_flags, u32 mss)
5765 {
5766         struct tg3 *tp = tnapi->tp;
5767         struct sk_buff *new_skb;
5768         dma_addr_t new_addr = 0;
5769         u32 entry = *start;
5770         int i, ret = 0;
5771
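             /* A plain linear copy suffices for most chips; on the 5701,
              * leave extra headroom so the copied data can start on a 4-byte
              * boundary, presumably to avoid that chip's TX alignment bug.
              */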
5772         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5773                 new_skb = skb_copy(skb, GFP_ATOMIC);
5774         else {
5775                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5776
5777                 new_skb = skb_copy_expand(skb,
5778                                           skb_headroom(skb) + more_headroom,
5779                                           skb_tailroom(skb), GFP_ATOMIC);
5780         }
5781
5782         if (!new_skb) {
5783                 ret = -1;
5784         } else {
5785                 /* New SKB is guaranteed to be linear. */
5786                 entry = *start;
5787                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5788                                           PCI_DMA_TODEVICE);
5789                 /* Make sure the mapping succeeded */
5790                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5791                         ret = -1;
5792                         dev_kfree_skb(new_skb);
5793                         new_skb = NULL;
5794
5795                 /* Make sure new skb does not cross any 4G boundaries.
5796                  * Drop the packet if it does.
5797                  */
5798                 } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
5799                            tg3_4g_overflow_test(new_addr, new_skb->len)) {
5800                         pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5801                                          PCI_DMA_TODEVICE);
5802                         ret = -1;
5803                         dev_kfree_skb(new_skb);
5804                         new_skb = NULL;
5805                 } else {
5806                         tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5807                                     base_flags, 1 | (mss << 1));
5808                         *start = NEXT_TX(entry);
5809                 }
5810         }
5811
5812         /* Now clean up the sw ring entries. */
5813         i = 0;
5814         while (entry != last_plus_one) {
5815                 int len;
5816
5817                 if (i == 0)
5818                         len = skb_headlen(skb);
5819                 else
5820                         len = skb_shinfo(skb)->frags[i-1].size;
5821
5822                 pci_unmap_single(tp->pdev,
5823                                  dma_unmap_addr(&tnapi->tx_buffers[entry],
5824                                                 mapping),
5825                                  len, PCI_DMA_TODEVICE);
5826                 if (i == 0) {
5827                         tnapi->tx_buffers[entry].skb = new_skb;
5828                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5829                                            new_addr);
5830                 } else {
5831                         tnapi->tx_buffers[entry].skb = NULL;
5832                 }
5833                 entry = NEXT_TX(entry);
5834                 i++;
5835         }
5836
5837         dev_kfree_skb(skb);
5838
5839         return ret;
5840 }
5841
5842 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5843
5844 /* Use GSO to workaround a rare TSO bug that may be triggered when the
5845  * TSO header is greater than 80 bytes.
5846  */
5847 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5848 {
5849         struct sk_buff *segs, *nskb;
5850         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5851
5852         /* Estimate the number of fragments in the worst case */
5853         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5854                 netif_stop_queue(tp->dev);
5855
5856                 /* netif_tx_stop_queue() must be done before checking
5857                  * tx index in tg3_tx_avail() below, because in
5858                  * tg3_tx(), we update tx index before checking for
5859                  * netif_tx_queue_stopped().
5860                  */
5861                 smp_mb();
5862                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5863                         return NETDEV_TX_BUSY;
5864
5865                 netif_wake_queue(tp->dev);
5866         }
5867
5868         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5869         if (IS_ERR(segs))
5870                 goto tg3_tso_bug_end;
5871
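             /* Push each segment through the normal xmit path.  The segments
              * are no longer GSO packets (gso_size is zero), so this cannot
              * recurse back into tg3_tso_bug().
              */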
5872         do {
5873                 nskb = segs;
5874                 segs = segs->next;
5875                 nskb->next = NULL;
5876                 tg3_start_xmit(nskb, tp->dev);
5877         } while (segs);
5878
5879 tg3_tso_bug_end:
5880         dev_kfree_skb(skb);
5881
5882         return NETDEV_TX_OK;
5883 }
5884
5885 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5886  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5887  */
5888 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5889 {
5890         struct tg3 *tp = netdev_priv(dev);
5891         u32 len, entry, base_flags, mss;
5892         int would_hit_hwbug;
5893         dma_addr_t mapping;
5894         struct tg3_napi *tnapi;
5895         struct netdev_queue *txq;
5896         unsigned int i, last;
5897
5898         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5899         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
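             /* With TSS enabled, vector 0 handles no TX traffic (see
              * tg3_alloc_consistent()), so TX queue N is serviced by
              * napi[N + 1].
              */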
5900         if (tg3_flag(tp, ENABLE_TSS))
5901                 tnapi++;
5902
5903         /* We are running in BH disabled context with netif_tx_lock
5904          * and TX reclaim runs via tp->napi.poll inside of a software
5905          * interrupt.  Furthermore, IRQ processing runs lockless so we have
5906          * no IRQ context deadlocks to worry about either.  Rejoice!
5907          */
5908         if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5909                 if (!netif_tx_queue_stopped(txq)) {
5910                         netif_tx_stop_queue(txq);
5911
5912                         /* This is a hard error, log it. */
5913                         netdev_err(dev,
5914                                    "BUG! Tx Ring full when queue awake!\n");
5915                 }
5916                 return NETDEV_TX_BUSY;
5917         }
5918
5919         entry = tnapi->tx_prod;
5920         base_flags = 0;
5921         if (skb->ip_summed == CHECKSUM_PARTIAL)
5922                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5923
5924         mss = skb_shinfo(skb)->gso_size;
5925         if (mss) {
5926                 struct iphdr *iph;
5927                 u32 tcp_opt_len, hdr_len;
5928
5929                 if (skb_header_cloned(skb) &&
5930                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5931                         dev_kfree_skb(skb);
5932                         goto out_unlock;
5933                 }
5934
5935                 iph = ip_hdr(skb);
5936                 tcp_opt_len = tcp_optlen(skb);
5937
5938                 if (skb_is_gso_v6(skb)) {
5939                         hdr_len = skb_headlen(skb) - ETH_HLEN;
5940                 } else {
5941                         u32 ip_tcp_len;
5942
5943                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5944                         hdr_len = ip_tcp_len + tcp_opt_len;
5945
5946                         iph->check = 0;
5947                         iph->tot_len = htons(mss + hdr_len);
5948                 }
5949
5950                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5951                     tg3_flag(tp, TSO_BUG))
5952                         return tg3_tso_bug(tp, skb);
5953
5954                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5955                                TXD_FLAG_CPU_POST_DMA);
5956
5957                 if (tg3_flag(tp, HW_TSO_1) ||
5958                     tg3_flag(tp, HW_TSO_2) ||
5959                     tg3_flag(tp, HW_TSO_3)) {
5960                         tcp_hdr(skb)->check = 0;
5961                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5962                 } else
5963                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5964                                                                  iph->daddr, 0,
5965                                                                  IPPROTO_TCP,
5966                                                                  0);
5967
5968                 if (tg3_flag(tp, HW_TSO_3)) {
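                             /* These chips take the header length scattered
                              * across spare descriptor bits: hdr_len bits 2-3
                              * land in mss bits 14-15, bit 4 in base_flags
                              * bit 4, and bits 5-9 in base_flags bits 10-14.
                              */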
5969                         mss |= (hdr_len & 0xc) << 12;
5970                         if (hdr_len & 0x10)
5971                                 base_flags |= 0x00000010;
5972                         base_flags |= (hdr_len & 0x3e0) << 5;
5973                 } else if (tg3_flag(tp, HW_TSO_2))
5974                         mss |= hdr_len << 9;
5975                 else if (tg3_flag(tp, HW_TSO_1) ||
5976                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5977                         if (tcp_opt_len || iph->ihl > 5) {
5978                                 int tsflags;
5979
5980                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5981                                 mss |= (tsflags << 11);
5982                         }
5983                 } else {
5984                         if (tcp_opt_len || iph->ihl > 5) {
5985                                 int tsflags;
5986
5987                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5988                                 base_flags |= tsflags << 12;
5989                         }
5990                 }
5991         }
5992
5993         if (vlan_tx_tag_present(skb))
5994                 base_flags |= (TXD_FLAG_VLAN |
5995                                (vlan_tx_tag_get(skb) << 16));
5996
5997         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
5998             !mss && skb->len > VLAN_ETH_FRAME_LEN)
5999                 base_flags |= TXD_FLAG_JMB_PKT;
6000
6001         len = skb_headlen(skb);
6002
6003         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6004         if (pci_dma_mapping_error(tp->pdev, mapping)) {
6005                 dev_kfree_skb(skb);
6006                 goto out_unlock;
6007         }
6008
6009         tnapi->tx_buffers[entry].skb = skb;
6010         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6011
6012         would_hit_hwbug = 0;
6013
6014         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6015                 would_hit_hwbug = 1;
6016
6017         if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6018             tg3_4g_overflow_test(mapping, len))
6019                 would_hit_hwbug = 1;
6020
6021         if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6022             tg3_40bit_overflow_test(tp, mapping, len))
6023                 would_hit_hwbug = 1;
6024
6025         if (tg3_flag(tp, 5701_DMA_BUG))
6026                 would_hit_hwbug = 1;
6027
6028         tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6029                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6030
6031         entry = NEXT_TX(entry);
6032
6033         /* Now loop through additional data fragments, and queue them. */
6034         if (skb_shinfo(skb)->nr_frags > 0) {
6035                 last = skb_shinfo(skb)->nr_frags - 1;
6036                 for (i = 0; i <= last; i++) {
6037                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6038
6039                         len = frag->size;
6040                         mapping = pci_map_page(tp->pdev,
6041                                                frag->page,
6042                                                frag->page_offset,
6043                                                len, PCI_DMA_TODEVICE);
6044
6045                         tnapi->tx_buffers[entry].skb = NULL;
6046                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6047                                            mapping);
6048                         if (pci_dma_mapping_error(tp->pdev, mapping))
6049                                 goto dma_error;
6050
6051                         if (tg3_flag(tp, SHORT_DMA_BUG) &&
6052                             len <= 8)
6053                                 would_hit_hwbug = 1;
6054
6055                         if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6056                             tg3_4g_overflow_test(mapping, len))
6057                                 would_hit_hwbug = 1;
6058
6059                         if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6060                             tg3_40bit_overflow_test(tp, mapping, len))
6061                                 would_hit_hwbug = 1;
6062
6063                         if (tg3_flag(tp, HW_TSO_1) ||
6064                             tg3_flag(tp, HW_TSO_2) ||
6065                             tg3_flag(tp, HW_TSO_3))
6066                                 tg3_set_txd(tnapi, entry, mapping, len,
6067                                             base_flags, (i == last)|(mss << 1));
6068                         else
6069                                 tg3_set_txd(tnapi, entry, mapping, len,
6070                                             base_flags, (i == last));
6071
6072                         entry = NEXT_TX(entry);
6073                 }
6074         }
6075
6076         if (would_hit_hwbug) {
6077                 u32 last_plus_one = entry;
6078                 u32 start;
6079
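                     /* Rewind to the first descriptor this packet consumed:
                      * one for the linear head plus one per fragment, modulo
                      * the ring size.
                      */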
6080                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
6081                 start &= (TG3_TX_RING_SIZE - 1);
6082
6083                 /* If the workaround fails due to memory/mapping
6084                  * failure, silently drop this packet.
6085                  */
6086                 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
6087                                                 &start, base_flags, mss))
6088                         goto out_unlock;
6089
6090                 entry = start;
6091         }
6092
6093         /* Packets are ready, update Tx producer idx locally and on card. */
6094         tw32_tx_mbox(tnapi->prodmbox, entry);
6095
6096         tnapi->tx_prod = entry;
6097         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6098                 netif_tx_stop_queue(txq);
6099
6100                 /* netif_tx_stop_queue() must be done before checking
6101                  * tx index in tg3_tx_avail() below, because in
6102                  * tg3_tx(), we update tx index before checking for
6103                  * netif_tx_queue_stopped().
6104                  */
6105                 smp_mb();
6106                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6107                         netif_tx_wake_queue(txq);
6108         }
6109
6110 out_unlock:
6111         mmiowb();
6112
6113         return NETDEV_TX_OK;
6114
6115 dma_error:
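             /* A fragment mapping failed.  Unmap the head and every fragment
              * mapped so far, then silently drop the packet.
              */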
6116         last = i;
6117         entry = tnapi->tx_prod;
6118         tnapi->tx_buffers[entry].skb = NULL;
6119         pci_unmap_single(tp->pdev,
6120                          dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
6121                          skb_headlen(skb),
6122                          PCI_DMA_TODEVICE);
6123         for (i = 0; i <= last; i++) {
6124                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6125                 entry = NEXT_TX(entry);
6126
6127                 pci_unmap_page(tp->pdev,
6128                                dma_unmap_addr(&tnapi->tx_buffers[entry],
6129                                               mapping),
6130                                frag->size, PCI_DMA_TODEVICE);
6131         }
6132
6133         dev_kfree_skb(skb);
6134         return NETDEV_TX_OK;
6135 }
6136
6137 static void tg3_set_loopback(struct net_device *dev, u32 features)
6138 {
6139         struct tg3 *tp = netdev_priv(dev);
6140
6141         if (features & NETIF_F_LOOPBACK) {
6142                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6143                         return;
6144
6145                 /*
6146                  * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6147                  * loopback mode if Half-Duplex mode was negotiated earlier.
6148                  */
6149                 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6150
6151                 /* Enable internal MAC loopback mode */
6152                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6153                 spin_lock_bh(&tp->lock);
6154                 tw32(MAC_MODE, tp->mac_mode);
6155                 netif_carrier_on(tp->dev);
6156                 spin_unlock_bh(&tp->lock);
6157                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6158         } else {
6159                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6160                         return;
6161
6162                 /* Disable internal MAC loopback mode */
6163                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6164                 spin_lock_bh(&tp->lock);
6165                 tw32(MAC_MODE, tp->mac_mode);
6166                 /* Force link status check */
6167                 tg3_setup_phy(tp, 1);
6168                 spin_unlock_bh(&tp->lock);
6169                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6170         }
6171 }
6172
6173 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6174 {
6175         struct tg3 *tp = netdev_priv(dev);
6176
6177         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6178                 features &= ~NETIF_F_ALL_TSO;
6179
6180         return features;
6181 }
6182
6183 static int tg3_set_features(struct net_device *dev, u32 features)
6184 {
6185         u32 changed = dev->features ^ features;
6186
6187         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6188                 tg3_set_loopback(dev, features);
6189
6190         return 0;
6191 }
6192
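     /* 5780-class chips cannot do TSO with a jumbo MTU (see
      * tg3_fix_features()), so TSO_CAPABLE is toggled around the feature
      * update; other jumbo-capable chips simply switch the jumbo receive
      * ring on or off.
      */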
6193 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6194                                int new_mtu)
6195 {
6196         dev->mtu = new_mtu;
6197
6198         if (new_mtu > ETH_DATA_LEN) {
6199                 if (tg3_flag(tp, 5780_CLASS)) {
6200                         netdev_update_features(dev);
6201                         tg3_flag_clear(tp, TSO_CAPABLE);
6202                 } else {
6203                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
6204                 }
6205         } else {
6206                 if (tg3_flag(tp, 5780_CLASS)) {
6207                         tg3_flag_set(tp, TSO_CAPABLE);
6208                         netdev_update_features(dev);
6209                 }
6210                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6211         }
6212 }
6213
6214 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6215 {
6216         struct tg3 *tp = netdev_priv(dev);
6217         int err;
6218
6219         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6220                 return -EINVAL;
6221
6222         if (!netif_running(dev)) {
6223                 /* We'll just catch it later when the
6224                  * device is brought up.
6225                  */
6226                 tg3_set_mtu(dev, tp, new_mtu);
6227                 return 0;
6228         }
6229
6230         tg3_phy_stop(tp);
6231
6232         tg3_netif_stop(tp);
6233
6234         tg3_full_lock(tp, 1);
6235
6236         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6237
6238         tg3_set_mtu(dev, tp, new_mtu);
6239
6240         err = tg3_restart_hw(tp, 0);
6241
6242         if (!err)
6243                 tg3_netif_start(tp);
6244
6245         tg3_full_unlock(tp);
6246
6247         if (!err)
6248                 tg3_phy_start(tp);
6249
6250         return err;
6251 }
6252
6253 static void tg3_rx_prodring_free(struct tg3 *tp,
6254                                  struct tg3_rx_prodring_set *tpr)
6255 {
6256         int i;
6257
6258         if (tpr != &tp->napi[0].prodring) {
6259                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6260                      i = (i + 1) & tp->rx_std_ring_mask)
6261                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6262                                         tp->rx_pkt_map_sz);
6263
6264                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6265                         for (i = tpr->rx_jmb_cons_idx;
6266                              i != tpr->rx_jmb_prod_idx;
6267                              i = (i + 1) & tp->rx_jmb_ring_mask) {
6268                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6269                                                 TG3_RX_JMB_MAP_SZ);
6270                         }
6271                 }
6272
6273                 return;
6274         }
6275
6276         for (i = 0; i <= tp->rx_std_ring_mask; i++)
6277                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6278                                 tp->rx_pkt_map_sz);
6279
6280         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6281                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6282                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6283                                         TG3_RX_JMB_MAP_SZ);
6284         }
6285 }
6286
6287 /* Initialize rx rings for packet processing.
6288  *
6289  * The chip has been shut down and the driver detached from
6290  * the networking stack, so no interrupts or new tx packets will
6291  * end up in the driver.  tp->{tx,}lock are held and thus
6292  * we may not sleep.
6293  */
6294 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6295                                  struct tg3_rx_prodring_set *tpr)
6296 {
6297         u32 i, rx_pkt_dma_sz;
6298
6299         tpr->rx_std_cons_idx = 0;
6300         tpr->rx_std_prod_idx = 0;
6301         tpr->rx_jmb_cons_idx = 0;
6302         tpr->rx_jmb_prod_idx = 0;
6303
6304         if (tpr != &tp->napi[0].prodring) {
6305                 memset(&tpr->rx_std_buffers[0], 0,
6306                        TG3_RX_STD_BUFF_RING_SIZE(tp));
6307                 if (tpr->rx_jmb_buffers)
6308                         memset(&tpr->rx_jmb_buffers[0], 0,
6309                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
6310                 goto done;
6311         }
6312
6313         /* Zero out all descriptors. */
6314         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6315
6316         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6317         if (tg3_flag(tp, 5780_CLASS) &&
6318             tp->dev->mtu > ETH_DATA_LEN)
6319                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6320         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6321
6322         /* Initialize invariants of the rings; we only set this
6323          * stuff once.  This works because the card does not
6324          * write into the rx buffer posting rings.
6325          */
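             /* The opaque cookie (ring type plus index) is presumably echoed
              * back in RX completion descriptors so the driver can locate
              * the buffer a completion refers to.
              */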
6326         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6327                 struct tg3_rx_buffer_desc *rxd;
6328
6329                 rxd = &tpr->rx_std[i];
6330                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6331                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6332                 rxd->opaque = (RXD_OPAQUE_RING_STD |
6333                                (i << RXD_OPAQUE_INDEX_SHIFT));
6334         }
6335
6336         /* Now allocate fresh SKBs for each rx ring. */
6337         for (i = 0; i < tp->rx_pending; i++) {
6338                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6339                         netdev_warn(tp->dev,
6340                                     "Using a smaller RX standard ring. Only "
6341                                     "%d out of %d buffers were allocated "
6342                                     "successfully\n", i, tp->rx_pending);
6343                         if (i == 0)
6344                                 goto initfail;
6345                         tp->rx_pending = i;
6346                         break;
6347                 }
6348         }
6349
6350         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6351                 goto done;
6352
6353         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6354
6355         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6356                 goto done;
6357
6358         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6359                 struct tg3_rx_buffer_desc *rxd;
6360
6361                 rxd = &tpr->rx_jmb[i].std;
6362                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6363                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6364                                   RXD_FLAG_JUMBO;
6365                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6366                        (i << RXD_OPAQUE_INDEX_SHIFT));
6367         }
6368
6369         for (i = 0; i < tp->rx_jumbo_pending; i++) {
6370                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6371                         netdev_warn(tp->dev,
6372                                     "Using a smaller RX jumbo ring. Only %d "
6373                                     "out of %d buffers were allocated "
6374                                     "successfully\n", i, tp->rx_jumbo_pending);
6375                         if (i == 0)
6376                                 goto initfail;
6377                         tp->rx_jumbo_pending = i;
6378                         break;
6379                 }
6380         }
6381
6382 done:
6383         return 0;
6384
6385 initfail:
6386         tg3_rx_prodring_free(tp, tpr);
6387         return -ENOMEM;
6388 }
6389
6390 static void tg3_rx_prodring_fini(struct tg3 *tp,
6391                                  struct tg3_rx_prodring_set *tpr)
6392 {
6393         kfree(tpr->rx_std_buffers);
6394         tpr->rx_std_buffers = NULL;
6395         kfree(tpr->rx_jmb_buffers);
6396         tpr->rx_jmb_buffers = NULL;
6397         if (tpr->rx_std) {
6398                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6399                                   tpr->rx_std, tpr->rx_std_mapping);
6400                 tpr->rx_std = NULL;
6401         }
6402         if (tpr->rx_jmb) {
6403                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6404                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
6405                 tpr->rx_jmb = NULL;
6406         }
6407 }
6408
6409 static int tg3_rx_prodring_init(struct tg3 *tp,
6410                                 struct tg3_rx_prodring_set *tpr)
6411 {
6412         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6413                                       GFP_KERNEL);
6414         if (!tpr->rx_std_buffers)
6415                 return -ENOMEM;
6416
6417         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6418                                          TG3_RX_STD_RING_BYTES(tp),
6419                                          &tpr->rx_std_mapping,
6420                                          GFP_KERNEL);
6421         if (!tpr->rx_std)
6422                 goto err_out;
6423
6424         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6425                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6426                                               GFP_KERNEL);
6427                 if (!tpr->rx_jmb_buffers)
6428                         goto err_out;
6429
6430                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6431                                                  TG3_RX_JMB_RING_BYTES(tp),
6432                                                  &tpr->rx_jmb_mapping,
6433                                                  GFP_KERNEL);
6434                 if (!tpr->rx_jmb)
6435                         goto err_out;
6436         }
6437
6438         return 0;
6439
6440 err_out:
6441         tg3_rx_prodring_fini(tp, tpr);
6442         return -ENOMEM;
6443 }
6444
6445 /* Free up pending packets in all rx/tx rings.
6446  *
6447  * The chip has been shut down and the driver detached from
6448  * the networking stack, so no interrupts or new tx packets will
6449  * end up in the driver.  tp->{tx,}lock is not held and we are not
6450  * in an interrupt context and thus may sleep.
6451  */
6452 static void tg3_free_rings(struct tg3 *tp)
6453 {
6454         int i, j;
6455
6456         for (j = 0; j < tp->irq_cnt; j++) {
6457                 struct tg3_napi *tnapi = &tp->napi[j];
6458
6459                 tg3_rx_prodring_free(tp, &tnapi->prodring);
6460
6461                 if (!tnapi->tx_buffers)
6462                         continue;
6463
6464                 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6465                         struct ring_info *txp;
6466                         struct sk_buff *skb;
6467                         unsigned int k;
6468
6469                         txp = &tnapi->tx_buffers[i];
6470                         skb = txp->skb;
6471
6472                         if (skb == NULL) {
6473                                 i++;
6474                                 continue;
6475                         }
6476
6477                         pci_unmap_single(tp->pdev,
6478                                          dma_unmap_addr(txp, mapping),
6479                                          skb_headlen(skb),
6480                                          PCI_DMA_TODEVICE);
6481                         txp->skb = NULL;
6482
6483                         i++;
6484
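                             /* Fragment buffers occupy the descriptors that
                              * follow the head and may wrap around the ring.
                              */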
6485                         for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6486                                 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6487                                 pci_unmap_page(tp->pdev,
6488                                                dma_unmap_addr(txp, mapping),
6489                                                skb_shinfo(skb)->frags[k].size,
6490                                                PCI_DMA_TODEVICE);
6491                                 i++;
6492                         }
6493
6494                         dev_kfree_skb_any(skb);
6495                 }
6496         }
6497 }
6498
6499 /* Initialize tx/rx rings for packet processing.
6500  *
6501  * The chip has been shut down and the driver detached from
6502  * the networking stack, so no interrupts or new tx packets will
6503  * end up in the driver.  tp->{tx,}lock are held and thus
6504  * we may not sleep.
6505  */
6506 static int tg3_init_rings(struct tg3 *tp)
6507 {
6508         int i;
6509
6510         /* Free up all the SKBs. */
6511         tg3_free_rings(tp);
6512
6513         for (i = 0; i < tp->irq_cnt; i++) {
6514                 struct tg3_napi *tnapi = &tp->napi[i];
6515
6516                 tnapi->last_tag = 0;
6517                 tnapi->last_irq_tag = 0;
6518                 tnapi->hw_status->status = 0;
6519                 tnapi->hw_status->status_tag = 0;
6520                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6521
6522                 tnapi->tx_prod = 0;
6523                 tnapi->tx_cons = 0;
6524                 if (tnapi->tx_ring)
6525                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6526
6527                 tnapi->rx_rcb_ptr = 0;
6528                 if (tnapi->rx_rcb)
6529                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6530
6531                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6532                         tg3_free_rings(tp);
6533                         return -ENOMEM;
6534                 }
6535         }
6536
6537         return 0;
6538 }
6539
6540 /*
6541  * Must only be invoked with interrupt sources disabled and
6542  * the hardware shut down.
6543  */
6544 static void tg3_free_consistent(struct tg3 *tp)
6545 {
6546         int i;
6547
6548         for (i = 0; i < tp->irq_cnt; i++) {
6549                 struct tg3_napi *tnapi = &tp->napi[i];
6550
6551                 if (tnapi->tx_ring) {
6552                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6553                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
6554                         tnapi->tx_ring = NULL;
6555                 }
6556
6557                 kfree(tnapi->tx_buffers);
6558                 tnapi->tx_buffers = NULL;
6559
6560                 if (tnapi->rx_rcb) {
6561                         dma_free_coherent(&tp->pdev->dev,
6562                                           TG3_RX_RCB_RING_BYTES(tp),
6563                                           tnapi->rx_rcb,
6564                                           tnapi->rx_rcb_mapping);
6565                         tnapi->rx_rcb = NULL;
6566                 }
6567
6568                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6569
6570                 if (tnapi->hw_status) {
6571                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6572                                           tnapi->hw_status,
6573                                           tnapi->status_mapping);
6574                         tnapi->hw_status = NULL;
6575                 }
6576         }
6577
6578         if (tp->hw_stats) {
6579                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6580                                   tp->hw_stats, tp->stats_mapping);
6581                 tp->hw_stats = NULL;
6582         }
6583 }
6584
6585 /*
6586  * Must only be invoked with interrupt sources disabled and
6587  * the hardware shut down.  Can sleep.
6588  */
6589 static int tg3_alloc_consistent(struct tg3 *tp)
6590 {
6591         int i;
6592
6593         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6594                                           sizeof(struct tg3_hw_stats),
6595                                           &tp->stats_mapping,
6596                                           GFP_KERNEL);
6597         if (!tp->hw_stats)
6598                 goto err_out;
6599
6600         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6601
6602         for (i = 0; i < tp->irq_cnt; i++) {
6603                 struct tg3_napi *tnapi = &tp->napi[i];
6604                 struct tg3_hw_status *sblk;
6605
6606                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6607                                                       TG3_HW_STATUS_SIZE,
6608                                                       &tnapi->status_mapping,
6609                                                       GFP_KERNEL);
6610                 if (!tnapi->hw_status)
6611                         goto err_out;
6612
6613                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6614                 sblk = tnapi->hw_status;
6615
6616                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6617                         goto err_out;
6618
6619                 /* If multivector TSS is enabled, vector 0 does not handle
6620                  * tx interrupts.  Don't allocate any resources for it.
6621                  */
6622                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6623                     (i && tg3_flag(tp, ENABLE_TSS))) {
6624                         tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6625                                                     TG3_TX_RING_SIZE,
6626                                                     GFP_KERNEL);
6627                         if (!tnapi->tx_buffers)
6628                                 goto err_out;
6629
6630                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6631                                                             TG3_TX_RING_BYTES,
6632                                                         &tnapi->tx_desc_mapping,
6633                                                             GFP_KERNEL);
6634                         if (!tnapi->tx_ring)
6635                                 goto err_out;
6636                 }
6637
6638                 /*
6639                  * When RSS is enabled, the status block format changes
6640                  * slightly.  The "rx_jumbo_consumer", "reserved",
6641                  * and "rx_mini_consumer" members get mapped to the
6642                  * other three rx return ring producer indexes.
6643                  */
6644                 switch (i) {
6645                 default:
6646                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6647                         break;
6648                 case 2:
6649                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6650                         break;
6651                 case 3:
6652                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
6653                         break;
6654                 case 4:
6655                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6656                         break;
6657                 }
6658
6659                 /*
6660                  * If multivector RSS is enabled, vector 0 does not handle
6661                  * rx or tx interrupts.  Don't allocate any resources for it.
6662                  */
6663                 if (!i && tg3_flag(tp, ENABLE_RSS))
6664                         continue;
6665
6666                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6667                                                    TG3_RX_RCB_RING_BYTES(tp),
6668                                                    &tnapi->rx_rcb_mapping,
6669                                                    GFP_KERNEL);
6670                 if (!tnapi->rx_rcb)
6671                         goto err_out;
6672
6673                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6674         }
6675
6676         return 0;
6677
6678 err_out:
6679         tg3_free_consistent(tp);
6680         return -ENOMEM;
6681 }
6682
6683 #define MAX_WAIT_CNT 1000
6684
6685 /* To stop a block, clear the enable bit and poll till it
6686  * clears.  tp->lock is held.
6687  */
6688 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6689 {
6690         unsigned int i;
6691         u32 val;
6692
6693         if (tg3_flag(tp, 5705_PLUS)) {
6694                 switch (ofs) {
6695                 case RCVLSC_MODE:
6696                 case DMAC_MODE:
6697                 case MBFREE_MODE:
6698                 case BUFMGR_MODE:
6699                 case MEMARB_MODE:
6700                         /* We can't enable/disable these bits of the
6701                          * 5705/5750, just say success.
6702                          */
6703                         return 0;
6704
6705                 default:
6706                         break;
6707                 }
6708         }
6709
6710         val = tr32(ofs);
6711         val &= ~enable_bit;
6712         tw32_f(ofs, val);
6713
6714         for (i = 0; i < MAX_WAIT_CNT; i++) {
6715                 udelay(100);
6716                 val = tr32(ofs);
6717                 if ((val & enable_bit) == 0)
6718                         break;
6719         }
6720
6721         if (i == MAX_WAIT_CNT && !silent) {
6722                 dev_err(&tp->pdev->dev,
6723                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6724                         ofs, enable_bit);
6725                 return -ENODEV;
6726         }
6727
6728         return 0;
6729 }
6730
6731 /* tp->lock is held. */
6732 static int tg3_abort_hw(struct tg3 *tp, int silent)
6733 {
6734         int i, err;
6735
6736         tg3_disable_ints(tp);
6737
6738         tp->rx_mode &= ~RX_MODE_ENABLE;
6739         tw32_f(MAC_RX_MODE, tp->rx_mode);
6740         udelay(10);
6741
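             /* Stop the receive-side blocks first, then the send-side blocks,
              * so in-flight DMA can drain before host coalescing, the buffer
              * manager, and the memory arbiter are shut off below.
              */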
6742         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6743         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6744         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6745         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6746         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6747         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6748
6749         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6750         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6751         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6752         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6753         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6754         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6755         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6756
6757         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6758         tw32_f(MAC_MODE, tp->mac_mode);
6759         udelay(40);
6760
6761         tp->tx_mode &= ~TX_MODE_ENABLE;
6762         tw32_f(MAC_TX_MODE, tp->tx_mode);
6763
6764         for (i = 0; i < MAX_WAIT_CNT; i++) {
6765                 udelay(100);
6766                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6767                         break;
6768         }
6769         if (i >= MAX_WAIT_CNT) {
6770                 dev_err(&tp->pdev->dev,
6771                         "%s timed out, TX_MODE_ENABLE will not clear "
6772                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6773                 err |= -ENODEV;
6774         }
6775
6776         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6777         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6778         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6779
6780         tw32(FTQ_RESET, 0xffffffff);
6781         tw32(FTQ_RESET, 0x00000000);
6782
6783         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6784         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6785
6786         for (i = 0; i < tp->irq_cnt; i++) {
6787                 struct tg3_napi *tnapi = &tp->napi[i];
6788                 if (tnapi->hw_status)
6789                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6790         }
6791         if (tp->hw_stats)
6792                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6793
6794         return err;
6795 }
6796
6797 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6798 {
6799         int i;
6800         u32 apedata;
6801
6802         /* NCSI does not support APE events */
6803         if (tg3_flag(tp, APE_HAS_NCSI))
6804                 return;
6805
6806         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6807         if (apedata != APE_SEG_SIG_MAGIC)
6808                 return;
6809
6810         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6811         if (!(apedata & APE_FW_STATUS_READY))
6812                 return;
6813
6814         /* Wait for up to 1 millisecond for APE to service previous event. */
6815         for (i = 0; i < 10; i++) {
6816                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6817                         return;
6818
6819                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6820
6821                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6822                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6823                                         event | APE_EVENT_STATUS_EVENT_PENDING);
6824
6825                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6826
6827                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6828                         break;
6829
6830                 udelay(100);
6831         }
6832
6833         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6834                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6835 }
6836
6837 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6838 {
6839         u32 event;
6840         u32 apedata;
6841
6842         if (!tg3_flag(tp, ENABLE_APE))
6843                 return;
6844
6845         switch (kind) {
6846         case RESET_KIND_INIT:
6847                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6848                                 APE_HOST_SEG_SIG_MAGIC);
6849                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6850                                 APE_HOST_SEG_LEN_MAGIC);
6851                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6852                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6853                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6854                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6855                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6856                                 APE_HOST_BEHAV_NO_PHYLOCK);
6857                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6858                                     TG3_APE_HOST_DRVR_STATE_START);
6859
6860                 event = APE_EVENT_STATUS_STATE_START;
6861                 break;
6862         case RESET_KIND_SHUTDOWN:
6863                 /* With the interface we are currently using,
6864                  * APE does not track driver state.  Wiping
6865                  * out the HOST SEGMENT SIGNATURE forces
6866                  * the APE to assume OS absent status.
6867                  */
6868                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6869
6870                 if (device_may_wakeup(&tp->pdev->dev) &&
6871                     tg3_flag(tp, WOL_ENABLE)) {
6872                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6873                                             TG3_APE_HOST_WOL_SPEED_AUTO);
6874                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6875                 } else
6876                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6877
6878                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6879
6880                 event = APE_EVENT_STATUS_STATE_UNLOAD;
6881                 break;
6882         case RESET_KIND_SUSPEND:
6883                 event = APE_EVENT_STATUS_STATE_SUSPEND;
6884                 break;
6885         default:
6886                 return;
6887         }
6888
6889         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6890
6891         tg3_ape_send_event(tp, event);
6892 }
6893
6894 /* tp->lock is held. */
6895 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6896 {
6897         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6898                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6899
6900         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6901                 switch (kind) {
6902                 case RESET_KIND_INIT:
6903                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6904                                       DRV_STATE_START);
6905                         break;
6906
6907                 case RESET_KIND_SHUTDOWN:
6908                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6909                                       DRV_STATE_UNLOAD);
6910                         break;
6911
6912                 case RESET_KIND_SUSPEND:
6913                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6914                                       DRV_STATE_SUSPEND);
6915                         break;
6916
6917                 default:
6918                         break;
6919                 }
6920         }
6921
6922         if (kind == RESET_KIND_INIT ||
6923             kind == RESET_KIND_SUSPEND)
6924                 tg3_ape_driver_state_change(tp, kind);
6925 }
6926
6927 /* tp->lock is held. */
6928 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6929 {
6930         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6931                 switch (kind) {
6932                 case RESET_KIND_INIT:
6933                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6934                                       DRV_STATE_START_DONE);
6935                         break;
6936
6937                 case RESET_KIND_SHUTDOWN:
6938                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6939                                       DRV_STATE_UNLOAD_DONE);
6940                         break;
6941
6942                 default:
6943                         break;
6944                 }
6945         }
6946
6947         if (kind == RESET_KIND_SHUTDOWN)
6948                 tg3_ape_driver_state_change(tp, kind);
6949 }
6950
6951 /* tp->lock is held. */
6952 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6953 {
6954         if (tg3_flag(tp, ENABLE_ASF)) {
6955                 switch (kind) {
6956                 case RESET_KIND_INIT:
6957                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6958                                       DRV_STATE_START);
6959                         break;
6960
6961                 case RESET_KIND_SHUTDOWN:
6962                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6963                                       DRV_STATE_UNLOAD);
6964                         break;
6965
6966                 case RESET_KIND_SUSPEND:
6967                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6968                                       DRV_STATE_SUSPEND);
6969                         break;
6970
6971                 default:
6972                         break;
6973                 }
6974         }
6975 }
6976
6977 static int tg3_poll_fw(struct tg3 *tp)
6978 {
6979         int i;
6980         u32 val;
6981
6982         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6983                 /* Wait up to 20ms for init done. */
6984                 for (i = 0; i < 200; i++) {
6985                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6986                                 return 0;
6987                         udelay(100);
6988                 }
6989                 return -ENODEV;
6990         }
6991
6992         /* Wait for firmware initialization to complete. */
6993         for (i = 0; i < 100000; i++) {
6994                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6995                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6996                         break;
6997                 udelay(10);
6998         }
6999
7000         /* The chip might not be fitted with firmware.  Some Sun onboard
7001          * parts ship configured that way.  So don't treat a timeout of
7002          * the above loop as an error, but do report the lack of running
7003          * firmware once.
7004          */
7005         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7006                 tg3_flag_set(tp, NO_FWARE_REPORTED);
7007
7008                 netdev_info(tp->dev, "No firmware running\n");
7009         }
7010
7011         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7012                 /* The 57765 A0 needs a little more time to
7013                  * finish its internal post-reset work.
7014                  */
7015                 mdelay(10);
7016         }
7017
7018         return 0;
7019 }
7020
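/* The mailbox handshake polled above: the driver writes MAGIC1 into
 * NIC_SRAM_FIRMWARE_MBOX before reset, and bootcode acknowledges by
 * writing back the one's complement of MAGIC1 once its initialization
 * completes.  The 100000 x 10us poll budget gives the bootcode roughly
 * one second to respond.
 */
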
7021 /* Save PCI command register before chip reset */
7022 static void tg3_save_pci_state(struct tg3 *tp)
7023 {
7024         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7025 }
7026
7027 /* Restore PCI state after chip reset */
7028 static void tg3_restore_pci_state(struct tg3 *tp)
7029 {
7030         u32 val;
7031
7032         /* Re-enable indirect register accesses. */
7033         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7034                                tp->misc_host_ctrl);
7035
7036         /* Set MAX PCI retry to zero. */
7037         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7038         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7039             tg3_flag(tp, PCIX_MODE))
7040                 val |= PCISTATE_RETRY_SAME_DMA;
7041         /* Allow reads and writes to the APE register and memory space. */
7042         if (tg3_flag(tp, ENABLE_APE))
7043                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7044                        PCISTATE_ALLOW_APE_SHMEM_WR |
7045                        PCISTATE_ALLOW_APE_PSPACE_WR;
7046         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7047
7048         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7049
7050         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7051                 if (tg3_flag(tp, PCI_EXPRESS))
7052                         pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7053                 else {
7054                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7055                                               tp->pci_cacheline_sz);
7056                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7057                                               tp->pci_lat_timer);
7058                 }
7059         }
7060
7061         /* Make sure PCI-X relaxed ordering bit is clear. */
7062         if (tg3_flag(tp, PCIX_MODE)) {
7063                 u16 pcix_cmd;
7064
7065                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7066                                      &pcix_cmd);
7067                 pcix_cmd &= ~PCI_X_CMD_ERO;
7068                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7069                                       pcix_cmd);
7070         }
7071
7072         if (tg3_flag(tp, 5780_CLASS)) {
7073
7074                 /* Chip reset on the 5780 clears the MSI enable bit,
7075                  * so we need to restore it.
7076                  */
7077                 if (tg3_flag(tp, USING_MSI)) {
7078                         u16 ctrl;
7079
7080                         pci_read_config_word(tp->pdev,
7081                                              tp->msi_cap + PCI_MSI_FLAGS,
7082                                              &ctrl);
7083                         pci_write_config_word(tp->pdev,
7084                                               tp->msi_cap + PCI_MSI_FLAGS,
7085                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7086                         val = tr32(MSGINT_MODE);
7087                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7088                 }
7089         }
7090 }
7091
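/* tg3_save_pci_state() and tg3_restore_pci_state() bracket the chip
 * reset: the GRC core-clock reset can wipe the PCI command word and
 * the MSI enable bit, so the command register is snapshotted beforehand
 * and replayed afterwards along with TG3PCI_MISC_HOST_CTRL and the
 * APE, PCI-X and PCIe settings.
 */
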
7092 static void tg3_stop_fw(struct tg3 *);
7093
7094 /* tp->lock is held. */
7095 static int tg3_chip_reset(struct tg3 *tp)
7096 {
7097         u32 val;
7098         void (*write_op)(struct tg3 *, u32, u32);
7099         int i, err;
7100
7101         tg3_nvram_lock(tp);
7102
7103         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7104
7105         /* No matching tg3_nvram_unlock() after this because
7106          * the chip reset below will undo the nvram lock.
7107          */
7108         tp->nvram_lock_cnt = 0;
7109
7110         /* GRC_MISC_CFG core clock reset will clear the memory
7111          * enable bit in PCI register 4 and the MSI enable bit
7112          * on some chips, so we save relevant registers here.
7113          */
7114         tg3_save_pci_state(tp);
7115
7116         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7117             tg3_flag(tp, 5755_PLUS))
7118                 tw32(GRC_FASTBOOT_PC, 0);
7119
7120         /*
7121          * We must avoid the readl() that normally takes place.
7122          * It can lock up machines, cause machine checks, and other
7123          * fun things.  So, temporarily disable the 5701
7124          * hardware workaround while we do the reset.
7125          */
7126         write_op = tp->write32;
7127         if (write_op == tg3_write_flush_reg32)
7128                 tp->write32 = tg3_write32;
7129
7130         /* Prevent the irq handler from reading or writing PCI registers
7131          * during chip reset when the memory enable bit in the PCI command
7132          * register may be cleared.  The chip does not generate interrupts
7133          * at this time, but the irq handler may still be called due to irq
7134          * sharing or irqpoll.
7135          */
7136         tg3_flag_set(tp, CHIP_RESETTING);
7137         for (i = 0; i < tp->irq_cnt; i++) {
7138                 struct tg3_napi *tnapi = &tp->napi[i];
7139                 if (tnapi->hw_status) {
7140                         tnapi->hw_status->status = 0;
7141                         tnapi->hw_status->status_tag = 0;
7142                 }
7143                 tnapi->last_tag = 0;
7144                 tnapi->last_irq_tag = 0;
7145         }
7146         smp_mb();
7147
7148         for (i = 0; i < tp->irq_cnt; i++)
7149                 synchronize_irq(tp->napi[i].irq_vec);
7150
7151         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7152                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7153                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7154         }
7155
7156         /* do the reset */
7157         val = GRC_MISC_CFG_CORECLK_RESET;
7158
7159         if (tg3_flag(tp, PCI_EXPRESS)) {
7160                 /* Force PCIe 1.0a mode */
7161                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7162                     !tg3_flag(tp, 57765_PLUS) &&
7163                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7164                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7165                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7166
7167                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7168                         tw32(GRC_MISC_CFG, (1 << 29));
7169                         val |= (1 << 29);
7170                 }
7171         }
7172
7173         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7174                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7175                 tw32(GRC_VCPU_EXT_CTRL,
7176                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7177         }
7178
7179         /* Manage gphy power for all CPMU-absent PCIe devices. */
7180         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7181                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7182
7183         tw32(GRC_MISC_CFG, val);
7184
7185         /* restore 5701 hardware bug workaround write method */
7186         tp->write32 = write_op;
7187
7188         /* Unfortunately, we have to delay before the PCI read back.
7189          * Some 575X chips will not even respond to a PCI cfg access
7190          * when the reset command is given to the chip.
7191          *
7192          * How do these hardware designers expect things to work
7193          * properly if the PCI write is posted for a long period
7194          * of time?  It is always necessary to have some method by
7195          * which a register read back can occur to push out the
7196          * write that does the reset.
7197          *
7198          * For most tg3 variants the trick below has worked.
7199          * Ho hum...
7200          */
7201         udelay(120);
7202
7203         /* Flush PCI posted writes.  The normal MMIO registers
7204          * are inaccessible at this time, so this is the only
7205          * way to do it reliably (actually, this is no longer
7206          * the case, see above).  I tried using indirect
7207          * register read/write but that upset some 5701 variants.
7208          */
7209         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7210
7211         udelay(120);
7212
7213         if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
7214                 u16 val16;
7215
7216                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7217                         int i;
7218                         u32 cfg_val;
7219
7220                         /* Wait for link training to complete.  */
7221                         for (i = 0; i < 5000; i++)
7222                                 udelay(100);
7223
7224                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7225                         pci_write_config_dword(tp->pdev, 0xc4,
7226                                                cfg_val | (1 << 15));
7227                 }
7228
7229                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7230                 pci_read_config_word(tp->pdev,
7231                                      tp->pcie_cap + PCI_EXP_DEVCTL,
7232                                      &val16);
7233                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7234                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7235                 /*
7236                  * Older PCIe devices only support the 128-byte
7237                  * MPS setting.  Enforce the restriction.
7238                  */
7239                 if (!tg3_flag(tp, CPMU_PRESENT))
7240                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7241                 pci_write_config_word(tp->pdev,
7242                                       tp->pcie_cap + PCI_EXP_DEVCTL,
7243                                       val16);
7244
7245                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7246
7247                 /* Clear error status */
7248                 pci_write_config_word(tp->pdev,
7249                                       tp->pcie_cap + PCI_EXP_DEVSTA,
7250                                       PCI_EXP_DEVSTA_CED |
7251                                       PCI_EXP_DEVSTA_NFED |
7252                                       PCI_EXP_DEVSTA_FED |
7253                                       PCI_EXP_DEVSTA_URD);
7254         }
7255
7256         tg3_restore_pci_state(tp);
7257
7258         tg3_flag_clear(tp, CHIP_RESETTING);
7259         tg3_flag_clear(tp, ERROR_PROCESSED);
7260
7261         val = 0;
7262         if (tg3_flag(tp, 5780_CLASS))
7263                 val = tr32(MEMARB_MODE);
7264         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7265
7266         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7267                 tg3_stop_fw(tp);
7268                 tw32(0x5000, 0x400);
7269         }
7270
7271         tw32(GRC_MODE, tp->grc_mode);
7272
7273         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7274                 val = tr32(0xc4);
7275
7276                 tw32(0xc4, val | (1 << 15));
7277         }
7278
7279         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7280             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7281                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7282                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7283                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7284                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7285         }
7286
7287         if (tg3_flag(tp, ENABLE_APE))
7288                 tp->mac_mode = MAC_MODE_APE_TX_EN |
7289                                MAC_MODE_APE_RX_EN |
7290                                MAC_MODE_TDE_ENABLE;
7291
7292         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7293                 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7294                 val = tp->mac_mode;
7295         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7296                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7297                 val = tp->mac_mode;
7298         } else
7299                 val = 0;
7300
7301         tw32_f(MAC_MODE, val);
7302         udelay(40);
7303
7304         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7305
7306         err = tg3_poll_fw(tp);
7307         if (err)
7308                 return err;
7309
7310         tg3_mdio_start(tp);
7311
7312         if (tg3_flag(tp, PCI_EXPRESS) &&
7313             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7314             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7315             !tg3_flag(tp, 57765_PLUS)) {
7316                 val = tr32(0x7c00);
7317
7318                 tw32(0x7c00, val | (1 << 25));
7319         }
7320
7321         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7322                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7323                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7324         }
7325
7326         /* Reprobe ASF enable state.  */
7327         tg3_flag_clear(tp, ENABLE_ASF);
7328         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7329         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7330         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7331                 u32 nic_cfg;
7332
7333                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7334                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7335                         tg3_flag_set(tp, ENABLE_ASF);
7336                         tp->last_event_jiffies = jiffies;
7337                         if (tg3_flag(tp, 5750_PLUS))
7338                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7339                 }
7340         }
7341
7342         return 0;
7343 }
7344
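/* In outline, tg3_chip_reset() above: takes the NVRAM and APE GRC
 * locks, saves PCI state, quiesces the irq handlers, pulses
 * GRC_MISC_CFG_CORECLK_RESET, waits out the posted write, restores PCI
 * state, re-enables the memory arbiter, and finally waits for bootcode
 * via tg3_poll_fw() before reprobing the ASF enable state.
 */
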
7345 /* tp->lock is held. */
7346 static void tg3_stop_fw(struct tg3 *tp)
7347 {
7348         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7349                 /* Wait for RX cpu to ACK the previous event. */
7350                 tg3_wait_for_event_ack(tp);
7351
7352                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7353
7354                 tg3_generate_fw_event(tp);
7355
7356                 /* Wait for RX cpu to ACK this event. */
7357                 tg3_wait_for_event_ack(tp);
7358         }
7359 }
7360
7361 /* tp->lock is held. */
7362 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7363 {
7364         int err;
7365
7366         tg3_stop_fw(tp);
7367
7368         tg3_write_sig_pre_reset(tp, kind);
7369
7370         tg3_abort_hw(tp, silent);
7371         err = tg3_chip_reset(tp);
7372
7373         __tg3_set_mac_addr(tp, 0);
7374
7375         tg3_write_sig_legacy(tp, kind);
7376         tg3_write_sig_post_reset(tp, kind);
7377
7378         if (err)
7379                 return err;
7380
7381         return 0;
7382 }
7383
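/* Note that tg3_halt() defers its error check until after the legacy
 * and post-reset signatures are written, so firmware always sees a
 * consistent UNLOAD/SUSPEND state even if tg3_chip_reset() failed.
 */
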
7384 #define RX_CPU_SCRATCH_BASE     0x30000
7385 #define RX_CPU_SCRATCH_SIZE     0x04000
7386 #define TX_CPU_SCRATCH_BASE     0x34000
7387 #define TX_CPU_SCRATCH_SIZE     0x04000
7388
7389 /* tp->lock is held. */
7390 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7391 {
7392         int i;
7393
7394         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7395
7396         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7397                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7398
7399                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7400                 return 0;
7401         }
7402         if (offset == RX_CPU_BASE) {
7403                 for (i = 0; i < 10000; i++) {
7404                         tw32(offset + CPU_STATE, 0xffffffff);
7405                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7406                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7407                                 break;
7408                 }
7409
7410                 tw32(offset + CPU_STATE, 0xffffffff);
7411                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
7412                 udelay(10);
7413         } else {
7414                 for (i = 0; i < 10000; i++) {
7415                         tw32(offset + CPU_STATE, 0xffffffff);
7416                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7417                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7418                                 break;
7419                 }
7420         }
7421
7422         if (i >= 10000) {
7423                 netdev_err(tp->dev, "%s: timed out halting the %s CPU\n",
7424                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7425                 return -ENODEV;
7426         }
7427
7428         /* Clear firmware's nvram arbitration. */
7429         if (tg3_flag(tp, NVRAM))
7430                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7431         return 0;
7432 }
7433
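/* The two 10000-iteration halt loops in tg3_halt_cpu() are identical;
 * only the RX CPU gets an extra forced HALT and a 10us settle after
 * its loop.  The 5906's virtual CPU is halted through GRC_VCPU_EXT_CTRL
 * instead and never reaches the loops at all.
 */
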
7434 struct fw_info {
7435         unsigned int fw_base;
7436         unsigned int fw_len;
7437         const __be32 *fw_data;
7438 };
7439
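/* Layout of the firmware image consumed by the loaders below, as
 * implied by their fw_data[] indexing (all header fields are
 * big-endian u32s):
 *
 *   fw_data[0]   version
 *   fw_data[1]   start (load) address  ->  info.fw_base
 *   fw_data[2]   length = end_of_bss - start_of_text
 *   fw_data[3..] image, loaded contiguously from the start address
 *
 * which is why info.fw_data = &fw_data[3] and info.fw_len is taken as
 * tp->fw->size - 12 (the three header words).
 */
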
7440 /* tp->lock is held. */
7441 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7442                                  int cpu_scratch_size, struct fw_info *info)
7443 {
7444         int err, lock_err, i;
7445         void (*write_op)(struct tg3 *, u32, u32);
7446
7447         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7448                 netdev_err(tp->dev,
7449                            "%s: Trying to load TX cpu firmware which is 5705\n",
7450                            __func__);
7451                 return -EINVAL;
7452         }
7453
7454         if (tg3_flag(tp, 5705_PLUS))
7455                 write_op = tg3_write_mem;
7456         else
7457                 write_op = tg3_write_indirect_reg32;
7458
7459         /* It is possible that bootcode is still loading at this point.
7460          * Get the nvram lock before halting the cpu.
7461          */
7462         lock_err = tg3_nvram_lock(tp);
7463         err = tg3_halt_cpu(tp, cpu_base);
7464         if (!lock_err)
7465                 tg3_nvram_unlock(tp);
7466         if (err)
7467                 goto out;
7468
7469         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7470                 write_op(tp, cpu_scratch_base + i, 0);
7471         tw32(cpu_base + CPU_STATE, 0xffffffff);
7472         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7473         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7474                 write_op(tp, (cpu_scratch_base +
7475                               (info->fw_base & 0xffff) +
7476                               (i * sizeof(u32))),
7477                               be32_to_cpu(info->fw_data[i]));
7478
7479         err = 0;
7480
7481 out:
7482         return err;
7483 }
7484
7485 /* tp->lock is held. */
7486 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7487 {
7488         struct fw_info info;
7489         const __be32 *fw_data;
7490         int err, i;
7491
7492         fw_data = (void *)tp->fw->data;
7493
7494         /* The firmware blob starts with a version number, followed by
7495            the start address and length.  We use the complete length:
7496            length = end_address_of_bss - start_address_of_text.
7497            The remainder is the blob itself, loaded contiguously
7498            from the start address. */
7499
7500         info.fw_base = be32_to_cpu(fw_data[1]);
7501         info.fw_len = tp->fw->size - 12;
7502         info.fw_data = &fw_data[3];
7503
7504         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7505                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7506                                     &info);
7507         if (err)
7508                 return err;
7509
7510         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7511                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7512                                     &info);
7513         if (err)
7514                 return err;
7515
7516         /* Now startup only the RX cpu. */
7517         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7518         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7519
7520         for (i = 0; i < 5; i++) {
7521                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7522                         break;
7523                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7524                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
7525                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7526                 udelay(1000);
7527         }
7528         if (i >= 5) {
7529                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
7530                            "should be %08x\n", __func__,
7531                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7532                 return -ENODEV;
7533         }
7534         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7535         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
7536
7537         return 0;
7538 }
7539
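/* For the 5701 A0 fix above, the same image is copied into both the RX
 * and TX CPU scratchpads, but only the RX CPU is released; the TX CPU
 * is left halted with the firmware merely staged in its scratch memory.
 */
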
7540 /* tp->lock is held. */
7541 static int tg3_load_tso_firmware(struct tg3 *tp)
7542 {
7543         struct fw_info info;
7544         const __be32 *fw_data;
7545         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7546         int err, i;
7547
7548         if (tg3_flag(tp, HW_TSO_1) ||
7549             tg3_flag(tp, HW_TSO_2) ||
7550             tg3_flag(tp, HW_TSO_3))
7551                 return 0;
7552
7553         fw_data = (void *)tp->fw->data;
7554
7555         /* The firmware blob starts with a version number, followed by
7556            the start address and length.  We use the complete length:
7557            length = end_address_of_bss - start_address_of_text.
7558            The remainder is the blob itself, loaded contiguously
7559            from the start address. */
7560
7561         info.fw_base = be32_to_cpu(fw_data[1]);
7562         cpu_scratch_size = tp->fw_len;
7563         info.fw_len = tp->fw->size - 12;
7564         info.fw_data = &fw_data[3];
7565
7566         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7567                 cpu_base = RX_CPU_BASE;
7568                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7569         } else {
7570                 cpu_base = TX_CPU_BASE;
7571                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7572                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7573         }
7574
7575         err = tg3_load_firmware_cpu(tp, cpu_base,
7576                                     cpu_scratch_base, cpu_scratch_size,
7577                                     &info);
7578         if (err)
7579                 return err;
7580
7581         /* Now startup the cpu. */
7582         tw32(cpu_base + CPU_STATE, 0xffffffff);
7583         tw32_f(cpu_base + CPU_PC, info.fw_base);
7584
7585         for (i = 0; i < 5; i++) {
7586                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7587                         break;
7588                 tw32(cpu_base + CPU_STATE, 0xffffffff);
7589                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7590                 tw32_f(cpu_base + CPU_PC, info.fw_base);
7591                 udelay(1000);
7592         }
7593         if (i >= 5) {
7594                 netdev_err(tp->dev,
7595                            "%s fails to set CPU PC, is %08x should be %08x\n",
7596                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7597                 return -ENODEV;
7598         }
7599         tw32(cpu_base + CPU_STATE, 0xffffffff);
7600         tw32_f(cpu_base + CPU_MODE,  0x00000000);
7601         return 0;
7602 }
7603
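/* The TSO firmware load is skipped entirely when any HW_TSO_* flag is
 * set.  On the 5705 it targets the RX CPU and borrows SRAM from the
 * mbuf pool for scratch space (5705_PLUS parts have no usable TX CPU,
 * as tg3_load_firmware_cpu enforces); other chips use the TX CPU and
 * its dedicated scratchpad.
 */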
7604
7605 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7606 {
7607         struct tg3 *tp = netdev_priv(dev);
7608         struct sockaddr *addr = p;
7609         int err = 0, skip_mac_1 = 0;
7610
7611         if (!is_valid_ether_addr(addr->sa_data))
7612                 return -EINVAL;
7613
7614         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7615
7616         if (!netif_running(dev))
7617                 return 0;
7618
7619         if (tg3_flag(tp, ENABLE_ASF)) {
7620                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7621
7622                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7623                 addr0_low = tr32(MAC_ADDR_0_LOW);
7624                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7625                 addr1_low = tr32(MAC_ADDR_1_LOW);
7626
7627                 /* Skip MAC addr 1 if ASF is using it. */
7628                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7629                     !(addr1_high == 0 && addr1_low == 0))
7630                         skip_mac_1 = 1;
7631         }
7632         spin_lock_bh(&tp->lock);
7633         __tg3_set_mac_addr(tp, skip_mac_1);
7634         spin_unlock_bh(&tp->lock);
7635
7636         return err;
7637 }
7638
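/* The skip_mac_1 logic above: if MAC address 1 is nonzero and differs
 * from address 0, ASF firmware owns that slot, so __tg3_set_mac_addr()
 * must leave it untouched when installing the new address.
 */
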
7639 /* tp->lock is held. */
7640 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7641                            dma_addr_t mapping, u32 maxlen_flags,
7642                            u32 nic_addr)
7643 {
7644         tg3_write_mem(tp,
7645                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7646                       ((u64) mapping >> 32));
7647         tg3_write_mem(tp,
7648                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7649                       ((u64) mapping & 0xffffffff));
7650         tg3_write_mem(tp,
7651                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7652                        maxlen_flags);
7653
7654         if (!tg3_flag(tp, 5705_PLUS))
7655                 tg3_write_mem(tp,
7656                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7657                               nic_addr);
7658 }
7659
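/* A TG3_BDINFO control block is four consecutive words in NIC SRAM,
 * written above in order: the host ring DMA address (high then low 32
 * bits), the MAXLEN_FLAGS word ((ring size << BDINFO_FLAGS_MAXLEN_SHIFT)
 * | attribute flags), and the ring's NIC SRAM address, which 5705_PLUS
 * parts omit.
 */
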
7660 static void __tg3_set_rx_mode(struct net_device *);
7661 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7662 {
7663         int i;
7664
7665         if (!tg3_flag(tp, ENABLE_TSS)) {
7666                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7667                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7668                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7669         } else {
7670                 tw32(HOSTCC_TXCOL_TICKS, 0);
7671                 tw32(HOSTCC_TXMAX_FRAMES, 0);
7672                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7673         }
7674
7675         if (!tg3_flag(tp, ENABLE_RSS)) {
7676                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7677                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7678                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7679         } else {
7680                 tw32(HOSTCC_RXCOL_TICKS, 0);
7681                 tw32(HOSTCC_RXMAX_FRAMES, 0);
7682                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7683         }
7684
7685         if (!tg3_flag(tp, 5705_PLUS)) {
7686                 u32 val = ec->stats_block_coalesce_usecs;
7687
7688                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7689                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7690
7691                 if (!netif_carrier_ok(tp->dev))
7692                         val = 0;
7693
7694                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7695         }
7696
7697         for (i = 0; i < tp->irq_cnt - 1; i++) {
7698                 u32 reg;
7699
7700                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7701                 tw32(reg, ec->rx_coalesce_usecs);
7702                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7703                 tw32(reg, ec->rx_max_coalesced_frames);
7704                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7705                 tw32(reg, ec->rx_max_coalesced_frames_irq);
7706
7707                 if (tg3_flag(tp, ENABLE_TSS)) {
7708                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7709                         tw32(reg, ec->tx_coalesce_usecs);
7710                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7711                         tw32(reg, ec->tx_max_coalesced_frames);
7712                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7713                         tw32(reg, ec->tx_max_coalesced_frames_irq);
7714                 }
7715         }
7716
7717         for (; i < tp->irq_max - 1; i++) {
7718                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7719                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7720                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7721
7722                 if (tg3_flag(tp, ENABLE_TSS)) {
7723                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7724                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7725                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7726                 }
7727         }
7728 }
7729
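/* Per-vector host coalescing registers begin at the *_VEC1 addresses
 * and repeat every 0x18 bytes.  Vectors beyond irq_cnt - 1 up to
 * irq_max - 1 are explicitly zeroed above, which keeps stale
 * parameters from lingering on unused vectors.
 */
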
7730 /* tp->lock is held. */
7731 static void tg3_rings_reset(struct tg3 *tp)
7732 {
7733         int i;
7734         u32 stblk, txrcb, rxrcb, limit;
7735         struct tg3_napi *tnapi = &tp->napi[0];
7736
7737         /* Disable all transmit rings but the first. */
7738         if (!tg3_flag(tp, 5705_PLUS))
7739                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7740         else if (tg3_flag(tp, 5717_PLUS))
7741                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7742         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7743                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7744         else
7745                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7746
7747         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7748              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7749                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7750                               BDINFO_FLAGS_DISABLED);
7751
7752
7753         /* Disable all receive return rings but the first. */
7754         if (tg3_flag(tp, 5717_PLUS))
7755                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7756         else if (!tg3_flag(tp, 5705_PLUS))
7757                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7758         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7759                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7760                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7761         else
7762                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7763
7764         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7765              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7766                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7767                               BDINFO_FLAGS_DISABLED);
7768
7769         /* Disable interrupts */
7770         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7771
7772         /* Zero mailbox registers. */
7773         if (tg3_flag(tp, SUPPORT_MSIX)) {
7774                 for (i = 1; i < tp->irq_max; i++) {
7775                         tp->napi[i].tx_prod = 0;
7776                         tp->napi[i].tx_cons = 0;
7777                         if (tg3_flag(tp, ENABLE_TSS))
7778                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
7779                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
7780                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7781                 }
7782                 if (!tg3_flag(tp, ENABLE_TSS))
7783                         tw32_mailbox(tp->napi[0].prodmbox, 0);
7784         } else {
7785                 tp->napi[0].tx_prod = 0;
7786                 tp->napi[0].tx_cons = 0;
7787                 tw32_mailbox(tp->napi[0].prodmbox, 0);
7788                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7789         }
7790
7791         /* Make sure the NIC-based send BD rings are disabled. */
7792         if (!tg3_flag(tp, 5705_PLUS)) {
7793                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7794                 for (i = 0; i < 16; i++)
7795                         tw32_tx_mbox(mbox + i * 8, 0);
7796         }
7797
7798         txrcb = NIC_SRAM_SEND_RCB;
7799         rxrcb = NIC_SRAM_RCV_RET_RCB;
7800
7801         /* Clear status block in ram. */
7802         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7803
7804         /* Set status block DMA address */
7805         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7806              ((u64) tnapi->status_mapping >> 32));
7807         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7808              ((u64) tnapi->status_mapping & 0xffffffff));
7809
7810         if (tnapi->tx_ring) {
7811                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7812                                (TG3_TX_RING_SIZE <<
7813                                 BDINFO_FLAGS_MAXLEN_SHIFT),
7814                                NIC_SRAM_TX_BUFFER_DESC);
7815                 txrcb += TG3_BDINFO_SIZE;
7816         }
7817
7818         if (tnapi->rx_rcb) {
7819                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7820                                (tp->rx_ret_ring_mask + 1) <<
7821                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7822                 rxrcb += TG3_BDINFO_SIZE;
7823         }
7824
7825         stblk = HOSTCC_STATBLCK_RING1;
7826
7827         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7828                 u64 mapping = (u64)tnapi->status_mapping;
7829                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7830                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7831
7832                 /* Clear status block in ram. */
7833                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7834
7835                 if (tnapi->tx_ring) {
7836                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7837                                        (TG3_TX_RING_SIZE <<
7838                                         BDINFO_FLAGS_MAXLEN_SHIFT),
7839                                        NIC_SRAM_TX_BUFFER_DESC);
7840                         txrcb += TG3_BDINFO_SIZE;
7841                 }
7842
7843                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7844                                ((tp->rx_ret_ring_mask + 1) <<
7845                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7846
7847                 stblk += 8;
7848                 rxrcb += TG3_BDINFO_SIZE;
7849         }
7850 }
7851
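/* Status block DMA addresses for vectors 1..irq_cnt-1 are programmed
 * starting at HOSTCC_STATBLCK_RING1, one 64-bit address (8 bytes)
 * apart -- hence the stblk += 8 stride in the loop above.
 */
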
7852 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7853 {
7854         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7855
7856         if (!tg3_flag(tp, 5750_PLUS) ||
7857             tg3_flag(tp, 5780_CLASS) ||
7858             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7859             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7860                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7861         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7862                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7863                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7864         else
7865                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7866
7867         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7868         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7869
7870         val = min(nic_rep_thresh, host_rep_thresh);
7871         tw32(RCVBDI_STD_THRESH, val);
7872
7873         if (tg3_flag(tp, 57765_PLUS))
7874                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
7875
7876         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7877                 return;
7878
7879         if (!tg3_flag(tp, 5705_PLUS))
7880                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
7881         else
7882                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
7883
7884         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
7885
7886         val = min(bdcache_maxcnt / 2, host_rep_thresh);
7887         tw32(RCVBDI_JUMBO_THRESH, val);
7888
7889         if (tg3_flag(tp, 57765_PLUS))
7890                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
7891 }
7892
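/* Worked example for tg3_setup_rxbd_thresholds() above, with
 * illustrative numbers: if rx_pending were 200, host_rep_thresh =
 * max(200 / 8, 1) = 25, and RCVBDI_STD_THRESH gets
 * min(nic_rep_thresh, 25) -- i.e. the host replenishes the standard
 * ring after 25 BDs are consumed unless the BD-cache-derived
 * nic_rep_thresh is the tighter bound.
 */
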
7893 /* tp->lock is held. */
7894 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7895 {
7896         u32 val, rdmac_mode;
7897         int i, err, limit;
7898         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7899
7900         tg3_disable_ints(tp);
7901
7902         tg3_stop_fw(tp);
7903
7904         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7905
7906         if (tg3_flag(tp, INIT_COMPLETE))
7907                 tg3_abort_hw(tp, 1);
7908
7909         /* Enable MAC control of LPI */
7910         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7911                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7912                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7913                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
7914
7915                 tw32_f(TG3_CPMU_EEE_CTRL,
7916                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7917
7918                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7919                       TG3_CPMU_EEEMD_LPI_IN_TX |
7920                       TG3_CPMU_EEEMD_LPI_IN_RX |
7921                       TG3_CPMU_EEEMD_EEE_ENABLE;
7922
7923                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7924                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7925
7926                 if (tg3_flag(tp, ENABLE_APE))
7927                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7928
7929                 tw32_f(TG3_CPMU_EEE_MODE, val);
7930
7931                 tw32_f(TG3_CPMU_EEE_DBTMR1,
7932                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7933                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7934
7935                 tw32_f(TG3_CPMU_EEE_DBTMR2,
7936                        TG3_CPMU_DBTMR2_APE_TX_2047US |
7937                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7938         }
7939
7940         if (reset_phy)
7941                 tg3_phy_reset(tp);
7942
7943         err = tg3_chip_reset(tp);
7944         if (err)
7945                 return err;
7946
7947         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7948
7949         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7950                 val = tr32(TG3_CPMU_CTRL);
7951                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7952                 tw32(TG3_CPMU_CTRL, val);
7953
7954                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7955                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7956                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7957                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7958
7959                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7960                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7961                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7962                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7963
7964                 val = tr32(TG3_CPMU_HST_ACC);
7965                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7966                 val |= CPMU_HST_ACC_MACCLK_6_25;
7967                 tw32(TG3_CPMU_HST_ACC, val);
7968         }
7969
7970         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7971                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7972                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7973                        PCIE_PWR_MGMT_L1_THRESH_4MS;
7974                 tw32(PCIE_PWR_MGMT_THRESH, val);
7975
7976                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7977                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7978
7979                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7980
7981                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7982                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7983         }
7984
7985         if (tg3_flag(tp, L1PLLPD_EN)) {
7986                 u32 grc_mode = tr32(GRC_MODE);
7987
7988                 /* Access the lower 1K of PL PCIE block registers. */
7989                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7990                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7991
7992                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7993                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7994                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7995
7996                 tw32(GRC_MODE, grc_mode);
7997         }
7998
7999         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8000                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8001                         u32 grc_mode = tr32(GRC_MODE);
8002
8003                         /* Access the lower 1K of PL PCIE block registers. */
8004                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8005                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8006
8007                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8008                                    TG3_PCIE_PL_LO_PHYCTL5);
8009                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8010                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8011
8012                         tw32(GRC_MODE, grc_mode);
8013                 }
8014
8015                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8016                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8017                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8018                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8019         }
8020
8021         /* This works around an issue with Athlon chipsets on
8022          * B3 tigon3 silicon.  This bit has no effect on any
8023          * other revision.  But do not set this on PCI Express
8024          * chips and don't even touch the clocks if the CPMU is present.
8025          */
8026         if (!tg3_flag(tp, CPMU_PRESENT)) {
8027                 if (!tg3_flag(tp, PCI_EXPRESS))
8028                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8029                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8030         }
8031
8032         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8033             tg3_flag(tp, PCIX_MODE)) {
8034                 val = tr32(TG3PCI_PCISTATE);
8035                 val |= PCISTATE_RETRY_SAME_DMA;
8036                 tw32(TG3PCI_PCISTATE, val);
8037         }
8038
8039         if (tg3_flag(tp, ENABLE_APE)) {
8040                 /* Allow reads and writes to the
8041                  * APE register and memory space.
8042                  */
8043                 val = tr32(TG3PCI_PCISTATE);
8044                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8045                        PCISTATE_ALLOW_APE_SHMEM_WR |
8046                        PCISTATE_ALLOW_APE_PSPACE_WR;
8047                 tw32(TG3PCI_PCISTATE, val);
8048         }
8049
8050         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8051                 /* Enable some hw fixes.  */
8052                 val = tr32(TG3PCI_MSI_DATA);
8053                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8054                 tw32(TG3PCI_MSI_DATA, val);
8055         }
8056
8057         /* Descriptor ring init may access the NIC SRAM area
8058          * to set up the TX descriptors, so we can only do
8059          * this after the hardware has been successfully
8060          * reset.
8061          */
8062         err = tg3_init_rings(tp);
8063         if (err)
8064                 return err;
8065
8066         if (tg3_flag(tp, 57765_PLUS)) {
8067                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8068                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8069                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8070                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8071                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8072                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8073                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8074                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8075         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8076                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8077                 /* This value is determined during the probe-time DMA
8078                  * engine test, tg3_test_dma.
8079                  */
8080                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8081         }
8082
8083         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8084                           GRC_MODE_4X_NIC_SEND_RINGS |
8085                           GRC_MODE_NO_TX_PHDR_CSUM |
8086                           GRC_MODE_NO_RX_PHDR_CSUM);
8087         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8088
8089         /* Pseudo-header checksum is done by hardware logic and not
8090          * the offload processors, so make the chip do the pseudo-
8091          * header checksums on receive.  For transmit it is more
8092          * convenient to do the pseudo-header checksum in software
8093          * as Linux does that on transmit for us in all cases.
8094          */
8095         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8096
8097         tw32(GRC_MODE,
8098              tp->grc_mode |
8099              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8100
8101         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
8102         val = tr32(GRC_MISC_CFG);
8103         val &= ~0xff;
8104         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8105         tw32(GRC_MISC_CFG, val);
8106
8107         /* Initialize MBUF/DESC pool. */
8108         if (tg3_flag(tp, 5750_PLUS)) {
8109                 /* Do nothing.  */
8110         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8111                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8112                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8113                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8114                 else
8115                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8116                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8117                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8118         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8119                 int fw_len;
8120
8121                 fw_len = tp->fw_len;
8122                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8123                 tw32(BUFMGR_MB_POOL_ADDR,
8124                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8125                 tw32(BUFMGR_MB_POOL_SIZE,
8126                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8127         }
8128
8129         if (tp->dev->mtu <= ETH_DATA_LEN) {
8130                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8131                      tp->bufmgr_config.mbuf_read_dma_low_water);
8132                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8133                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8134                 tw32(BUFMGR_MB_HIGH_WATER,
8135                      tp->bufmgr_config.mbuf_high_water);
8136         } else {
8137                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8138                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8139                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8140                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8141                 tw32(BUFMGR_MB_HIGH_WATER,
8142                      tp->bufmgr_config.mbuf_high_water_jumbo);
8143         }
8144         tw32(BUFMGR_DMA_LOW_WATER,
8145              tp->bufmgr_config.dma_low_water);
8146         tw32(BUFMGR_DMA_HIGH_WATER,
8147              tp->bufmgr_config.dma_high_water);
8148
8149         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8150         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8151                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8152         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8153             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8154             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8155                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8156         tw32(BUFMGR_MODE, val);
8157         for (i = 0; i < 2000; i++) {
8158                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8159                         break;
8160                 udelay(10);
8161         }
8162         if (i >= 2000) {
8163                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8164                 return -ENODEV;
8165         }
8166
8167         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8168                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8169
8170         tg3_setup_rxbd_thresholds(tp);
8171
8172         /* Initialize TG3_BDINFOs at:
8173          *  RCVDBDI_STD_BD:     standard eth size rx ring
8174          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8175          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8176          *
8177          * like so:
8178          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8179          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8180          *                              ring attribute flags
8181          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8182          *
8183          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8184          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8185          *
8186          * The size of each ring is fixed in the firmware, but the location is
8187          * configurable.
8188          */
8189         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8190              ((u64) tpr->rx_std_mapping >> 32));
8191         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8192              ((u64) tpr->rx_std_mapping & 0xffffffff));
8193         if (!tg3_flag(tp, 5717_PLUS))
8194                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8195                      NIC_SRAM_RX_BUFFER_DESC);
8196
8197         /* Disable the mini ring */
8198         if (!tg3_flag(tp, 5705_PLUS))
8199                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8200                      BDINFO_FLAGS_DISABLED);
8201
8202         /* Program the jumbo buffer descriptor ring control
8203          * blocks on those devices that have them.
8204          */
8205         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8206             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8207
8208                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8209                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8210                              ((u64) tpr->rx_jmb_mapping >> 32));
8211                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8212                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8213                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8214                               BDINFO_FLAGS_MAXLEN_SHIFT;
8215                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8216                              val | BDINFO_FLAGS_USE_EXT_RECV);
8217                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8218                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8219                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8220                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8221                 } else {
8222                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8223                              BDINFO_FLAGS_DISABLED);
8224                 }
8225
8226                 if (tg3_flag(tp, 57765_PLUS)) {
8227                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8228                                 val = TG3_RX_STD_MAX_SIZE_5700;
8229                         else
8230                                 val = TG3_RX_STD_MAX_SIZE_5717;
8231                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8232                         val |= (TG3_RX_STD_DMA_SZ << 2);
8233                 } else
8234                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8235         } else
8236                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8237
8238         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8239
8240         tpr->rx_std_prod_idx = tp->rx_pending;
8241         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8242
8243         tpr->rx_jmb_prod_idx =
8244                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8245         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8246
8247         tg3_rings_reset(tp);
8248
8249         /* Initialize MAC address and backoff seed. */
8250         __tg3_set_mac_addr(tp, 0);
8251
8252         /* MTU + ethernet header + FCS + optional VLAN tag */
8253         tw32(MAC_RX_MTU_SIZE,
8254              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8255
8256         /* The slot time is changed by tg3_setup_phy if we
8257          * run at gigabit with half duplex.
8258          */
8259         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8260               (6 << TX_LENGTHS_IPG_SHIFT) |
8261               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8262
8263         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8264                 val |= tr32(MAC_TX_LENGTHS) &
8265                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8266                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8267
8268         tw32(MAC_TX_LENGTHS, val);
8269
8270         /* Receive rules. */
8271         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8272         tw32(RCVLPC_CONFIG, 0x0181);
8273
8274         /* Calculate the RDMAC_MODE setting early; we need it to determine
8275          * the RCVLPC_STATE_ENABLE mask.
8276          */
8277         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8278                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8279                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8280                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8281                       RDMAC_MODE_LNGREAD_ENAB);
8282
8283         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8284                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8285
8286         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8287             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8288             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8289                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8290                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8291                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8292
8293         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8294             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8295                 if (tg3_flag(tp, TSO_CAPABLE) &&
8296                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8297                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8298                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8299                            !tg3_flag(tp, IS_5788)) {
8300                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8301                 }
8302         }
8303
8304         if (tg3_flag(tp, PCI_EXPRESS))
8305                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8306
8307         if (tg3_flag(tp, HW_TSO_1) ||
8308             tg3_flag(tp, HW_TSO_2) ||
8309             tg3_flag(tp, HW_TSO_3))
8310                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8311
8312         if (tg3_flag(tp, HW_TSO_3) ||
8313             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8314             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8315                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8316
8317         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8318                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8319
8320         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8321             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8322             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8323             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8324             tg3_flag(tp, 57765_PLUS)) {
8325                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8326                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8327                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8328                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8329                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8330                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8331                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8332                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8333                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8334                 }
8335                 tw32(TG3_RDMA_RSRVCTRL_REG,
8336                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8337         }
8338
8339         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8340             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8341                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8342                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8343                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8344                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8345         }
8346
8347         /* Receive/send statistics. */
8348         if (tg3_flag(tp, 5750_PLUS)) {
8349                 val = tr32(RCVLPC_STATS_ENABLE);
8350                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8351                 tw32(RCVLPC_STATS_ENABLE, val);
8352         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8353                    tg3_flag(tp, TSO_CAPABLE)) {
8354                 val = tr32(RCVLPC_STATS_ENABLE);
8355                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8356                 tw32(RCVLPC_STATS_ENABLE, val);
8357         } else {
8358                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8359         }
8360         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8361         tw32(SNDDATAI_STATSENAB, 0xffffff);
8362         tw32(SNDDATAI_STATSCTRL,
8363              (SNDDATAI_SCTRL_ENABLE |
8364               SNDDATAI_SCTRL_FASTUPD));
8365
8366         /* Setup host coalescing engine. */
8367         tw32(HOSTCC_MODE, 0);
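        /* Poll up to 2000 * 10 us = ~20 ms for the engine to stop
         * before reprogramming it.
         */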
8368         for (i = 0; i < 2000; i++) {
8369                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8370                         break;
8371                 udelay(10);
8372         }
8373
8374         __tg3_set_coalesce(tp, &tp->coal);
8375
8376         if (!tg3_flag(tp, 5705_PLUS)) {
8377                 /* Status/statistics block address.  See tg3_timer,
8378                  * the tg3_periodic_fetch_stats call there, and
8379                  * tg3_get_stats64 for how this works on 5705/5750 chips.
8380                  */
8381                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8382                      ((u64) tp->stats_mapping >> 32));
8383                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8384                      ((u64) tp->stats_mapping & 0xffffffff));
8385                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8386
8387                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8388
8389                 /* Clear statistics and status block memory areas */
8390                 for (i = NIC_SRAM_STATS_BLK;
8391                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8392                      i += sizeof(u32)) {
8393                         tg3_write_mem(tp, i, 0);
8394                         udelay(40);
8395                 }
8396         }
8397
8398         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8399
8400         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8401         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8402         if (!tg3_flag(tp, 5705_PLUS))
8403                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8404
8405         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8406                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8407                 /* reset to prevent losing 1st rx packet intermittently */
8408                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8409                 udelay(10);
8410         }
8411
8412         if (tg3_flag(tp, ENABLE_APE))
8413                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8414         else
8415                 tp->mac_mode = 0;
8416         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8417                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8418         if (!tg3_flag(tp, 5705_PLUS) &&
8419             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8420             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8421                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8422         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8423         udelay(40);
8424
8425         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8426          * If TG3_FLAG_IS_NIC is zero, we should read the
8427          * register to preserve the GPIO settings for LOMs. The GPIOs,
8428          * whether used as inputs or outputs, are set by boot code after
8429          * reset.
8430          */
8431         if (!tg3_flag(tp, IS_NIC)) {
8432                 u32 gpio_mask;
8433
8434                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8435                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8436                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8437
8438                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8439                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8440                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8441
8442                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8443                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8444
8445                 tp->grc_local_ctrl &= ~gpio_mask;
8446                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8447
8448                 /* GPIO1 must be driven high for eeprom write protect */
8449                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8450                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8451                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8452         }
8453         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8454         udelay(100);
8455
8456         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8457                 val = tr32(MSGINT_MODE);
8458                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8459                 tw32(MSGINT_MODE, val);
8460         }
8461
8462         if (!tg3_flag(tp, 5705_PLUS)) {
8463                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8464                 udelay(40);
8465         }
8466
8467         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8468                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8469                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8470                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8471                WDMAC_MODE_LNGREAD_ENAB);
8472
8473         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8474             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8475                 if (tg3_flag(tp, TSO_CAPABLE) &&
8476                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8477                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8478                         /* nothing */
8479                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8480                            !tg3_flag(tp, IS_5788)) {
8481                         val |= WDMAC_MODE_RX_ACCEL;
8482                 }
8483         }
8484
8485         /* Enable host coalescing bug fix */
8486         if (tg3_flag(tp, 5755_PLUS))
8487                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8488
8489         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8490                 val |= WDMAC_MODE_BURST_ALL_DATA;
8491
8492         tw32_f(WDMAC_MODE, val);
8493         udelay(40);
8494
8495         if (tg3_flag(tp, PCIX_MODE)) {
8496                 u16 pcix_cmd;
8497
8498                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8499                                      &pcix_cmd);
8500                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8501                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8502                         pcix_cmd |= PCI_X_CMD_READ_2K;
8503                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8504                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8505                         pcix_cmd |= PCI_X_CMD_READ_2K;
8506                 }
8507                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8508                                       pcix_cmd);
8509         }
8510
8511         tw32_f(RDMAC_MODE, rdmac_mode);
8512         udelay(40);
8513
8514         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8515         if (!tg3_flag(tp, 5705_PLUS))
8516                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8517
8518         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8519                 tw32(SNDDATAC_MODE,
8520                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8521         else
8522                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8523
8524         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8525         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8526         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8527         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8528                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8529         tw32(RCVDBDI_MODE, val);
8530         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8531         if (tg3_flag(tp, HW_TSO_1) ||
8532             tg3_flag(tp, HW_TSO_2) ||
8533             tg3_flag(tp, HW_TSO_3))
8534                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8535         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8536         if (tg3_flag(tp, ENABLE_TSS))
8537                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8538         tw32(SNDBDI_MODE, val);
8539         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8540
8541         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8542                 err = tg3_load_5701_a0_firmware_fix(tp);
8543                 if (err)
8544                         return err;
8545         }
8546
8547         if (tg3_flag(tp, TSO_CAPABLE)) {
8548                 err = tg3_load_tso_firmware(tp);
8549                 if (err)
8550                         return err;
8551         }
8552
8553         tp->tx_mode = TX_MODE_ENABLE;
8554
8555         if (tg3_flag(tp, 5755_PLUS) ||
8556             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8557                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8558
8559         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8560                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8561                 tp->tx_mode &= ~val;
8562                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8563         }
8564
8565         tw32_f(MAC_TX_MODE, tp->tx_mode);
8566         udelay(100);
8567
8568         if (tg3_flag(tp, ENABLE_RSS)) {
8569                 u32 reg = MAC_RSS_INDIR_TBL_0;
8570                 u8 *ent = (u8 *)&val;
8571
8572                 /* Setup the indirection table */
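                /* Each 32-bit register write packs four 8-bit ring
                 * indices.  Vector 0 only services link and error
                 * events, so rx flows are spread across the remaining
                 * irq_cnt - 1 vectors.
                 */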
8573                 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8574                         int idx = i % sizeof(val);
8575
8576                         ent[idx] = i % (tp->irq_cnt - 1);
8577                         if (idx == sizeof(val) - 1) {
8578                                 tw32(reg, val);
8579                                 reg += 4;
8580                         }
8581                 }
8582
8583                 /* Setup the "secret" hash key. */
8584                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8585                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8586                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8587                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8588                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8589                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8590                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8591                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8592                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8593                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8594         }
8595
8596         tp->rx_mode = RX_MODE_ENABLE;
8597         if (tg3_flag(tp, 5755_PLUS))
8598                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8599
8600         if (tg3_flag(tp, ENABLE_RSS))
8601                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8602                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8603                                RX_MODE_RSS_IPV6_HASH_EN |
8604                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8605                                RX_MODE_RSS_IPV4_HASH_EN |
8606                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8607
8608         tw32_f(MAC_RX_MODE, tp->rx_mode);
8609         udelay(10);
8610
8611         tw32(MAC_LED_CTRL, tp->led_ctrl);
8612
8613         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8614         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8615                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8616                 udelay(10);
8617         }
8618         tw32_f(MAC_RX_MODE, tp->rx_mode);
8619         udelay(10);
8620
8621         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8622                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8623                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8624                         /* Set drive transmission level to 1.2V, but
8625                          * only if the signal pre-emphasis bit is not set. */
8626                         val = tr32(MAC_SERDES_CFG);
8627                         val &= 0xfffff000;
8628                         val |= 0x880;
8629                         tw32(MAC_SERDES_CFG, val);
8630                 }
8631                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8632                         tw32(MAC_SERDES_CFG, 0x616000);
8633         }
8634
8635         /* Prevent chip from dropping frames when flow control
8636          * is enabled.
8637          */
8638         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8639                 val = 1;
8640         else
8641                 val = 2;
8642         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8643
8644         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8645             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8646                 /* Use hardware link auto-negotiation */
8647                 tg3_flag_set(tp, HW_AUTONEG);
8648         }
8649
8650         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8651             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8652                 u32 tmp;
8653
8654                 tmp = tr32(SERDES_RX_CTRL);
8655                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8656                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8657                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8658                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8659         }
8660
8661         if (!tg3_flag(tp, USE_PHYLIB)) {
8662                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8663                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8664                         tp->link_config.speed = tp->link_config.orig_speed;
8665                         tp->link_config.duplex = tp->link_config.orig_duplex;
8666                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
8667                 }
8668
8669                 err = tg3_setup_phy(tp, 0);
8670                 if (err)
8671                         return err;
8672
8673                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8674                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8675                         u32 tmp;
8676
8677                         /* Clear CRC stats. */
8678                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8679                                 tg3_writephy(tp, MII_TG3_TEST1,
8680                                              tmp | MII_TG3_TEST1_CRC_EN);
8681                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8682                         }
8683                 }
8684         }
8685
8686         __tg3_set_rx_mode(tp->dev);
8687
8688         /* Initialize receive rules. */
8689         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
8690         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8691         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
8692         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8693
8694         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8695                 limit = 8;
8696         else
8697                 limit = 16;
8698         if (tg3_flag(tp, ENABLE_ASF))
8699                 limit -= 4;
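        /* The cases below fall through deliberately, clearing every
         * rule/value pair from limit - 1 down to rule 4 (rules 0 and 1
         * were programmed above).  The top four rules are presumably
         * reserved for the ASF firmware, hence limit -= 4.
         */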
8700         switch (limit) {
8701         case 16:
8702                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
8703         case 15:
8704                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
8705         case 14:
8706                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
8707         case 13:
8708                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
8709         case 12:
8710                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
8711         case 11:
8712                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
8713         case 10:
8714                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
8715         case 9:
8716                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
8717         case 8:
8718                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
8719         case 7:
8720                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
8721         case 6:
8722                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
8723         case 5:
8724                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
8725         case 4:
8726                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
8727         case 3:
8728                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
8729         case 2:
8730         case 1:
8731
8732         default:
8733                 break;
8734         }
8735
8736         if (tg3_flag(tp, ENABLE_APE))
8737                 /* Write our heartbeat update interval (disabled here) to the APE. */
8738                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8739                                 APE_HOST_HEARTBEAT_INT_DISABLE);
8740
8741         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8742
8743         return 0;
8744 }
8745
8746 /* Called at device open time to get the chip ready for
8747  * packet processing.  Invoked with tp->lock held.
8748  */
8749 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8750 {
8751         tg3_switch_clocks(tp);
8752
8753         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8754
8755         return tg3_reset_hw(tp, reset_phy);
8756 }
8757
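/* Accumulate a 32-bit hardware counter sample into a 64-bit {high,low}
 * software counter; unsigned wraparound of ->low after the addition
 * indicates a carry into ->high.
 */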
8758 #define TG3_STAT_ADD32(PSTAT, REG) \
8759 do {    u32 __val = tr32(REG); \
8760         (PSTAT)->low += __val; \
8761         if ((PSTAT)->low < __val) \
8762                 (PSTAT)->high += 1; \
8763 } while (0)
8764
8765 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8766 {
8767         struct tg3_hw_stats *sp = tp->hw_stats;
8768
8769         if (!netif_carrier_ok(tp->dev))
8770                 return;
8771
8772         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8773         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8774         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8775         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8776         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8777         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8778         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8779         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8780         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8781         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8782         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8783         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8784         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8785
8786         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8787         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8788         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8789         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8790         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8791         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8792         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8793         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8794         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8795         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8796         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8797         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8798         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8799         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8800
8801         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8802         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
8803                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8804         } else {
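                /* On the 5717 a mbuf low-watermark attention is counted
                 * as one rx discard instead, and the running total is
                 * mirrored into mbuf_lwm_thresh_hit.
                 */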
8805                 u32 val = tr32(HOSTCC_FLOW_ATTN);
8806                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8807                 if (val) {
8808                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8809                         sp->rx_discards.low += val;
8810                         if (sp->rx_discards.low < val)
8811                                 sp->rx_discards.high += 1;
8812                 }
8813                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8814         }
8815         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8816 }
8817
8818 static void tg3_timer(unsigned long __opaque)
8819 {
8820         struct tg3 *tp = (struct tg3 *) __opaque;
8821
8822         if (tp->irq_sync)
8823                 goto restart_timer;
8824
8825         spin_lock(&tp->lock);
8826
8827         if (!tg3_flag(tp, TAGGED_STATUS)) {
8828                 /* All of this garbage is here because, with non-tagged
8829                  * IRQ status, the mailbox/status_block protocol the chip
8830                  * uses with the CPU is race prone.
8831                  */
8832                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8833                         tw32(GRC_LOCAL_CTRL,
8834                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8835                 } else {
8836                         tw32(HOSTCC_MODE, tp->coalesce_mode |
8837                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8838                 }
8839
8840                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8841                         tg3_flag_set(tp, RESTART_TIMER);
8842                         spin_unlock(&tp->lock);
8843                         schedule_work(&tp->reset_task);
8844                         return;
8845                 }
8846         }
8847
8848         /* This part only runs once per second. */
8849         if (!--tp->timer_counter) {
8850                 if (tg3_flag(tp, 5705_PLUS))
8851                         tg3_periodic_fetch_stats(tp);
8852
8853                 if (tp->setlpicnt && !--tp->setlpicnt) {
8854                         u32 val = tr32(TG3_CPMU_EEE_MODE);
8855                         tw32(TG3_CPMU_EEE_MODE,
8856                              val | TG3_CPMU_EEEMD_LPI_ENABLE);
8857                 }
8858
8859                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
8860                         u32 mac_stat;
8861                         int phy_event;
8862
8863                         mac_stat = tr32(MAC_STATUS);
8864
8865                         phy_event = 0;
8866                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8867                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8868                                         phy_event = 1;
8869                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8870                                 phy_event = 1;
8871
8872                         if (phy_event)
8873                                 tg3_setup_phy(tp, 0);
8874                 } else if (tg3_flag(tp, POLL_SERDES)) {
8875                         u32 mac_stat = tr32(MAC_STATUS);
8876                         int need_setup = 0;
8877
8878                         if (netif_carrier_ok(tp->dev) &&
8879                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8880                                 need_setup = 1;
8881                         }
8882                         if (!netif_carrier_ok(tp->dev) &&
8883                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
8884                                          MAC_STATUS_SIGNAL_DET))) {
8885                                 need_setup = 1;
8886                         }
8887                         if (need_setup) {
8888                                 if (!tp->serdes_counter) {
8889                                         tw32_f(MAC_MODE,
8890                                              (tp->mac_mode &
8891                                               ~MAC_MODE_PORT_MODE_MASK));
8892                                         udelay(40);
8893                                         tw32_f(MAC_MODE, tp->mac_mode);
8894                                         udelay(40);
8895                                 }
8896                                 tg3_setup_phy(tp, 0);
8897                         }
8898                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8899                            tg3_flag(tp, 5780_CLASS)) {
8900                         tg3_serdes_parallel_detect(tp);
8901                 }
8902
8903                 tp->timer_counter = tp->timer_multiplier;
8904         }
8905
8906         /* Heartbeat is only sent once every 2 seconds.
8907          *
8908          * The heartbeat is to tell the ASF firmware that the host
8909          * driver is still alive.  In the event that the OS crashes,
8910          * ASF needs to reset the hardware to free up the FIFO space
8911          * that may be filled with rx packets destined for the host.
8912          * If the FIFO is full, ASF will no longer function properly.
8913          *
8914          * Unintended resets have been reported on real time kernels
8915          * where the timer doesn't run on time.  Netpoll will also have
8916          * the same problem.
8917          *
8918          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8919          * to check the ring condition when the heartbeat is expiring
8920          * before doing the reset.  This will prevent most unintended
8921          * resets.
8922          */
8923         if (!--tp->asf_counter) {
8924                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
8925                         tg3_wait_for_event_ack(tp);
8926
8927                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8928                                       FWCMD_NICDRV_ALIVE3);
8929                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8930                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8931                                       TG3_FW_UPDATE_TIMEOUT_SEC);
8932
8933                         tg3_generate_fw_event(tp);
8934                 }
8935                 tp->asf_counter = tp->asf_multiplier;
8936         }
8937
8938         spin_unlock(&tp->lock);
8939
8940 restart_timer:
8941         tp->timer.expires = jiffies + tp->timer_offset;
8942         add_timer(&tp->timer);
8943 }
8944
8945 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8946 {
8947         irq_handler_t fn;
8948         unsigned long flags;
8949         char *name;
8950         struct tg3_napi *tnapi = &tp->napi[irq_num];
8951
8952         if (tp->irq_cnt == 1)
8953                 name = tp->dev->name;
8954         else {
8955                 name = &tnapi->irq_lbl[0];
8956                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8957                 name[IFNAMSIZ-1] = 0;
8958         }
8959
8960         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
8961                 fn = tg3_msi;
8962                 if (tg3_flag(tp, 1SHOT_MSI))
8963                         fn = tg3_msi_1shot;
8964                 flags = 0;
8965         } else {
8966                 fn = tg3_interrupt;
8967                 if (tg3_flag(tp, TAGGED_STATUS))
8968                         fn = tg3_interrupt_tagged;
8969                 flags = IRQF_SHARED;
8970         }
8971
8972         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8973 }
8974
8975 static int tg3_test_interrupt(struct tg3 *tp)
8976 {
8977         struct tg3_napi *tnapi = &tp->napi[0];
8978         struct net_device *dev = tp->dev;
8979         int err, i, intr_ok = 0;
8980         u32 val;
8981
8982         if (!netif_running(dev))
8983                 return -ENODEV;
8984
8985         tg3_disable_ints(tp);
8986
8987         free_irq(tnapi->irq_vec, tnapi);
8988
8989         /*
8990          * Turn off MSI one-shot mode.  Otherwise this test has no
8991          * way to observe whether the interrupt was delivered.
8992          */
8993         if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
8994                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8995                 tw32(MSGINT_MODE, val);
8996         }
8997
8998         err = request_irq(tnapi->irq_vec, tg3_test_isr,
8999                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9000         if (err)
9001                 return err;
9002
9003         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9004         tg3_enable_ints(tp);
9005
9006         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9007                tnapi->coal_now);
9008
9009         for (i = 0; i < 5; i++) {
9010                 u32 int_mbox, misc_host_ctrl;
9011
9012                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9013                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9014
9015                 if ((int_mbox != 0) ||
9016                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9017                         intr_ok = 1;
9018                         break;
9019                 }
9020
9021                 msleep(10);
9022         }
9023
9024         tg3_disable_ints(tp);
9025
9026         free_irq(tnapi->irq_vec, tnapi);
9027
9028         err = tg3_request_irq(tp, 0);
9029
9030         if (err)
9031                 return err;
9032
9033         if (intr_ok) {
9034                 /* Reenable MSI one shot mode. */
9035                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9036                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9037                         tw32(MSGINT_MODE, val);
9038                 }
9039                 return 0;
9040         }
9041
9042         return -EIO;
9043 }
9044
9045 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
9046  * INTx mode is successfully restored.
9047  */
9048 static int tg3_test_msi(struct tg3 *tp)
9049 {
9050         int err;
9051         u16 pci_cmd;
9052
9053         if (!tg3_flag(tp, USING_MSI))
9054                 return 0;
9055
9056         /* Turn off SERR reporting in case MSI terminates with Master
9057          * Abort.
9058          */
9059         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9060         pci_write_config_word(tp->pdev, PCI_COMMAND,
9061                               pci_cmd & ~PCI_COMMAND_SERR);
9062
9063         err = tg3_test_interrupt(tp);
9064
9065         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9066
9067         if (!err)
9068                 return 0;
9069
9070         /* other failures */
9071         if (err != -EIO)
9072                 return err;
9073
9074         /* MSI test failed, go back to INTx mode */
9075         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9076                     "to INTx mode. Please report this failure to the PCI "
9077                     "maintainer and include system chipset information\n");
9078
9079         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9080
9081         pci_disable_msi(tp->pdev);
9082
9083         tg3_flag_clear(tp, USING_MSI);
9084         tp->napi[0].irq_vec = tp->pdev->irq;
9085
9086         err = tg3_request_irq(tp, 0);
9087         if (err)
9088                 return err;
9089
9090         /* Need to reset the chip because the MSI cycle may have terminated
9091          * with Master Abort.
9092          */
9093         tg3_full_lock(tp, 1);
9094
9095         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9096         err = tg3_init_hw(tp, 1);
9097
9098         tg3_full_unlock(tp);
9099
9100         if (err)
9101                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9102
9103         return err;
9104 }
9105
9106 static int tg3_request_firmware(struct tg3 *tp)
9107 {
9108         const __be32 *fw_data;
9109
9110         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9111                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9112                            tp->fw_needed);
9113                 return -ENOENT;
9114         }
9115
9116         fw_data = (void *)tp->fw->data;
9117
9118         /* Firmware blob starts with version numbers, followed by
9119          * start address and _full_ length including BSS sections
9120          * (which must be longer than the actual data, of course).
9121          */
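        /* Per the layout above: fw_data[0] holds the version word,
         * fw_data[1] the start address, and fw_data[2] the full length,
         * which is why 12 header bytes are subtracted from tp->fw->size
         * in the check below.
         */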
9122
9123         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9124         if (tp->fw_len < (tp->fw->size - 12)) {
9125                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9126                            tp->fw_len, tp->fw_needed);
9127                 release_firmware(tp->fw);
9128                 tp->fw = NULL;
9129                 return -EINVAL;
9130         }
9131
9132         /* We no longer need firmware; we have it. */
9133         tp->fw_needed = NULL;
9134         return 0;
9135 }
9136
9137 static bool tg3_enable_msix(struct tg3 *tp)
9138 {
9139         int i, rc, cpus = num_online_cpus();
9140         struct msix_entry msix_ent[tp->irq_max];
9141
9142         if (cpus == 1)
9143                 /* Just fall back to the simpler MSI mode. */
9144                 return false;
9145
9146         /*
9147          * We want as many rx rings enabled as there are cpus.
9148          * The first MSIX vector only deals with link interrupts, etc,
9149          * so we add one to the number of vectors we are requesting.
9150          */
9151         tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
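        /* e.g. four online CPUs request min(4 + 1, tp->irq_max)
         * vectors: one for link/error events plus up to four rx rings.
         */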
9152
9153         for (i = 0; i < tp->irq_max; i++) {
9154                 msix_ent[i].entry  = i;
9155                 msix_ent[i].vector = 0;
9156         }
9157
9158         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9159         if (rc < 0) {
9160                 return false;
9161         } else if (rc != 0) {
9162                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9163                         return false;
9164                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9165                               tp->irq_cnt, rc);
9166                 tp->irq_cnt = rc;
9167         }
9168
9169         for (i = 0; i < tp->irq_max; i++)
9170                 tp->napi[i].irq_vec = msix_ent[i].vector;
9171
9172         netif_set_real_num_tx_queues(tp->dev, 1);
9173         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9174         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9175                 pci_disable_msix(tp->pdev);
9176                 return false;
9177         }
9178
9179         if (tp->irq_cnt > 1) {
9180                 tg3_flag_set(tp, ENABLE_RSS);
9181
9182                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9183                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9184                         tg3_flag_set(tp, ENABLE_TSS);
9185                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9186                 }
9187         }
9188
9189         return true;
9190 }
9191
9192 static void tg3_ints_init(struct tg3 *tp)
9193 {
9194         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9195             !tg3_flag(tp, TAGGED_STATUS)) {
9196                 /* All MSI-supporting chips should support tagged
9197                  * status.  Warn and fall back to INTx if not.
9198                  */
9199                 netdev_warn(tp->dev,
9200                             "MSI without TAGGED_STATUS? Not using MSI\n");
9201                 goto defcfg;
9202         }
9203
9204         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9205                 tg3_flag_set(tp, USING_MSIX);
9206         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9207                 tg3_flag_set(tp, USING_MSI);
9208
9209         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9210                 u32 msi_mode = tr32(MSGINT_MODE);
9211                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9212                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9213                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9214         }
9215 defcfg:
9216         if (!tg3_flag(tp, USING_MSIX)) {
9217                 tp->irq_cnt = 1;
9218                 tp->napi[0].irq_vec = tp->pdev->irq;
9219                 netif_set_real_num_tx_queues(tp->dev, 1);
9220                 netif_set_real_num_rx_queues(tp->dev, 1);
9221         }
9222 }
9223
9224 static void tg3_ints_fini(struct tg3 *tp)
9225 {
9226         if (tg3_flag(tp, USING_MSIX))
9227                 pci_disable_msix(tp->pdev);
9228         else if (tg3_flag(tp, USING_MSI))
9229                 pci_disable_msi(tp->pdev);
9230         tg3_flag_clear(tp, USING_MSI);
9231         tg3_flag_clear(tp, USING_MSIX);
9232         tg3_flag_clear(tp, ENABLE_RSS);
9233         tg3_flag_clear(tp, ENABLE_TSS);
9234 }
9235
9236 static int tg3_open(struct net_device *dev)
9237 {
9238         struct tg3 *tp = netdev_priv(dev);
9239         int i, err;
9240
9241         if (tp->fw_needed) {
9242                 err = tg3_request_firmware(tp);
9243                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9244                         if (err)
9245                                 return err;
9246                 } else if (err) {
9247                         netdev_warn(tp->dev, "TSO capability disabled\n");
9248                         tg3_flag_clear(tp, TSO_CAPABLE);
9249                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9250                         netdev_notice(tp->dev, "TSO capability restored\n");
9251                         tg3_flag_set(tp, TSO_CAPABLE);
9252                 }
9253         }
9254
9255         netif_carrier_off(tp->dev);
9256
9257         err = tg3_power_up(tp);
9258         if (err)
9259                 return err;
9260
9261         tg3_full_lock(tp, 0);
9262
9263         tg3_disable_ints(tp);
9264         tg3_flag_clear(tp, INIT_COMPLETE);
9265
9266         tg3_full_unlock(tp);
9267
9268         /*
9269          * Setup interrupts first so we know how
9270          * many NAPI resources to allocate
9271          */
9272         tg3_ints_init(tp);
9273
9274         /* The placement of this call is tied
9275          * to the setup and use of Host TX descriptors.
9276          */
9277         err = tg3_alloc_consistent(tp);
9278         if (err)
9279                 goto err_out1;
9280
9281         tg3_napi_init(tp);
9282
9283         tg3_napi_enable(tp);
9284
9285         for (i = 0; i < tp->irq_cnt; i++) {
9286                 struct tg3_napi *tnapi = &tp->napi[i];
9287                 err = tg3_request_irq(tp, i);
9288                 if (err) {
9289                         for (i--; i >= 0; i--)
9290                                 free_irq(tp->napi[i].irq_vec, &tp->napi[i]);
9291                         break;
9292                 }
9293         }
9294
9295         if (err)
9296                 goto err_out2;
9297
9298         tg3_full_lock(tp, 0);
9299
9300         err = tg3_init_hw(tp, 1);
9301         if (err) {
9302                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9303                 tg3_free_rings(tp);
9304         } else {
9305                 if (tg3_flag(tp, TAGGED_STATUS))
9306                         tp->timer_offset = HZ;
9307                 else
9308                         tp->timer_offset = HZ / 10;
9309
9310                 BUG_ON(tp->timer_offset > HZ);
9311                 tp->timer_counter = tp->timer_multiplier =
9312                         (HZ / tp->timer_offset);
9313                 tp->asf_counter = tp->asf_multiplier =
9314                         ((HZ / tp->timer_offset) * 2);
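                /* The timer then fires every timer_offset jiffies; the
                 * counters make the once-per-second work in tg3_timer
                 * run every HZ ticks and the ASF heartbeat every 2 * HZ.
                 */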
9315
9316                 init_timer(&tp->timer);
9317                 tp->timer.expires = jiffies + tp->timer_offset;
9318                 tp->timer.data = (unsigned long) tp;
9319                 tp->timer.function = tg3_timer;
9320         }
9321
9322         tg3_full_unlock(tp);
9323
9324         if (err)
9325                 goto err_out3;
9326
9327         if (tg3_flag(tp, USING_MSI)) {
9328                 err = tg3_test_msi(tp);
9329
9330                 if (err) {
9331                         tg3_full_lock(tp, 0);
9332                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9333                         tg3_free_rings(tp);
9334                         tg3_full_unlock(tp);
9335
9336                         goto err_out2;
9337                 }
9338
9339                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9340                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9341
9342                         tw32(PCIE_TRANSACTION_CFG,
9343                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9344                 }
9345         }
9346
9347         tg3_phy_start(tp);
9348
9349         tg3_full_lock(tp, 0);
9350
9351         add_timer(&tp->timer);
9352         tg3_flag_set(tp, INIT_COMPLETE);
9353         tg3_enable_ints(tp);
9354
9355         tg3_full_unlock(tp);
9356
9357         netif_tx_start_all_queues(dev);
9358
9359         /*
9360          * Reset the loopback feature if it was turned on while the device
9361          * was down to make sure that it's installed properly now.
9362          */
9363         if (dev->features & NETIF_F_LOOPBACK)
9364                 tg3_set_loopback(dev, dev->features);
9365
9366         return 0;
9367
9368 err_out3:
9369         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9370                 struct tg3_napi *tnapi = &tp->napi[i];
9371                 free_irq(tnapi->irq_vec, tnapi);
9372         }
9373
9374 err_out2:
9375         tg3_napi_disable(tp);
9376         tg3_napi_fini(tp);
9377         tg3_free_consistent(tp);
9378
9379 err_out1:
9380         tg3_ints_fini(tp);
9381         return err;
9382 }
9383
9384 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9385                                                  struct rtnl_link_stats64 *);
9386 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9387
9388 static int tg3_close(struct net_device *dev)
9389 {
9390         int i;
9391         struct tg3 *tp = netdev_priv(dev);
9392
9393         tg3_napi_disable(tp);
9394         cancel_work_sync(&tp->reset_task);
9395
9396         netif_tx_stop_all_queues(dev);
9397
9398         del_timer_sync(&tp->timer);
9399
9400         tg3_phy_stop(tp);
9401
9402         tg3_full_lock(tp, 1);
9403
9404         tg3_disable_ints(tp);
9405
9406         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9407         tg3_free_rings(tp);
9408         tg3_flag_clear(tp, INIT_COMPLETE);
9409
9410         tg3_full_unlock(tp);
9411
9412         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9413                 struct tg3_napi *tnapi = &tp->napi[i];
9414                 free_irq(tnapi->irq_vec, tnapi);
9415         }
9416
9417         tg3_ints_fini(tp);
9418
9419         tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9420
9421         memcpy(&tp->estats_prev, tg3_get_estats(tp),
9422                sizeof(tp->estats_prev));
9423
9424         tg3_napi_fini(tp);
9425
9426         tg3_free_consistent(tp);
9427
9428         tg3_power_down(tp);
9429
9430         netif_carrier_off(tp->dev);
9431
9432         return 0;
9433 }
9434
9435 static inline u64 get_stat64(tg3_stat64_t *val)
9436 {
9437        return ((u64)val->high << 32) | ((u64)val->low);
9438 }
9439
9440 static u64 calc_crc_errors(struct tg3 *tp)
9441 {
9442         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9443
9444         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9445             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9446              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9447                 u32 val;
9448
9449                 spin_lock_bh(&tp->lock);
9450                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9451                         tg3_writephy(tp, MII_TG3_TEST1,
9452                                      val | MII_TG3_TEST1_CRC_EN);
9453                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9454                 } else
9455                         val = 0;
9456                 spin_unlock_bh(&tp->lock);
9457
9458                 tp->phy_crc_errors += val;
9459
9460                 return tp->phy_crc_errors;
9461         }
9462
9463         return get_stat64(&hw_stats->rx_fcs_errors);
9464 }
9465
9466 #define ESTAT_ADD(member) \
9467         estats->member =        old_estats->member + \
9468                                 get_stat64(&hw_stats->member)
9469
9470 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9471 {
9472         struct tg3_ethtool_stats *estats = &tp->estats;
9473         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9474         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9475
9476         if (!hw_stats)
9477                 return old_estats;
9478
9479         ESTAT_ADD(rx_octets);
9480         ESTAT_ADD(rx_fragments);
9481         ESTAT_ADD(rx_ucast_packets);
9482         ESTAT_ADD(rx_mcast_packets);
9483         ESTAT_ADD(rx_bcast_packets);
9484         ESTAT_ADD(rx_fcs_errors);
9485         ESTAT_ADD(rx_align_errors);
9486         ESTAT_ADD(rx_xon_pause_rcvd);
9487         ESTAT_ADD(rx_xoff_pause_rcvd);
9488         ESTAT_ADD(rx_mac_ctrl_rcvd);
9489         ESTAT_ADD(rx_xoff_entered);
9490         ESTAT_ADD(rx_frame_too_long_errors);
9491         ESTAT_ADD(rx_jabbers);
9492         ESTAT_ADD(rx_undersize_packets);
9493         ESTAT_ADD(rx_in_length_errors);
9494         ESTAT_ADD(rx_out_length_errors);
9495         ESTAT_ADD(rx_64_or_less_octet_packets);
9496         ESTAT_ADD(rx_65_to_127_octet_packets);
9497         ESTAT_ADD(rx_128_to_255_octet_packets);
9498         ESTAT_ADD(rx_256_to_511_octet_packets);
9499         ESTAT_ADD(rx_512_to_1023_octet_packets);
9500         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9501         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9502         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9503         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9504         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9505
9506         ESTAT_ADD(tx_octets);
9507         ESTAT_ADD(tx_collisions);
9508         ESTAT_ADD(tx_xon_sent);
9509         ESTAT_ADD(tx_xoff_sent);
9510         ESTAT_ADD(tx_flow_control);
9511         ESTAT_ADD(tx_mac_errors);
9512         ESTAT_ADD(tx_single_collisions);
9513         ESTAT_ADD(tx_mult_collisions);
9514         ESTAT_ADD(tx_deferred);
9515         ESTAT_ADD(tx_excessive_collisions);
9516         ESTAT_ADD(tx_late_collisions);
9517         ESTAT_ADD(tx_collide_2times);
9518         ESTAT_ADD(tx_collide_3times);
9519         ESTAT_ADD(tx_collide_4times);
9520         ESTAT_ADD(tx_collide_5times);
9521         ESTAT_ADD(tx_collide_6times);
9522         ESTAT_ADD(tx_collide_7times);
9523         ESTAT_ADD(tx_collide_8times);
9524         ESTAT_ADD(tx_collide_9times);
9525         ESTAT_ADD(tx_collide_10times);
9526         ESTAT_ADD(tx_collide_11times);
9527         ESTAT_ADD(tx_collide_12times);
9528         ESTAT_ADD(tx_collide_13times);
9529         ESTAT_ADD(tx_collide_14times);
9530         ESTAT_ADD(tx_collide_15times);
9531         ESTAT_ADD(tx_ucast_packets);
9532         ESTAT_ADD(tx_mcast_packets);
9533         ESTAT_ADD(tx_bcast_packets);
9534         ESTAT_ADD(tx_carrier_sense_errors);
9535         ESTAT_ADD(tx_discards);
9536         ESTAT_ADD(tx_errors);
9537
9538         ESTAT_ADD(dma_writeq_full);
9539         ESTAT_ADD(dma_write_prioq_full);
9540         ESTAT_ADD(rxbds_empty);
9541         ESTAT_ADD(rx_discards);
9542         ESTAT_ADD(rx_errors);
9543         ESTAT_ADD(rx_threshold_hit);
9544
9545         ESTAT_ADD(dma_readq_full);
9546         ESTAT_ADD(dma_read_prioq_full);
9547         ESTAT_ADD(tx_comp_queue_full);
9548
9549         ESTAT_ADD(ring_set_send_prod_index);
9550         ESTAT_ADD(ring_status_update);
9551         ESTAT_ADD(nic_irqs);
9552         ESTAT_ADD(nic_avoided_irqs);
9553         ESTAT_ADD(nic_tx_threshold_hit);
9554
9555         return estats;
9556 }
9557
9558 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9559                                                  struct rtnl_link_stats64 *stats)
9560 {
9561         struct tg3 *tp = netdev_priv(dev);
9562         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9563         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9564
9565         if (!hw_stats)
9566                 return old_stats;
9567
9568         stats->rx_packets = old_stats->rx_packets +
9569                 get_stat64(&hw_stats->rx_ucast_packets) +
9570                 get_stat64(&hw_stats->rx_mcast_packets) +
9571                 get_stat64(&hw_stats->rx_bcast_packets);
9572
9573         stats->tx_packets = old_stats->tx_packets +
9574                 get_stat64(&hw_stats->tx_ucast_packets) +
9575                 get_stat64(&hw_stats->tx_mcast_packets) +
9576                 get_stat64(&hw_stats->tx_bcast_packets);
9577
9578         stats->rx_bytes = old_stats->rx_bytes +
9579                 get_stat64(&hw_stats->rx_octets);
9580         stats->tx_bytes = old_stats->tx_bytes +
9581                 get_stat64(&hw_stats->tx_octets);
9582
9583         stats->rx_errors = old_stats->rx_errors +
9584                 get_stat64(&hw_stats->rx_errors);
9585         stats->tx_errors = old_stats->tx_errors +
9586                 get_stat64(&hw_stats->tx_errors) +
9587                 get_stat64(&hw_stats->tx_mac_errors) +
9588                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9589                 get_stat64(&hw_stats->tx_discards);
9590
9591         stats->multicast = old_stats->multicast +
9592                 get_stat64(&hw_stats->rx_mcast_packets);
9593         stats->collisions = old_stats->collisions +
9594                 get_stat64(&hw_stats->tx_collisions);
9595
9596         stats->rx_length_errors = old_stats->rx_length_errors +
9597                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9598                 get_stat64(&hw_stats->rx_undersize_packets);
9599
9600         stats->rx_over_errors = old_stats->rx_over_errors +
9601                 get_stat64(&hw_stats->rxbds_empty);
9602         stats->rx_frame_errors = old_stats->rx_frame_errors +
9603                 get_stat64(&hw_stats->rx_align_errors);
9604         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9605                 get_stat64(&hw_stats->tx_discards);
9606         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9607                 get_stat64(&hw_stats->tx_carrier_sense_errors);
9608
9609         stats->rx_crc_errors = old_stats->rx_crc_errors +
9610                 calc_crc_errors(tp);
9611
9612         stats->rx_missed_errors = old_stats->rx_missed_errors +
9613                 get_stat64(&hw_stats->rx_discards);
9614
9615         stats->rx_dropped = tp->rx_dropped;
9616
9617         return stats;
9618 }
9619
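/* Bit-reflected CRC-32 (polynomial 0xedb88320, the same CRC used for the
 * Ethernet FCS).  __tg3_set_rx_mode() complements the result and uses the
 * low seven bits to index the 128-bit multicast hash filter.
 */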
9620 static inline u32 calc_crc(unsigned char *buf, int len)
9621 {
9622         u32 reg;
9623         u32 tmp;
9624         int j, k;
9625
9626         reg = 0xffffffff;
9627
9628         for (j = 0; j < len; j++) {
9629                 reg ^= buf[j];
9630
9631                 for (k = 0; k < 8; k++) {
9632                         tmp = reg & 0x01;
9633
9634                         reg >>= 1;
9635
9636                         if (tmp)
9637                                 reg ^= 0xedb88320;
9638                 }
9639         }
9640
9641         return ~reg;
9642 }
9643
9644 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9645 {
9646         /* accept or reject all multicast frames */
9647         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9648         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9649         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9650         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9651 }
9652
9653 static void __tg3_set_rx_mode(struct net_device *dev)
9654 {
9655         struct tg3 *tp = netdev_priv(dev);
9656         u32 rx_mode;
9657
9658         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9659                                   RX_MODE_KEEP_VLAN_TAG);
9660
9661 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9662         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9663          * flag clear.
9664          */
9665         if (!tg3_flag(tp, ENABLE_ASF))
9666                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9667 #endif
9668
9669         if (dev->flags & IFF_PROMISC) {
9670                 /* Promiscuous mode. */
9671                 rx_mode |= RX_MODE_PROMISC;
9672         } else if (dev->flags & IFF_ALLMULTI) {
9673                 /* Accept all multicast. */
9674                 tg3_set_multi(tp, 1);
9675         } else if (netdev_mc_empty(dev)) {
9676                 /* Reject all multicast. */
9677                 tg3_set_multi(tp, 0);
9678         } else {
9679                 /* Accept one or more multicast(s). */
9680                 struct netdev_hw_addr *ha;
9681                 u32 mc_filter[4] = { 0, };
9682                 u32 regidx;
9683                 u32 bit;
9684                 u32 crc;
9685
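                /* Hash each address into one of 128 filter bits: the low
                 * seven bits of the inverted CRC select the bit, of which
                 * bits 5-6 pick one of the four 32-bit MAC_HASH registers
                 * and bits 0-4 pick the bit within that register.
                 */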
9686                 netdev_for_each_mc_addr(ha, dev) {
9687                         crc = calc_crc(ha->addr, ETH_ALEN);
9688                         bit = ~crc & 0x7f;
9689                         regidx = (bit & 0x60) >> 5;
9690                         bit &= 0x1f;
9691                         mc_filter[regidx] |= (1 << bit);
9692                 }
9693
9694                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9695                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9696                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9697                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9698         }
9699
9700         if (rx_mode != tp->rx_mode) {
9701                 tp->rx_mode = rx_mode;
9702                 tw32_f(MAC_RX_MODE, rx_mode);
9703                 udelay(10);
9704         }
9705 }
9706
9707 static void tg3_set_rx_mode(struct net_device *dev)
9708 {
9709         struct tg3 *tp = netdev_priv(dev);
9710
9711         if (!netif_running(dev))
9712                 return;
9713
9714         tg3_full_lock(tp, 0);
9715         __tg3_set_rx_mode(dev);
9716         tg3_full_unlock(tp);
9717 }
9718
9719 static int tg3_get_regs_len(struct net_device *dev)
9720 {
9721         return TG3_REG_BLK_SIZE;
9722 }
9723
9724 static void tg3_get_regs(struct net_device *dev,
9725                 struct ethtool_regs *regs, void *_p)
9726 {
9727         struct tg3 *tp = netdev_priv(dev);
9728
9729         regs->version = 0;
9730
9731         memset(_p, 0, TG3_REG_BLK_SIZE);
9732
9733         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9734                 return;
9735
9736         tg3_full_lock(tp, 0);
9737
9738         tg3_dump_legacy_regs(tp, (u32 *)_p);
9739
9740         tg3_full_unlock(tp);
9741 }
9742
9743 static int tg3_get_eeprom_len(struct net_device *dev)
9744 {
9745         struct tg3 *tp = netdev_priv(dev);
9746
9747         return tp->nvram_size;
9748 }
9749
9750 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9751 {
9752         struct tg3 *tp = netdev_priv(dev);
9753         int ret;
9754         u8  *pd;
9755         u32 i, offset, len, b_offset, b_count;
9756         __be32 val;
9757
9758         if (tg3_flag(tp, NO_NVRAM))
9759                 return -EINVAL;
9760
9761         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9762                 return -EAGAIN;
9763
9764         offset = eeprom->offset;
9765         len = eeprom->len;
9766         eeprom->len = 0;
9767
9768         eeprom->magic = TG3_EEPROM_MAGIC;
9769
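        /* NVRAM is accessed a 32-bit word at a time, so the request is
         * split into an unaligned head (handled below), whole aligned
         * words, and an unaligned tail, with eeprom->len advanced as
         * each piece is copied out.
         */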
9770         if (offset & 3) {
9771                 /* adjust to start on the required 4-byte boundary */
9772                 b_offset = offset & 3;
9773                 b_count = 4 - b_offset;
9774                 if (b_count > len) {
9775                         /* i.e. offset=1 len=2 */
9776                         b_count = len;
9777                 }
9778                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9779                 if (ret)
9780                         return ret;
9781                 memcpy(data, ((char *)&val) + b_offset, b_count);
9782                 len -= b_count;
9783                 offset += b_count;
9784                 eeprom->len += b_count;
9785         }
9786
9787         /* read whole 32-bit words up to the last 4-byte boundary */
9788         pd = &data[eeprom->len];
9789         for (i = 0; i < (len - (len & 3)); i += 4) {
9790                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9791                 if (ret) {
9792                         eeprom->len += i;
9793                         return ret;
9794                 }
9795                 memcpy(pd + i, &val, 4);
9796         }
9797         eeprom->len += i;
9798
9799         if (len & 3) {
9800                 /* read the trailing bytes that do not end on a 4-byte boundary */
9801                 pd = &data[eeprom->len];
9802                 b_count = len & 3;
9803                 b_offset = offset + len - b_count;
9804                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9805                 if (ret)
9806                         return ret;
9807                 memcpy(pd, &val, b_count);
9808                 eeprom->len += b_count;
9809         }
9810         return 0;
9811 }
9812
9813 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9814
9815 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9816 {
9817         struct tg3 *tp = netdev_priv(dev);
9818         int ret;
9819         u32 offset, len, b_offset, odd_len;
9820         u8 *buf;
9821         __be32 start, end;
9822
9823         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9824                 return -EAGAIN;
9825
9826         if (tg3_flag(tp, NO_NVRAM) ||
9827             eeprom->magic != TG3_EEPROM_MAGIC)
9828                 return -EINVAL;
9829
9830         offset = eeprom->offset;
9831         len = eeprom->len;
9832
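        /* NVRAM writes must be 32-bit aligned as well.  If the request
         * starts or ends mid-word, the neighboring word(s) are read
         * first and the user data merged into a temporary buffer below,
         * i.e. a read-modify-write of the partial words.
         */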
9833         if ((b_offset = (offset & 3))) {
9834                 /* adjust to start on the required 4-byte boundary */
9835                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9836                 if (ret)
9837                         return ret;
9838                 len += b_offset;
9839                 offset &= ~3;
9840                 if (len < 4)
9841                         len = 4;
9842         }
9843
9844         odd_len = 0;
9845         if (len & 3) {
9846                 /* adjust to end on the required 4-byte boundary */
9847                 odd_len = 1;
9848                 len = (len + 3) & ~3;
9849                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9850                 if (ret)
9851                         return ret;
9852         }
9853
9854         buf = data;
9855         if (b_offset || odd_len) {
9856                 buf = kmalloc(len, GFP_KERNEL);
9857                 if (!buf)
9858                         return -ENOMEM;
9859                 if (b_offset)
9860                         memcpy(buf, &start, 4);
9861                 if (odd_len)
9862                         memcpy(buf+len-4, &end, 4);
9863                 memcpy(buf + b_offset, data, eeprom->len);
9864         }
9865
9866         ret = tg3_nvram_write_block(tp, offset, len, buf);
9867
9868         if (buf != data)
9869                 kfree(buf);
9870
9871         return ret;
9872 }
9873
9874 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9875 {
9876         struct tg3 *tp = netdev_priv(dev);
9877
9878         if (tg3_flag(tp, USE_PHYLIB)) {
9879                 struct phy_device *phydev;
9880                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9881                         return -EAGAIN;
9882                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9883                 return phy_ethtool_gset(phydev, cmd);
9884         }
9885
9886         cmd->supported = (SUPPORTED_Autoneg);
9887
9888         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9889                 cmd->supported |= (SUPPORTED_1000baseT_Half |
9890                                    SUPPORTED_1000baseT_Full);
9891
9892         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9893                 cmd->supported |= (SUPPORTED_100baseT_Half |
9894                                   SUPPORTED_100baseT_Full |
9895                                   SUPPORTED_10baseT_Half |
9896                                   SUPPORTED_10baseT_Full |
9897                                   SUPPORTED_TP);
9898                 cmd->port = PORT_TP;
9899         } else {
9900                 cmd->supported |= SUPPORTED_FIBRE;
9901                 cmd->port = PORT_FIBRE;
9902         }
9903
9904         cmd->advertising = tp->link_config.advertising;
9905         if (netif_running(dev)) {
9906                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
9907                 cmd->duplex = tp->link_config.active_duplex;
9908         } else {
9909                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
9910                 cmd->duplex = DUPLEX_INVALID;
9911         }
9912         cmd->phy_address = tp->phy_addr;
9913         cmd->transceiver = XCVR_INTERNAL;
9914         cmd->autoneg = tp->link_config.autoneg;
9915         cmd->maxtxpkt = 0;
9916         cmd->maxrxpkt = 0;
9917         return 0;
9918 }
9919
9920 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9921 {
9922         struct tg3 *tp = netdev_priv(dev);
9923         u32 speed = ethtool_cmd_speed(cmd);
9924
9925         if (tg3_flag(tp, USE_PHYLIB)) {
9926                 struct phy_device *phydev;
9927                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9928                         return -EAGAIN;
9929                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9930                 return phy_ethtool_sset(phydev, cmd);
9931         }
9932
9933         if (cmd->autoneg != AUTONEG_ENABLE &&
9934             cmd->autoneg != AUTONEG_DISABLE)
9935                 return -EINVAL;
9936
9937         if (cmd->autoneg == AUTONEG_DISABLE &&
9938             cmd->duplex != DUPLEX_FULL &&
9939             cmd->duplex != DUPLEX_HALF)
9940                 return -EINVAL;
9941
9942         if (cmd->autoneg == AUTONEG_ENABLE) {
9943                 u32 mask = ADVERTISED_Autoneg |
9944                            ADVERTISED_Pause |
9945                            ADVERTISED_Asym_Pause;
9946
9947                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9948                         mask |= ADVERTISED_1000baseT_Half |
9949                                 ADVERTISED_1000baseT_Full;
9950
9951                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9952                         mask |= ADVERTISED_100baseT_Half |
9953                                 ADVERTISED_100baseT_Full |
9954                                 ADVERTISED_10baseT_Half |
9955                                 ADVERTISED_10baseT_Full |
9956                                 ADVERTISED_TP;
9957                 else
9958                         mask |= ADVERTISED_FIBRE;
9959
9960                 if (cmd->advertising & ~mask)
9961                         return -EINVAL;
9962
9963                 mask &= (ADVERTISED_1000baseT_Half |
9964                          ADVERTISED_1000baseT_Full |
9965                          ADVERTISED_100baseT_Half |
9966                          ADVERTISED_100baseT_Full |
9967                          ADVERTISED_10baseT_Half |
9968                          ADVERTISED_10baseT_Full);
9969
9970                 cmd->advertising &= mask;
9971         } else {
9972                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
9973                         if (speed != SPEED_1000)
9974                                 return -EINVAL;
9975
9976                         if (cmd->duplex != DUPLEX_FULL)
9977                                 return -EINVAL;
9978                 } else {
9979                         if (speed != SPEED_100 &&
9980                             speed != SPEED_10)
9981                                 return -EINVAL;
9982                 }
9983         }
9984
9985         tg3_full_lock(tp, 0);
9986
9987         tp->link_config.autoneg = cmd->autoneg;
9988         if (cmd->autoneg == AUTONEG_ENABLE) {
9989                 tp->link_config.advertising = (cmd->advertising |
9990                                               ADVERTISED_Autoneg);
9991                 tp->link_config.speed = SPEED_INVALID;
9992                 tp->link_config.duplex = DUPLEX_INVALID;
9993         } else {
9994                 tp->link_config.advertising = 0;
9995                 tp->link_config.speed = speed;
9996                 tp->link_config.duplex = cmd->duplex;
9997         }
9998
9999         tp->link_config.orig_speed = tp->link_config.speed;
10000         tp->link_config.orig_duplex = tp->link_config.duplex;
10001         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10002
10003         if (netif_running(dev))
10004                 tg3_setup_phy(tp, 1);
10005
10006         tg3_full_unlock(tp);
10007
10008         return 0;
10009 }
10010
10011 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10012 {
10013         struct tg3 *tp = netdev_priv(dev);
10014
10015         strcpy(info->driver, DRV_MODULE_NAME);
10016         strcpy(info->version, DRV_MODULE_VERSION);
10017         strcpy(info->fw_version, tp->fw_ver);
10018         strcpy(info->bus_info, pci_name(tp->pdev));
10019 }
10020
10021 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10022 {
10023         struct tg3 *tp = netdev_priv(dev);
10024
10025         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10026                 wol->supported = WAKE_MAGIC;
10027         else
10028                 wol->supported = 0;
10029         wol->wolopts = 0;
10030         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10031                 wol->wolopts = WAKE_MAGIC;
10032         memset(&wol->sopass, 0, sizeof(wol->sopass));
10033 }
10034
10035 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10036 {
10037         struct tg3 *tp = netdev_priv(dev);
10038         struct device *dp = &tp->pdev->dev;
10039
10040         if (wol->wolopts & ~WAKE_MAGIC)
10041                 return -EINVAL;
10042         if ((wol->wolopts & WAKE_MAGIC) &&
10043             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10044                 return -EINVAL;
10045
10046         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10047
10048         spin_lock_bh(&tp->lock);
10049         if (device_may_wakeup(dp))
10050                 tg3_flag_set(tp, WOL_ENABLE);
10051         else
10052                 tg3_flag_clear(tp, WOL_ENABLE);
10053         spin_unlock_bh(&tp->lock);
10054
10055         return 0;
10056 }
10057
10058 static u32 tg3_get_msglevel(struct net_device *dev)
10059 {
10060         struct tg3 *tp = netdev_priv(dev);
10061         return tp->msg_enable;
10062 }
10063
10064 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10065 {
10066         struct tg3 *tp = netdev_priv(dev);
10067         tp->msg_enable = value;
10068 }
10069
10070 static int tg3_nway_reset(struct net_device *dev)
10071 {
10072         struct tg3 *tp = netdev_priv(dev);
10073         int r;
10074
10075         if (!netif_running(dev))
10076                 return -EAGAIN;
10077
10078         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10079                 return -EINVAL;
10080
10081         if (tg3_flag(tp, USE_PHYLIB)) {
10082                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10083                         return -EAGAIN;
10084                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10085         } else {
10086                 u32 bmcr;
10087
10088                 spin_lock_bh(&tp->lock);
10089                 r = -EINVAL;
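                /* The result of the first BMCR read is discarded; only
                 * the second, error-checked read decides whether an
                 * autoneg restart can be issued.
                 */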
10090                 tg3_readphy(tp, MII_BMCR, &bmcr);
10091                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10092                     ((bmcr & BMCR_ANENABLE) ||
10093                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10094                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10095                                                    BMCR_ANENABLE);
10096                         r = 0;
10097                 }
10098                 spin_unlock_bh(&tp->lock);
10099         }
10100
10101         return r;
10102 }
10103
10104 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10105 {
10106         struct tg3 *tp = netdev_priv(dev);
10107
10108         ering->rx_max_pending = tp->rx_std_ring_mask;
10109         ering->rx_mini_max_pending = 0;
10110         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10111                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10112         else
10113                 ering->rx_jumbo_max_pending = 0;
10114
10115         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10116
10117         ering->rx_pending = tp->rx_pending;
10118         ering->rx_mini_pending = 0;
10119         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10120                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10121         else
10122                 ering->rx_jumbo_pending = 0;
10123
10124         ering->tx_pending = tp->napi[0].tx_pending;
10125 }
10126
10127 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10128 {
10129         struct tg3 *tp = netdev_priv(dev);
10130         int i, irq_sync = 0, err = 0;
10131
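        /* Reject sizes beyond the hardware ring limits.  tx_pending must
         * exceed MAX_SKB_FRAGS because a single skb can consume one
         * descriptor per fragment plus one for the linear data; chips
         * flagged TSO_BUG require three times that minimum.
         */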
10132         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10133             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10134             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10135             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10136             (tg3_flag(tp, TSO_BUG) &&
10137              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10138                 return -EINVAL;
10139
10140         if (netif_running(dev)) {
10141                 tg3_phy_stop(tp);
10142                 tg3_netif_stop(tp);
10143                 irq_sync = 1;
10144         }
10145
10146         tg3_full_lock(tp, irq_sync);
10147
10148         tp->rx_pending = ering->rx_pending;
10149
10150         if (tg3_flag(tp, MAX_RXPEND_64) &&
10151             tp->rx_pending > 63)
10152                 tp->rx_pending = 63;
10153         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10154
10155         for (i = 0; i < tp->irq_max; i++)
10156                 tp->napi[i].tx_pending = ering->tx_pending;
10157
10158         if (netif_running(dev)) {
10159                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10160                 err = tg3_restart_hw(tp, 1);
10161                 if (!err)
10162                         tg3_netif_start(tp);
10163         }
10164
10165         tg3_full_unlock(tp);
10166
10167         if (irq_sync && !err)
10168                 tg3_phy_start(tp);
10169
10170         return err;
10171 }
10172
10173 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10174 {
10175         struct tg3 *tp = netdev_priv(dev);
10176
10177         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10178
10179         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10180                 epause->rx_pause = 1;
10181         else
10182                 epause->rx_pause = 0;
10183
10184         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10185                 epause->tx_pause = 1;
10186         else
10187                 epause->tx_pause = 0;
10188 }
10189
10190 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10191 {
10192         struct tg3 *tp = netdev_priv(dev);
10193         int err = 0;
10194
10195         if (tg3_flag(tp, USE_PHYLIB)) {
10196                 u32 newadv;
10197                 struct phy_device *phydev;
10198
10199                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10200
10201                 if (!(phydev->supported & SUPPORTED_Pause) ||
10202                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10203                      (epause->rx_pause != epause->tx_pause)))
10204                         return -EINVAL;
10205
10206                 tp->link_config.flowctrl = 0;
10207                 if (epause->rx_pause) {
10208                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10209
10210                         if (epause->tx_pause) {
10211                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10212                                 newadv = ADVERTISED_Pause;
10213                         } else
10214                                 newadv = ADVERTISED_Pause |
10215                                          ADVERTISED_Asym_Pause;
10216                 } else if (epause->tx_pause) {
10217                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10218                         newadv = ADVERTISED_Asym_Pause;
10219                 } else
10220                         newadv = 0;
10221
10222                 if (epause->autoneg)
10223                         tg3_flag_set(tp, PAUSE_AUTONEG);
10224                 else
10225                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10226
10227                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10228                         u32 oldadv = phydev->advertising &
10229                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10230                         if (oldadv != newadv) {
10231                                 phydev->advertising &=
10232                                         ~(ADVERTISED_Pause |
10233                                           ADVERTISED_Asym_Pause);
10234                                 phydev->advertising |= newadv;
10235                                 if (phydev->autoneg) {
10236                                         /*
10237                                          * Always renegotiate the link to
10238                                          * inform our link partner of our
10239                                          * flow control settings, even if the
10240                                          * flow control is forced.  Let
10241                                          * tg3_adjust_link() do the final
10242                                          * flow control setup.
10243                                          */
10244                                         return phy_start_aneg(phydev);
10245                                 }
10246                         }
10247
10248                         if (!epause->autoneg)
10249                                 tg3_setup_flow_control(tp, 0, 0);
10250                 } else {
10251                         tp->link_config.orig_advertising &=
10252                                         ~(ADVERTISED_Pause |
10253                                           ADVERTISED_Asym_Pause);
10254                         tp->link_config.orig_advertising |= newadv;
10255                 }
10256         } else {
10257                 int irq_sync = 0;
10258
10259                 if (netif_running(dev)) {
10260                         tg3_netif_stop(tp);
10261                         irq_sync = 1;
10262                 }
10263
10264                 tg3_full_lock(tp, irq_sync);
10265
10266                 if (epause->autoneg)
10267                         tg3_flag_set(tp, PAUSE_AUTONEG);
10268                 else
10269                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10270                 if (epause->rx_pause)
10271                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10272                 else
10273                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10274                 if (epause->tx_pause)
10275                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10276                 else
10277                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10278
10279                 if (netif_running(dev)) {
10280                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10281                         err = tg3_restart_hw(tp, 1);
10282                         if (!err)
10283                                 tg3_netif_start(tp);
10284                 }
10285
10286                 tg3_full_unlock(tp);
10287         }
10288
10289         return err;
10290 }
10291
10292 static int tg3_get_sset_count(struct net_device *dev, int sset)
10293 {
10294         switch (sset) {
10295         case ETH_SS_TEST:
10296                 return TG3_NUM_TEST;
10297         case ETH_SS_STATS:
10298                 return TG3_NUM_STATS;
10299         default:
10300                 return -EOPNOTSUPP;
10301         }
10302 }
10303
10304 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10305 {
10306         switch (stringset) {
10307         case ETH_SS_STATS:
10308                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10309                 break;
10310         case ETH_SS_TEST:
10311                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10312                 break;
10313         default:
10314                 WARN_ON(1);     /* we need a WARN() */
10315                 break;
10316         }
10317 }
10318
10319 static int tg3_set_phys_id(struct net_device *dev,
10320                             enum ethtool_phys_id_state state)
10321 {
10322         struct tg3 *tp = netdev_priv(dev);
10323
10324         if (!netif_running(tp->dev))
10325                 return -EAGAIN;
10326
10327         switch (state) {
10328         case ETHTOOL_ID_ACTIVE:
10329                 return 1;       /* cycle on/off once per second */
10330
10331         case ETHTOOL_ID_ON:
10332                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10333                      LED_CTRL_1000MBPS_ON |
10334                      LED_CTRL_100MBPS_ON |
10335                      LED_CTRL_10MBPS_ON |
10336                      LED_CTRL_TRAFFIC_OVERRIDE |
10337                      LED_CTRL_TRAFFIC_BLINK |
10338                      LED_CTRL_TRAFFIC_LED);
10339                 break;
10340
10341         case ETHTOOL_ID_OFF:
10342                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10343                      LED_CTRL_TRAFFIC_OVERRIDE);
10344                 break;
10345
10346         case ETHTOOL_ID_INACTIVE:
10347                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10348                 break;
10349         }
10350
10351         return 0;
10352 }
10353
10354 static void tg3_get_ethtool_stats(struct net_device *dev,
10355                                    struct ethtool_stats *estats, u64 *tmp_stats)
10356 {
10357         struct tg3 *tp = netdev_priv(dev);
10358         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10359 }
10360
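/* Locate and read the board's VPD data.  If the NVRAM directory contains
 * an extended-VPD entry, its offset and length are used; otherwise the
 * fixed TG3_NVM_VPD_OFF/TG3_NVM_VPD_LEN window is assumed.  Devices
 * without tg3-format NVRAM are read through the PCI VPD capability
 * instead, retrying up to three times on timeouts or signals.
 */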
10361 static __be32 *tg3_vpd_readblock(struct tg3 *tp)
10362 {
10363         int i;
10364         __be32 *buf;
10365         u32 offset = 0, len = 0;
10366         u32 magic, val;
10367
10368         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10369                 return NULL;
10370
10371         if (magic == TG3_EEPROM_MAGIC) {
10372                 for (offset = TG3_NVM_DIR_START;
10373                      offset < TG3_NVM_DIR_END;
10374                      offset += TG3_NVM_DIRENT_SIZE) {
10375                         if (tg3_nvram_read(tp, offset, &val))
10376                                 return NULL;
10377
10378                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10379                             TG3_NVM_DIRTYPE_EXTVPD)
10380                                 break;
10381                 }
10382
10383                 if (offset != TG3_NVM_DIR_END) {
10384                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10385                         if (tg3_nvram_read(tp, offset + 4, &offset))
10386                                 return NULL;
10387
10388                         offset = tg3_nvram_logical_addr(tp, offset);
10389                 }
10390         }
10391
10392         if (!offset || !len) {
10393                 offset = TG3_NVM_VPD_OFF;
10394                 len = TG3_NVM_VPD_LEN;
10395         }
10396
10397         buf = kmalloc(len, GFP_KERNEL);
10398         if (buf == NULL)
10399                 return NULL;
10400
10401         if (magic == TG3_EEPROM_MAGIC) {
10402                 for (i = 0; i < len; i += 4) {
10403                         /* The data is in little-endian format in NVRAM.
10404                          * Use the big-endian read routines to preserve
10405                          * the byte order as it exists in NVRAM.
10406                          */
10407                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10408                                 goto error;
10409                 }
10410         } else {
10411                 u8 *ptr;
10412                 ssize_t cnt;
10413                 unsigned int pos = 0;
10414
10415                 ptr = (u8 *)&buf[0];
10416                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10417                         cnt = pci_read_vpd(tp->pdev, pos,
10418                                            len - pos, ptr);
10419                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10420                                 cnt = 0;
10421                         else if (cnt < 0)
10422                                 goto error;
10423                 }
10424                 if (pos != len)
10425                         goto error;
10426         }
10427
10428         return buf;
10429
10430 error:
10431         kfree(buf);
10432         return NULL;
10433 }
10434
10435 #define NVRAM_TEST_SIZE 0x100
10436 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10437 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10438 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10439 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10440 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10441
10442 static int tg3_test_nvram(struct tg3 *tp)
10443 {
10444         u32 csum, magic;
10445         __be32 *buf;
10446         int i, j, k, err = 0, size;
10447
10448         if (tg3_flag(tp, NO_NVRAM))
10449                 return 0;
10450
10451         if (tg3_nvram_read(tp, 0, &magic) != 0)
10452                 return -EIO;
10453
10454         if (magic == TG3_EEPROM_MAGIC)
10455                 size = NVRAM_TEST_SIZE;
10456         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10457                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10458                     TG3_EEPROM_SB_FORMAT_1) {
10459                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10460                         case TG3_EEPROM_SB_REVISION_0:
10461                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10462                                 break;
10463                         case TG3_EEPROM_SB_REVISION_2:
10464                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10465                                 break;
10466                         case TG3_EEPROM_SB_REVISION_3:
10467                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10468                                 break;
10469                         default:
10470                                 return 0;
10471                         }
10472                 } else
10473                         return 0;
10474         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10475                 size = NVRAM_SELFBOOT_HW_SIZE;
10476         else
10477                 return -EIO;
10478
10479         buf = kmalloc(size, GFP_KERNEL);
10480         if (buf == NULL)
10481                 return -ENOMEM;
10482
10483         err = -EIO;
10484         for (i = 0, j = 0; i < size; i += 4, j++) {
10485                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10486                 if (err)
10487                         break;
10488         }
10489         if (i < size)
10490                 goto out;
10491
10492         /* Selfboot format */
10493         magic = be32_to_cpu(buf[0]);
10494         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10495             TG3_EEPROM_MAGIC_FW) {
10496                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10497
10498                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10499                     TG3_EEPROM_SB_REVISION_2) {
10500                         /* For rev 2, the csum doesn't include the MBA (boot agent) word. */
10501                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10502                                 csum8 += buf8[i];
10503                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10504                                 csum8 += buf8[i];
10505                 } else {
10506                         for (i = 0; i < size; i++)
10507                                 csum8 += buf8[i];
10508                 }
10509
10510                 if (csum8 == 0) {
10511                         err = 0;
10512                         goto out;
10513                 }
10514
10515                 err = -EIO;
10516                 goto out;
10517         }
10518
10519         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10520             TG3_EEPROM_MAGIC_HW) {
10521                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10522                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10523                 u8 *buf8 = (u8 *) buf;
10524
10525                 /* Separate the parity bits and the data bytes.  */
10526                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10527                         if ((i == 0) || (i == 8)) {
10528                                 int l;
10529                                 u8 msk;
10530
10531                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10532                                         parity[k++] = buf8[i] & msk;
10533                                 i++;
10534                         } else if (i == 16) {
10535                                 int l;
10536                                 u8 msk;
10537
10538                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10539                                         parity[k++] = buf8[i] & msk;
10540                                 i++;
10541
10542                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10543                                         parity[k++] = buf8[i] & msk;
10544                                 i++;
10545                         }
10546                         data[j++] = buf8[i];
10547                 }
10548
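                /* A data byte passes only if byte-plus-parity-bit has
                 * odd overall parity: an odd popcount (hweight8) must
                 * pair with a clear parity bit, an even popcount with a
                 * set one.
                 */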
10549                 err = -EIO;
10550                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10551                         u8 hw8 = hweight8(data[i]);
10552
10553                         if ((hw8 & 0x1) && parity[i])
10554                                 goto out;
10555                         else if (!(hw8 & 0x1) && !parity[i])
10556                                 goto out;
10557                 }
10558                 err = 0;
10559                 goto out;
10560         }
10561
10562         err = -EIO;
10563
10564         /* Bootstrap checksum at offset 0x10 */
10565         csum = calc_crc((unsigned char *) buf, 0x10);
10566         if (csum != le32_to_cpu(buf[0x10/4]))
10567                 goto out;
10568
10569         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10570         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10571         if (csum != le32_to_cpu(buf[0xfc/4]))
10572                 goto out;
10573
10574         kfree(buf);
10575
10576         buf = tg3_vpd_readblock(tp);
10577         if (!buf)
10578                 return -ENOMEM;
10579
10580         i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10581                              PCI_VPD_LRDT_RO_DATA);
10582         if (i > 0) {
10583                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10584                 if (j < 0)
10585                         goto out;
10586
10587                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10588                         goto out;
10589
10590                 i += PCI_VPD_LRDT_TAG_SIZE;
10591                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10592                                               PCI_VPD_RO_KEYWORD_CHKSUM);
10593                 if (j > 0) {
10594                         u8 csum8 = 0;
10595
10596                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
10597
10598                         for (i = 0; i <= j; i++)
10599                                 csum8 += ((u8 *)buf)[i];
10600
10601                         if (csum8)
10602                                 goto out;
10603                 }
10604         }
10605
10606         err = 0;
10607
10608 out:
10609         kfree(buf);
10610         return err;
10611 }
10612
10613 #define TG3_SERDES_TIMEOUT_SEC  2
10614 #define TG3_COPPER_TIMEOUT_SEC  6
10615
10616 static int tg3_test_link(struct tg3 *tp)
10617 {
10618         int i, max;
10619
10620         if (!netif_running(tp->dev))
10621                 return -ENODEV;
10622
10623         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10624                 max = TG3_SERDES_TIMEOUT_SEC;
10625         else
10626                 max = TG3_COPPER_TIMEOUT_SEC;
10627
10628         for (i = 0; i < max; i++) {
10629                 if (netif_carrier_ok(tp->dev))
10630                         return 0;
10631
10632                 if (msleep_interruptible(1000))
10633                         break;
10634         }
10635
10636         return -EIO;
10637 }
10638
10639 /* Only test the commonly used registers */
10640 static int tg3_test_registers(struct tg3 *tp)
10641 {
10642         int i, is_5705, is_5750;
10643         u32 offset, read_mask, write_mask, val, save_val, read_val;
10644         static struct {
10645                 u16 offset;
10646                 u16 flags;
10647 #define TG3_FL_5705     0x1
10648 #define TG3_FL_NOT_5705 0x2
10649 #define TG3_FL_NOT_5788 0x4
10650 #define TG3_FL_NOT_5750 0x8
10651                 u32 read_mask;
10652                 u32 write_mask;
10653         } reg_tbl[] = {
10654                 /* MAC Control Registers */
10655                 { MAC_MODE, TG3_FL_NOT_5705,
10656                         0x00000000, 0x00ef6f8c },
10657                 { MAC_MODE, TG3_FL_5705,
10658                         0x00000000, 0x01ef6b8c },
10659                 { MAC_STATUS, TG3_FL_NOT_5705,
10660                         0x03800107, 0x00000000 },
10661                 { MAC_STATUS, TG3_FL_5705,
10662                         0x03800100, 0x00000000 },
10663                 { MAC_ADDR_0_HIGH, 0x0000,
10664                         0x00000000, 0x0000ffff },
10665                 { MAC_ADDR_0_LOW, 0x0000,
10666                         0x00000000, 0xffffffff },
10667                 { MAC_RX_MTU_SIZE, 0x0000,
10668                         0x00000000, 0x0000ffff },
10669                 { MAC_TX_MODE, 0x0000,
10670                         0x00000000, 0x00000070 },
10671                 { MAC_TX_LENGTHS, 0x0000,
10672                         0x00000000, 0x00003fff },
10673                 { MAC_RX_MODE, TG3_FL_NOT_5705,
10674                         0x00000000, 0x000007fc },
10675                 { MAC_RX_MODE, TG3_FL_5705,
10676                         0x00000000, 0x000007dc },
10677                 { MAC_HASH_REG_0, 0x0000,
10678                         0x00000000, 0xffffffff },
10679                 { MAC_HASH_REG_1, 0x0000,
10680                         0x00000000, 0xffffffff },
10681                 { MAC_HASH_REG_2, 0x0000,
10682                         0x00000000, 0xffffffff },
10683                 { MAC_HASH_REG_3, 0x0000,
10684                         0x00000000, 0xffffffff },
10685
10686                 /* Receive Data and Receive BD Initiator Control Registers. */
10687                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10688                         0x00000000, 0xffffffff },
10689                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10690                         0x00000000, 0xffffffff },
10691                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10692                         0x00000000, 0x00000003 },
10693                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10694                         0x00000000, 0xffffffff },
10695                 { RCVDBDI_STD_BD+0, 0x0000,
10696                         0x00000000, 0xffffffff },
10697                 { RCVDBDI_STD_BD+4, 0x0000,
10698                         0x00000000, 0xffffffff },
10699                 { RCVDBDI_STD_BD+8, 0x0000,
10700                         0x00000000, 0xffff0002 },
10701                 { RCVDBDI_STD_BD+0xc, 0x0000,
10702                         0x00000000, 0xffffffff },
10703
10704                 /* Receive BD Initiator Control Registers. */
10705                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10706                         0x00000000, 0xffffffff },
10707                 { RCVBDI_STD_THRESH, TG3_FL_5705,
10708                         0x00000000, 0x000003ff },
10709                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10710                         0x00000000, 0xffffffff },
10711
10712                 /* Host Coalescing Control Registers. */
10713                 { HOSTCC_MODE, TG3_FL_NOT_5705,
10714                         0x00000000, 0x00000004 },
10715                 { HOSTCC_MODE, TG3_FL_5705,
10716                         0x00000000, 0x000000f6 },
10717                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10718                         0x00000000, 0xffffffff },
10719                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10720                         0x00000000, 0x000003ff },
10721                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10722                         0x00000000, 0xffffffff },
10723                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10724                         0x00000000, 0x000003ff },
10725                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10726                         0x00000000, 0xffffffff },
10727                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10728                         0x00000000, 0x000000ff },
10729                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10730                         0x00000000, 0xffffffff },
10731                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10732                         0x00000000, 0x000000ff },
10733                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10734                         0x00000000, 0xffffffff },
10735                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10736                         0x00000000, 0xffffffff },
10737                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10738                         0x00000000, 0xffffffff },
10739                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10740                         0x00000000, 0x000000ff },
10741                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10742                         0x00000000, 0xffffffff },
10743                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10744                         0x00000000, 0x000000ff },
10745                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10746                         0x00000000, 0xffffffff },
10747                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10748                         0x00000000, 0xffffffff },
10749                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10750                         0x00000000, 0xffffffff },
10751                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10752                         0x00000000, 0xffffffff },
10753                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10754                         0x00000000, 0xffffffff },
10755                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10756                         0xffffffff, 0x00000000 },
10757                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10758                         0xffffffff, 0x00000000 },
10759
10760                 /* Buffer Manager Control Registers. */
10761                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10762                         0x00000000, 0x007fff80 },
10763                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10764                         0x00000000, 0x007fffff },
10765                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10766                         0x00000000, 0x0000003f },
10767                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10768                         0x00000000, 0x000001ff },
10769                 { BUFMGR_MB_HIGH_WATER, 0x0000,
10770                         0x00000000, 0x000001ff },
10771                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10772                         0xffffffff, 0x00000000 },
10773                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10774                         0xffffffff, 0x00000000 },
10775
10776                 /* Mailbox Registers */
10777                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10778                         0x00000000, 0x000001ff },
10779                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10780                         0x00000000, 0x000001ff },
10781                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10782                         0x00000000, 0x000007ff },
10783                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10784                         0x00000000, 0x000001ff },
10785
10786                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10787         };
10788
10789         is_5705 = is_5750 = 0;
10790         if (tg3_flag(tp, 5705_PLUS)) {
10791                 is_5705 = 1;
10792                 if (tg3_flag(tp, 5750_PLUS))
10793                         is_5750 = 1;
10794         }
10795
10796         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10797                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10798                         continue;
10799
10800                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10801                         continue;
10802
10803                 if (tg3_flag(tp, IS_5788) &&
10804                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
10805                         continue;
10806
10807                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10808                         continue;
10809
10810                 offset = (u32) reg_tbl[i].offset;
10811                 read_mask = reg_tbl[i].read_mask;
10812                 write_mask = reg_tbl[i].write_mask;
10813
10814                 /* Save the original register content */
10815                 save_val = tr32(offset);
10816
10817                 /* Determine the read-only value. */
10818                 read_val = save_val & read_mask;
10819
10820                 /* Write zero to the register, then make sure the read-only bits
10821                  * are not changed and the read/write bits are all zeros.
10822                  */
10823                 tw32(offset, 0);
10824
10825                 val = tr32(offset);
10826
10827                 /* Test the read-only and read/write bits. */
10828                 if (((val & read_mask) != read_val) || (val & write_mask))
10829                         goto out;
10830
10831                 /* Write ones to all the bits defined by read_mask and write_mask, then
10832                  * make sure the read-only bits are not changed and the
10833                  * read/write bits are all ones.
10834                  */
10835                 tw32(offset, read_mask | write_mask);
10836
10837                 val = tr32(offset);
10838
10839                 /* Test the read-only bits. */
10840                 if ((val & read_mask) != read_val)
10841                         goto out;
10842
10843                 /* Test the read/write bits. */
10844                 if ((val & write_mask) != write_mask)
10845                         goto out;
10846
10847                 tw32(offset, save_val);
10848         }
10849
10850         return 0;
10851
10852 out:
10853         if (netif_msg_hw(tp))
10854                 netdev_err(tp->dev,
10855                            "Register test failed at offset %x\n", offset);
10856         tw32(offset, save_val);
10857         return -EIO;
10858 }
10859
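/* Walk the given window of chip-internal memory, writing each test
 * pattern with tg3_write_mem() and failing on the first word that
 * reads back differently via tg3_read_mem().
 */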
10860 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10861 {
10862         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10863         int i;
10864         u32 j;
10865
10866         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10867                 for (j = 0; j < len; j += 4) {
10868                         u32 val;
10869
10870                         tg3_write_mem(tp, offset + j, test_pattern[i]);
10871                         tg3_read_mem(tp, offset + j, &val);
10872                         if (val != test_pattern[i])
10873                                 return -EIO;
10874                 }
10875         }
10876         return 0;
10877 }
10878
10879 static int tg3_test_memory(struct tg3 *tp)
10880 {
10881         static struct mem_entry {
10882                 u32 offset;
10883                 u32 len;
10884         } mem_tbl_570x[] = {
10885                 { 0x00000000, 0x00b50},
10886                 { 0x00002000, 0x1c000},
10887                 { 0xffffffff, 0x00000}
10888         }, mem_tbl_5705[] = {
10889                 { 0x00000100, 0x0000c},
10890                 { 0x00000200, 0x00008},
10891                 { 0x00004000, 0x00800},
10892                 { 0x00006000, 0x01000},
10893                 { 0x00008000, 0x02000},
10894                 { 0x00010000, 0x0e000},
10895                 { 0xffffffff, 0x00000}
10896         }, mem_tbl_5755[] = {
10897                 { 0x00000200, 0x00008},
10898                 { 0x00004000, 0x00800},
10899                 { 0x00006000, 0x00800},
10900                 { 0x00008000, 0x02000},
10901                 { 0x00010000, 0x0c000},
10902                 { 0xffffffff, 0x00000}
10903         }, mem_tbl_5906[] = {
10904                 { 0x00000200, 0x00008},
10905                 { 0x00004000, 0x00400},
10906                 { 0x00006000, 0x00400},
10907                 { 0x00008000, 0x01000},
10908                 { 0x00010000, 0x01000},
10909                 { 0xffffffff, 0x00000}
10910         }, mem_tbl_5717[] = {
10911                 { 0x00000200, 0x00008},
10912                 { 0x00010000, 0x0a000},
10913                 { 0x00020000, 0x13c00},
10914                 { 0xffffffff, 0x00000}
10915         }, mem_tbl_57765[] = {
10916                 { 0x00000200, 0x00008},
10917                 { 0x00004000, 0x00800},
10918                 { 0x00006000, 0x09800},
10919                 { 0x00010000, 0x0a000},
10920                 { 0xffffffff, 0x00000}
10921         };
10922         struct mem_entry *mem_tbl;
10923         int err = 0;
10924         int i;
10925
10926         if (tg3_flag(tp, 5717_PLUS))
10927                 mem_tbl = mem_tbl_5717;
10928         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10929                 mem_tbl = mem_tbl_57765;
10930         else if (tg3_flag(tp, 5755_PLUS))
10931                 mem_tbl = mem_tbl_5755;
10932         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10933                 mem_tbl = mem_tbl_5906;
10934         else if (tg3_flag(tp, 5705_PLUS))
10935                 mem_tbl = mem_tbl_5705;
10936         else
10937                 mem_tbl = mem_tbl_570x;
10938
10939         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10940                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
10941                 if (err)
10942                         break;
10943         }
10944
10945         return err;
10946 }
10947
10948 #define TG3_MAC_LOOPBACK        0
10949 #define TG3_PHY_LOOPBACK        1
10950 #define TG3_TSO_LOOPBACK        2
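/* Loopback variants: TG3_MAC_LOOPBACK wraps the frame inside the MAC,
 * TG3_PHY_LOOPBACK wraps it at the PHY, and TG3_TSO_LOOPBACK uses the
 * PHY path with a canned TSO frame to exercise hardware segmentation.
 */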
10951
10952 #define TG3_TSO_MSS             500
10953
10954 #define TG3_TSO_IP_HDR_LEN      20
10955 #define TG3_TSO_TCP_HDR_LEN     20
10956 #define TG3_TSO_TCP_OPT_LEN     12
10957
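/* Canned test-frame header: a 2-byte Ethernet type (0x0800, IPv4)
 * followed by a 20-byte IPv4 header and a TCP header with 12 bytes of
 * options (NOP, NOP, timestamp), matching the TG3_TSO_*_LEN constants
 * above.  The IP total-length field (and, on HW-TSO chips, the TCP
 * checksum) is patched at run time in tg3_run_loopback().
 */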
10958 static const u8 tg3_tso_header[] = {
10959 0x08, 0x00,
10960 0x45, 0x00, 0x00, 0x00,
10961 0x00, 0x00, 0x40, 0x00,
10962 0x40, 0x06, 0x00, 0x00,
10963 0x0a, 0x00, 0x00, 0x01,
10964 0x0a, 0x00, 0x00, 0x02,
10965 0x0d, 0x00, 0xe0, 0x00,
10966 0x00, 0x00, 0x01, 0x00,
10967 0x00, 0x00, 0x02, 0x00,
10968 0x80, 0x10, 0x10, 0x00,
10969 0x14, 0x09, 0x00, 0x00,
10970 0x01, 0x01, 0x08, 0x0a,
10971 0x11, 0x11, 0x11, 0x11,
10972 0x11, 0x11, 0x11, 0x11,
10973 };
10974
10975 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
10976 {
10977         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10978         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
10979         struct sk_buff *skb, *rx_skb;
10980         u8 *tx_data;
10981         dma_addr_t map;
10982         int num_pkts, tx_len, rx_len, i, err;
10983         struct tg3_rx_buffer_desc *desc;
10984         struct tg3_napi *tnapi, *rnapi;
10985         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
10986
10987         tnapi = &tp->napi[0];
10988         rnapi = &tp->napi[0];
10989         if (tp->irq_cnt > 1) {
10990                 if (tg3_flag(tp, ENABLE_RSS))
10991                         rnapi = &tp->napi[1];
10992                 if (tg3_flag(tp, ENABLE_TSS))
10993                         tnapi = &tp->napi[1];
10994         }
10995         coal_now = tnapi->coal_now | rnapi->coal_now;
10996
10997         if (loopback_mode == TG3_MAC_LOOPBACK) {
10998                 /* HW erratum: MAC loopback fails in some cases on the 5780.
10999                  * Normal traffic and PHY loopback are not affected by this
11000                  * erratum.  Also, the MAC loopback test is deprecated on
11001                  * all newer ASIC revisions.
11002                  */
11003                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11004                     tg3_flag(tp, CPMU_PRESENT))
11005                         return 0;
11006
11007                 mac_mode = tp->mac_mode &
11008                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11009                 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11010                 if (!tg3_flag(tp, 5705_PLUS))
11011                         mac_mode |= MAC_MODE_LINK_POLARITY;
11012                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11013                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11014                 else
11015                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11016                 tw32(MAC_MODE, mac_mode);
11017         } else {
11018                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11019                         tg3_phy_fet_toggle_apd(tp, false);
11020                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11021                 } else
11022                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11023
11024                 tg3_phy_toggle_automdix(tp, 0);
11025
11026                 tg3_writephy(tp, MII_BMCR, val);
11027                 udelay(40);
11028
11029                 mac_mode = tp->mac_mode &
11030                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11031                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11032                         tg3_writephy(tp, MII_TG3_FET_PTEST,
11033                                      MII_TG3_FET_PTEST_FRC_TX_LINK |
11034                                      MII_TG3_FET_PTEST_FRC_TX_LOCK);
11035                         /* The write needs to be flushed for the AC131 */
11036                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11037                                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11038                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11039                 } else
11040                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11041
11042                 /* Reset the RX MAC to avoid intermittently losing the first RX packet. */
11043                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11044                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11045                         udelay(10);
11046                         tw32_f(MAC_RX_MODE, tp->rx_mode);
11047                 }
11048                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11049                         u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11050                         if (masked_phy_id == TG3_PHY_ID_BCM5401)
11051                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11052                         else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11053                                 mac_mode |= MAC_MODE_LINK_POLARITY;
11054                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
11055                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11056                 }
11057                 tw32(MAC_MODE, mac_mode);
11058
11059                 /* Wait for link */
11060                 for (i = 0; i < 100; i++) {
11061                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11062                                 break;
11063                         mdelay(1);
11064                 }
11065         }
11066
11067         err = -EIO;
11068
11069         tx_len = pktsz;
11070         skb = netdev_alloc_skb(tp->dev, tx_len);
11071         if (!skb)
11072                 return -ENOMEM;
11073
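        /* Test frame layout: the destination MAC is the device's own
         * address, the source MAC and ethertype bytes are zeroed, and
         * the payload is an incrementing byte pattern that the receive
         * path verifies below.
         */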
11074         tx_data = skb_put(skb, tx_len);
11075         memcpy(tx_data, tp->dev->dev_addr, 6);
11076         memset(tx_data + 6, 0x0, 8);
11077
11078         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11079
11080         if (loopback_mode == TG3_TSO_LOOPBACK) {
11081                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11082
11083                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11084                               TG3_TSO_TCP_OPT_LEN;
11085
11086                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11087                        sizeof(tg3_tso_header));
11088                 mss = TG3_TSO_MSS;
11089
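                /* The hardware segments the payload into MSS-sized
                 * frames, so expect one rx completion per segment.
                 */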
11090                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11091                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11092
11093                 /* Set the total length field in the IP header */
11094                 iph->tot_len = htons((u16)(mss + hdr_len));
11095
11096                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11097                               TXD_FLAG_CPU_POST_DMA);
11098
11099                 if (tg3_flag(tp, HW_TSO_1) ||
11100                     tg3_flag(tp, HW_TSO_2) ||
11101                     tg3_flag(tp, HW_TSO_3)) {
11102                         struct tcphdr *th;
11103                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11104                         th = (struct tcphdr *)&tx_data[val];
11105                         th->check = 0;
11106                 } else
11107                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11108
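                /* Each HW TSO generation encodes the header length
                 * into the mss field or base_flags differently.
                 */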
11109                 if (tg3_flag(tp, HW_TSO_3)) {
11110                         mss |= (hdr_len & 0xc) << 12;
11111                         if (hdr_len & 0x10)
11112                                 base_flags |= 0x00000010;
11113                         base_flags |= (hdr_len & 0x3e0) << 5;
11114                 } else if (tg3_flag(tp, HW_TSO_2))
11115                         mss |= hdr_len << 9;
11116                 else if (tg3_flag(tp, HW_TSO_1) ||
11117                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11118                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11119                 } else {
11120                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11121                 }
11122
11123                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11124         } else {
11125                 num_pkts = 1;
11126                 data_off = ETH_HLEN;
11127         }
11128
11129         for (i = data_off; i < tx_len; i++)
11130                 tx_data[i] = (u8) (i & 0xff);
11131
11132         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11133         if (pci_dma_mapping_error(tp->pdev, map)) {
11134                 dev_kfree_skb(skb);
11135                 return -EIO;
11136         }
11137
11138         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11139                rnapi->coal_now);
11140
11141         udelay(10);
11142
11143         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11144
11145         tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11146                     base_flags, (mss << 1) | 1);
11147
11148         tnapi->tx_prod++;
11149
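        /* Post the new tx producer index, then read the mailbox back
         * to flush the posted write to the chip.
         */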
11150         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11151         tr32_mailbox(tnapi->prodmbox);
11152
11153         udelay(10);
11154
11155         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11156         for (i = 0; i < 35; i++) {
11157                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11158                        coal_now);
11159
11160                 udelay(10);
11161
11162                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11163                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11164                 if ((tx_idx == tnapi->tx_prod) &&
11165                     (rx_idx == (rx_start_idx + num_pkts)))
11166                         break;
11167         }
11168
11169         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11170         dev_kfree_skb(skb);
11171
11172         if (tx_idx != tnapi->tx_prod)
11173                 goto out;
11174
11175         if (rx_idx != rx_start_idx + num_pkts)
11176                 goto out;
11177
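        /* Walk the rx return ring and validate every received frame. */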
11178         val = data_off;
11179         while (rx_idx != rx_start_idx) {
11180                 desc = &rnapi->rx_rcb[rx_start_idx++];
11181                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11182                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11183
11184                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11185                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11186                         goto out;
11187
11188                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11189                          - ETH_FCS_LEN;
11190
11191                 if (loopback_mode != TG3_TSO_LOOPBACK) {
11192                         if (rx_len != tx_len)
11193                                 goto out;
11194
11195                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11196                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11197                                         goto out;
11198                         } else {
11199                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11200                                         goto out;
11201                         }
11202                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11203                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11204                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
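                        /* The hardware reports a correctly checksummed
                         * TCP segment as 0xffff; anything else means
                         * the checksum offload failed.
                         */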
11205                         goto out;
11206                 }
11207
11208                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11209                         rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11210                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11211                                              mapping);
11212                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11213                         rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11214                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11215                                              mapping);
11216                 } else
11217                         goto out;
11218
11219                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11220                                             PCI_DMA_FROMDEVICE);
11221
11222                 for (i = data_off; i < rx_len; i++, val++) {
11223                         if (*(rx_skb->data + i) != (u8) (val & 0xff))
11224                                 goto out;
11225                 }
11226         }
11227
11228         err = 0;
11229
11230         /* tg3_free_rings will unmap and free the rx_skb */
11231 out:
11232         return err;
11233 }
11234
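/* Each loopback test records std/jumbo/TSO failures as separate bits,
 * shifted into a MAC or PHY nibble via the shifts below; 0x77 means
 * every test failed.
 */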
11235 #define TG3_STD_LOOPBACK_FAILED         1
11236 #define TG3_JMB_LOOPBACK_FAILED         2
11237 #define TG3_TSO_LOOPBACK_FAILED         4
11238
11239 #define TG3_MAC_LOOPBACK_SHIFT          0
11240 #define TG3_PHY_LOOPBACK_SHIFT          4
11241 #define TG3_LOOPBACK_FAILED             0x00000077
11242
11243 static int tg3_test_loopback(struct tg3 *tp)
11244 {
11245         int err = 0;
11246         u32 eee_cap, cpmuctrl = 0;
11247
11248         if (!netif_running(tp->dev))
11249                 return TG3_LOOPBACK_FAILED;
11250
11251         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11252         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11253
11254         err = tg3_reset_hw(tp, 1);
11255         if (err) {
11256                 err = TG3_LOOPBACK_FAILED;
11257                 goto done;
11258         }
11259
11260         if (tg3_flag(tp, ENABLE_RSS)) {
11261                 int i;
11262
11263                 /* Reroute all rx packets to the 1st queue */
11264                 for (i = MAC_RSS_INDIR_TBL_0;
11265                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11266                         tw32(i, 0x0);
11267         }
11268
11269         /* Turn off gphy autopowerdown. */
11270         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11271                 tg3_phy_toggle_apd(tp, false);
11272
11273         if (tg3_flag(tp, CPMU_PRESENT)) {
11274                 int i;
11275                 u32 status;
11276
11277                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11278
11279                 /* Wait for up to 40 microseconds to acquire lock. */
11280                 for (i = 0; i < 4; i++) {
11281                         status = tr32(TG3_CPMU_MUTEX_GNT);
11282                         if (status == CPMU_MUTEX_GNT_DRIVER)
11283                                 break;
11284                         udelay(10);
11285                 }
11286
11287                 if (status != CPMU_MUTEX_GNT_DRIVER) {
11288                         err = TG3_LOOPBACK_FAILED;
11289                         goto done;
11290                 }
11291
11292                 /* Turn off link-based power management. */
11293                 cpmuctrl = tr32(TG3_CPMU_CTRL);
11294                 tw32(TG3_CPMU_CTRL,
11295                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11296                                   CPMU_CTRL_LINK_AWARE_MODE));
11297         }
11298
11299         if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11300                 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11301
11302         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11303             tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11304                 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11305
11306         if (tg3_flag(tp, CPMU_PRESENT)) {
11307                 tw32(TG3_CPMU_CTRL, cpmuctrl);
11308
11309                 /* Release the mutex */
11310                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11311         }
11312
11313         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11314             !tg3_flag(tp, USE_PHYLIB)) {
11315                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11316                         err |= TG3_STD_LOOPBACK_FAILED <<
11317                                TG3_PHY_LOOPBACK_SHIFT;
11318                 if (tg3_flag(tp, TSO_CAPABLE) &&
11319                     tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11320                         err |= TG3_TSO_LOOPBACK_FAILED <<
11321                                TG3_PHY_LOOPBACK_SHIFT;
11322                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11323                     tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11324                         err |= TG3_JMB_LOOPBACK_FAILED <<
11325                                TG3_PHY_LOOPBACK_SHIFT;
11326         }
11327
11328         /* Re-enable gphy autopowerdown. */
11329         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11330                 tg3_phy_toggle_apd(tp, true);
11331
11332 done:
11333         tp->phy_flags |= eee_cap;
11334
11335         return err;
11336 }
11337
11338 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11339                           u64 *data)
11340 {
11341         struct tg3 *tp = netdev_priv(dev);
11342
11343         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11344                 tg3_power_up(tp);
11345
11346         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11347
11348         if (tg3_test_nvram(tp) != 0) {
11349                 etest->flags |= ETH_TEST_FL_FAILED;
11350                 data[0] = 1;
11351         }
11352         if (tg3_test_link(tp) != 0) {
11353                 etest->flags |= ETH_TEST_FL_FAILED;
11354                 data[1] = 1;
11355         }
11356         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11357                 int err, err2 = 0, irq_sync = 0;
11358
11359                 if (netif_running(dev)) {
11360                         tg3_phy_stop(tp);
11361                         tg3_netif_stop(tp);
11362                         irq_sync = 1;
11363                 }
11364
11365                 tg3_full_lock(tp, irq_sync);
11366
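                /* Quiesce the chip: halt the MAC and the on-chip rx
                 * CPU (plus the tx CPU on pre-5705 parts) while
                 * holding the NVRAM lock.
                 */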
11367                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11368                 err = tg3_nvram_lock(tp);
11369                 tg3_halt_cpu(tp, RX_CPU_BASE);
11370                 if (!tg3_flag(tp, 5705_PLUS))
11371                         tg3_halt_cpu(tp, TX_CPU_BASE);
11372                 if (!err)
11373                         tg3_nvram_unlock(tp);
11374
11375                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11376                         tg3_phy_reset(tp);
11377
11378                 if (tg3_test_registers(tp) != 0) {
11379                         etest->flags |= ETH_TEST_FL_FAILED;
11380                         data[2] = 1;
11381                 }
11382                 if (tg3_test_memory(tp) != 0) {
11383                         etest->flags |= ETH_TEST_FL_FAILED;
11384                         data[3] = 1;
11385                 }
11386                 if ((data[4] = tg3_test_loopback(tp)) != 0)
11387                         etest->flags |= ETH_TEST_FL_FAILED;
11388
11389                 tg3_full_unlock(tp);
11390
11391                 if (tg3_test_interrupt(tp) != 0) {
11392                         etest->flags |= ETH_TEST_FL_FAILED;
11393                         data[5] = 1;
11394                 }
11395
11396                 tg3_full_lock(tp, 0);
11397
11398                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11399                 if (netif_running(dev)) {
11400                         tg3_flag_set(tp, INIT_COMPLETE);
11401                         err2 = tg3_restart_hw(tp, 1);
11402                         if (!err2)
11403                                 tg3_netif_start(tp);
11404                 }
11405
11406                 tg3_full_unlock(tp);
11407
11408                 if (irq_sync && !err2)
11409                         tg3_phy_start(tp);
11410         }
11411         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11412                 tg3_power_down(tp);
11413
11414 }
11415
11416 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11417 {
11418         struct mii_ioctl_data *data = if_mii(ifr);
11419         struct tg3 *tp = netdev_priv(dev);
11420         int err;
11421
11422         if (tg3_flag(tp, USE_PHYLIB)) {
11423                 struct phy_device *phydev;
11424                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11425                         return -EAGAIN;
11426                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11427                 return phy_mii_ioctl(phydev, ifr, cmd);
11428         }
11429
11430         switch (cmd) {
11431         case SIOCGMIIPHY:
11432                 data->phy_id = tp->phy_addr;
11433
11434                 /* fallthru */
11435         case SIOCGMIIREG: {
11436                 u32 mii_regval;
11437
11438                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11439                         break;                  /* We have no PHY */
11440
11441                 if (!netif_running(dev))
11442                         return -EAGAIN;
11443
11444                 spin_lock_bh(&tp->lock);
11445                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11446                 spin_unlock_bh(&tp->lock);
11447
11448                 data->val_out = mii_regval;
11449
11450                 return err;
11451         }
11452
11453         case SIOCSMIIREG:
11454                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11455                         break;                  /* We have no PHY */
11456
11457                 if (!netif_running(dev))
11458                         return -EAGAIN;
11459
11460                 spin_lock_bh(&tp->lock);
11461                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11462                 spin_unlock_bh(&tp->lock);
11463
11464                 return err;
11465
11466         default:
11467                 /* do nothing */
11468                 break;
11469         }
11470         return -EOPNOTSUPP;
11471 }
11472
11473 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11474 {
11475         struct tg3 *tp = netdev_priv(dev);
11476
11477         memcpy(ec, &tp->coal, sizeof(*ec));
11478         return 0;
11479 }
11480
11481 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11482 {
11483         struct tg3 *tp = netdev_priv(dev);
11484         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11485         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11486
11487         if (!tg3_flag(tp, 5705_PLUS)) {
11488                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11489                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11490                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11491                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11492         }
11493
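        /* Reject any request that exceeds the hardware limits. */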
11494         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11495             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11496             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11497             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11498             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11499             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11500             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11501             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11502             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11503             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11504                 return -EINVAL;
11505
11506         /* No rx interrupts will be generated if both are zero */
11507         if ((ec->rx_coalesce_usecs == 0) &&
11508             (ec->rx_max_coalesced_frames == 0))
11509                 return -EINVAL;
11510
11511         /* No tx interrupts will be generated if both are zero */
11512         if ((ec->tx_coalesce_usecs == 0) &&
11513             (ec->tx_max_coalesced_frames == 0))
11514                 return -EINVAL;
11515
11516         /* Only copy relevant parameters, ignore all others. */
11517         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11518         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11519         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11520         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11521         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11522         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11523         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11524         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11525         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11526
11527         if (netif_running(dev)) {
11528                 tg3_full_lock(tp, 0);
11529                 __tg3_set_coalesce(tp, &tp->coal);
11530                 tg3_full_unlock(tp);
11531         }
11532         return 0;
11533 }
11534
11535 static const struct ethtool_ops tg3_ethtool_ops = {
11536         .get_settings           = tg3_get_settings,
11537         .set_settings           = tg3_set_settings,
11538         .get_drvinfo            = tg3_get_drvinfo,
11539         .get_regs_len           = tg3_get_regs_len,
11540         .get_regs               = tg3_get_regs,
11541         .get_wol                = tg3_get_wol,
11542         .set_wol                = tg3_set_wol,
11543         .get_msglevel           = tg3_get_msglevel,
11544         .set_msglevel           = tg3_set_msglevel,
11545         .nway_reset             = tg3_nway_reset,
11546         .get_link               = ethtool_op_get_link,
11547         .get_eeprom_len         = tg3_get_eeprom_len,
11548         .get_eeprom             = tg3_get_eeprom,
11549         .set_eeprom             = tg3_set_eeprom,
11550         .get_ringparam          = tg3_get_ringparam,
11551         .set_ringparam          = tg3_set_ringparam,
11552         .get_pauseparam         = tg3_get_pauseparam,
11553         .set_pauseparam         = tg3_set_pauseparam,
11554         .self_test              = tg3_self_test,
11555         .get_strings            = tg3_get_strings,
11556         .set_phys_id            = tg3_set_phys_id,
11557         .get_ethtool_stats      = tg3_get_ethtool_stats,
11558         .get_coalesce           = tg3_get_coalesce,
11559         .set_coalesce           = tg3_set_coalesce,
11560         .get_sset_count         = tg3_get_sset_count,
11561 };
11562
11563 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11564 {
11565         u32 cursize, val, magic;
11566
11567         tp->nvram_size = EEPROM_CHIP_SIZE;
11568
11569         if (tg3_nvram_read(tp, 0, &magic) != 0)
11570                 return;
11571
11572         if ((magic != TG3_EEPROM_MAGIC) &&
11573             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11574             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11575                 return;
11576
11577         /*
11578          * Size the chip by reading offsets at increasing powers of two.
11579          * When we encounter our validation signature, we know the addressing
11580          * has wrapped around, and thus have our chip size.
11581          */
11582         cursize = 0x10;
11583
11584         while (cursize < tp->nvram_size) {
11585                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11586                         return;
11587
11588                 if (val == magic)
11589                         break;
11590
11591                 cursize <<= 1;
11592         }
11593
11594         tp->nvram_size = cursize;
11595 }
11596
11597 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11598 {
11599         u32 val;
11600
11601         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11602                 return;
11603
11604         /* Selfboot format */
11605         if (val != TG3_EEPROM_MAGIC) {
11606                 tg3_get_eeprom_size(tp);
11607                 return;
11608         }
11609
11610         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11611                 if (val != 0) {
11612                         /* This is confusing.  We want to operate on the
11613                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11614                          * call will read from NVRAM and byteswap the data
11615                          * according to the byteswapping settings for all
11616                          * other register accesses.  This ensures the data we
11617                          * want will always reside in the lower 16-bits.
11618                          * However, the data in NVRAM is in LE format, which
11619                          * means the data from the NVRAM read will always be
11620                          * opposite the endianness of the CPU.  The 16-bit
11621                          * byteswap then brings the data to CPU endianness.
11622                          */
11623                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11624                         return;
11625                 }
11626         }
11627         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11628 }
11629
11630 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11631 {
11632         u32 nvcfg1;
11633
11634         nvcfg1 = tr32(NVRAM_CFG1);
11635         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11636                 tg3_flag_set(tp, FLASH);
11637         } else {
11638                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11639                 tw32(NVRAM_CFG1, nvcfg1);
11640         }
11641
11642         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11643             tg3_flag(tp, 5780_CLASS)) {
11644                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11645                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11646                         tp->nvram_jedecnum = JEDEC_ATMEL;
11647                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11648                         tg3_flag_set(tp, NVRAM_BUFFERED);
11649                         break;
11650                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11651                         tp->nvram_jedecnum = JEDEC_ATMEL;
11652                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11653                         break;
11654                 case FLASH_VENDOR_ATMEL_EEPROM:
11655                         tp->nvram_jedecnum = JEDEC_ATMEL;
11656                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11657                         tg3_flag_set(tp, NVRAM_BUFFERED);
11658                         break;
11659                 case FLASH_VENDOR_ST:
11660                         tp->nvram_jedecnum = JEDEC_ST;
11661                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11662                         tg3_flag_set(tp, NVRAM_BUFFERED);
11663                         break;
11664                 case FLASH_VENDOR_SAIFUN:
11665                         tp->nvram_jedecnum = JEDEC_SAIFUN;
11666                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11667                         break;
11668                 case FLASH_VENDOR_SST_SMALL:
11669                 case FLASH_VENDOR_SST_LARGE:
11670                         tp->nvram_jedecnum = JEDEC_SST;
11671                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11672                         break;
11673                 }
11674         } else {
11675                 tp->nvram_jedecnum = JEDEC_ATMEL;
11676                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11677                 tg3_flag_set(tp, NVRAM_BUFFERED);
11678         }
11679 }
11680
11681 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11682 {
11683         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11684         case FLASH_5752PAGE_SIZE_256:
11685                 tp->nvram_pagesize = 256;
11686                 break;
11687         case FLASH_5752PAGE_SIZE_512:
11688                 tp->nvram_pagesize = 512;
11689                 break;
11690         case FLASH_5752PAGE_SIZE_1K:
11691                 tp->nvram_pagesize = 1024;
11692                 break;
11693         case FLASH_5752PAGE_SIZE_2K:
11694                 tp->nvram_pagesize = 2048;
11695                 break;
11696         case FLASH_5752PAGE_SIZE_4K:
11697                 tp->nvram_pagesize = 4096;
11698                 break;
11699         case FLASH_5752PAGE_SIZE_264:
11700                 tp->nvram_pagesize = 264;
11701                 break;
11702         case FLASH_5752PAGE_SIZE_528:
11703                 tp->nvram_pagesize = 528;
11704                 break;
11705         }
11706 }
11707
11708 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11709 {
11710         u32 nvcfg1;
11711
11712         nvcfg1 = tr32(NVRAM_CFG1);
11713
11714         /* NVRAM protection for TPM */
11715         if (nvcfg1 & (1 << 27))
11716                 tg3_flag_set(tp, PROTECTED_NVRAM);
11717
11718         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11719         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11720         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11721                 tp->nvram_jedecnum = JEDEC_ATMEL;
11722                 tg3_flag_set(tp, NVRAM_BUFFERED);
11723                 break;
11724         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11725                 tp->nvram_jedecnum = JEDEC_ATMEL;
11726                 tg3_flag_set(tp, NVRAM_BUFFERED);
11727                 tg3_flag_set(tp, FLASH);
11728                 break;
11729         case FLASH_5752VENDOR_ST_M45PE10:
11730         case FLASH_5752VENDOR_ST_M45PE20:
11731         case FLASH_5752VENDOR_ST_M45PE40:
11732                 tp->nvram_jedecnum = JEDEC_ST;
11733                 tg3_flag_set(tp, NVRAM_BUFFERED);
11734                 tg3_flag_set(tp, FLASH);
11735                 break;
11736         }
11737
11738         if (tg3_flag(tp, FLASH)) {
11739                 tg3_nvram_get_pagesize(tp, nvcfg1);
11740         } else {
11741                 /* For eeprom, set pagesize to maximum eeprom size */
11742                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11743
11744                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11745                 tw32(NVRAM_CFG1, nvcfg1);
11746         }
11747 }
11748
11749 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11750 {
11751         u32 nvcfg1, protect = 0;
11752
11753         nvcfg1 = tr32(NVRAM_CFG1);
11754
11755         /* NVRAM protection for TPM */
11756         if (nvcfg1 & (1 << 27)) {
11757                 tg3_flag_set(tp, PROTECTED_NVRAM);
11758                 protect = 1;
11759         }
11760
11761         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11762         switch (nvcfg1) {
11763         case FLASH_5755VENDOR_ATMEL_FLASH_1:
11764         case FLASH_5755VENDOR_ATMEL_FLASH_2:
11765         case FLASH_5755VENDOR_ATMEL_FLASH_3:
11766         case FLASH_5755VENDOR_ATMEL_FLASH_5:
11767                 tp->nvram_jedecnum = JEDEC_ATMEL;
11768                 tg3_flag_set(tp, NVRAM_BUFFERED);
11769                 tg3_flag_set(tp, FLASH);
11770                 tp->nvram_pagesize = 264;
11771                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11772                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11773                         tp->nvram_size = (protect ? 0x3e200 :
11774                                           TG3_NVRAM_SIZE_512KB);
11775                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11776                         tp->nvram_size = (protect ? 0x1f200 :
11777                                           TG3_NVRAM_SIZE_256KB);
11778                 else
11779                         tp->nvram_size = (protect ? 0x1f200 :
11780                                           TG3_NVRAM_SIZE_128KB);
11781                 break;
11782         case FLASH_5752VENDOR_ST_M45PE10:
11783         case FLASH_5752VENDOR_ST_M45PE20:
11784         case FLASH_5752VENDOR_ST_M45PE40:
11785                 tp->nvram_jedecnum = JEDEC_ST;
11786                 tg3_flag_set(tp, NVRAM_BUFFERED);
11787                 tg3_flag_set(tp, FLASH);
11788                 tp->nvram_pagesize = 256;
11789                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11790                         tp->nvram_size = (protect ?
11791                                           TG3_NVRAM_SIZE_64KB :
11792                                           TG3_NVRAM_SIZE_128KB);
11793                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11794                         tp->nvram_size = (protect ?
11795                                           TG3_NVRAM_SIZE_64KB :
11796                                           TG3_NVRAM_SIZE_256KB);
11797                 else
11798                         tp->nvram_size = (protect ?
11799                                           TG3_NVRAM_SIZE_128KB :
11800                                           TG3_NVRAM_SIZE_512KB);
11801                 break;
11802         }
11803 }
11804
11805 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11806 {
11807         u32 nvcfg1;
11808
11809         nvcfg1 = tr32(NVRAM_CFG1);
11810
11811         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11812         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11813         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11814         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11815         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11816                 tp->nvram_jedecnum = JEDEC_ATMEL;
11817                 tg3_flag_set(tp, NVRAM_BUFFERED);
11818                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11819
11820                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11821                 tw32(NVRAM_CFG1, nvcfg1);
11822                 break;
11823         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11824         case FLASH_5755VENDOR_ATMEL_FLASH_1:
11825         case FLASH_5755VENDOR_ATMEL_FLASH_2:
11826         case FLASH_5755VENDOR_ATMEL_FLASH_3:
11827                 tp->nvram_jedecnum = JEDEC_ATMEL;
11828                 tg3_flag_set(tp, NVRAM_BUFFERED);
11829                 tg3_flag_set(tp, FLASH);
11830                 tp->nvram_pagesize = 264;
11831                 break;
11832         case FLASH_5752VENDOR_ST_M45PE10:
11833         case FLASH_5752VENDOR_ST_M45PE20:
11834         case FLASH_5752VENDOR_ST_M45PE40:
11835                 tp->nvram_jedecnum = JEDEC_ST;
11836                 tg3_flag_set(tp, NVRAM_BUFFERED);
11837                 tg3_flag_set(tp, FLASH);
11838                 tp->nvram_pagesize = 256;
11839                 break;
11840         }
11841 }
11842
11843 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11844 {
11845         u32 nvcfg1, protect = 0;
11846
11847         nvcfg1 = tr32(NVRAM_CFG1);
11848
11849         /* NVRAM protection for TPM */
11850         if (nvcfg1 & (1 << 27)) {
11851                 tg3_flag_set(tp, PROTECTED_NVRAM);
11852                 protect = 1;
11853         }
11854
11855         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11856         switch (nvcfg1) {
11857         case FLASH_5761VENDOR_ATMEL_ADB021D:
11858         case FLASH_5761VENDOR_ATMEL_ADB041D:
11859         case FLASH_5761VENDOR_ATMEL_ADB081D:
11860         case FLASH_5761VENDOR_ATMEL_ADB161D:
11861         case FLASH_5761VENDOR_ATMEL_MDB021D:
11862         case FLASH_5761VENDOR_ATMEL_MDB041D:
11863         case FLASH_5761VENDOR_ATMEL_MDB081D:
11864         case FLASH_5761VENDOR_ATMEL_MDB161D:
11865                 tp->nvram_jedecnum = JEDEC_ATMEL;
11866                 tg3_flag_set(tp, NVRAM_BUFFERED);
11867                 tg3_flag_set(tp, FLASH);
11868                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11869                 tp->nvram_pagesize = 256;
11870                 break;
11871         case FLASH_5761VENDOR_ST_A_M45PE20:
11872         case FLASH_5761VENDOR_ST_A_M45PE40:
11873         case FLASH_5761VENDOR_ST_A_M45PE80:
11874         case FLASH_5761VENDOR_ST_A_M45PE16:
11875         case FLASH_5761VENDOR_ST_M_M45PE20:
11876         case FLASH_5761VENDOR_ST_M_M45PE40:
11877         case FLASH_5761VENDOR_ST_M_M45PE80:
11878         case FLASH_5761VENDOR_ST_M_M45PE16:
11879                 tp->nvram_jedecnum = JEDEC_ST;
11880                 tg3_flag_set(tp, NVRAM_BUFFERED);
11881                 tg3_flag_set(tp, FLASH);
11882                 tp->nvram_pagesize = 256;
11883                 break;
11884         }
11885
11886         if (protect) {
11887                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11888         } else {
11889                 switch (nvcfg1) {
11890                 case FLASH_5761VENDOR_ATMEL_ADB161D:
11891                 case FLASH_5761VENDOR_ATMEL_MDB161D:
11892                 case FLASH_5761VENDOR_ST_A_M45PE16:
11893                 case FLASH_5761VENDOR_ST_M_M45PE16:
11894                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11895                         break;
11896                 case FLASH_5761VENDOR_ATMEL_ADB081D:
11897                 case FLASH_5761VENDOR_ATMEL_MDB081D:
11898                 case FLASH_5761VENDOR_ST_A_M45PE80:
11899                 case FLASH_5761VENDOR_ST_M_M45PE80:
11900                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11901                         break;
11902                 case FLASH_5761VENDOR_ATMEL_ADB041D:
11903                 case FLASH_5761VENDOR_ATMEL_MDB041D:
11904                 case FLASH_5761VENDOR_ST_A_M45PE40:
11905                 case FLASH_5761VENDOR_ST_M_M45PE40:
11906                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11907                         break;
11908                 case FLASH_5761VENDOR_ATMEL_ADB021D:
11909                 case FLASH_5761VENDOR_ATMEL_MDB021D:
11910                 case FLASH_5761VENDOR_ST_A_M45PE20:
11911                 case FLASH_5761VENDOR_ST_M_M45PE20:
11912                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11913                         break;
11914                 }
11915         }
11916 }
11917
11918 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11919 {
11920         tp->nvram_jedecnum = JEDEC_ATMEL;
11921         tg3_flag_set(tp, NVRAM_BUFFERED);
11922         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11923 }
11924
11925 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11926 {
11927         u32 nvcfg1;
11928
11929         nvcfg1 = tr32(NVRAM_CFG1);
11930
11931         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11932         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11933         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11934                 tp->nvram_jedecnum = JEDEC_ATMEL;
11935                 tg3_flag_set(tp, NVRAM_BUFFERED);
11936                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11937
11938                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11939                 tw32(NVRAM_CFG1, nvcfg1);
11940                 return;
11941         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11942         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11943         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11944         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11945         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11946         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11947         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11948                 tp->nvram_jedecnum = JEDEC_ATMEL;
11949                 tg3_flag_set(tp, NVRAM_BUFFERED);
11950                 tg3_flag_set(tp, FLASH);
11951
11952                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11953                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11954                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11955                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11956                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11957                         break;
11958                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11959                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11960                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11961                         break;
11962                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11963                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11964                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11965                         break;
11966                 }
11967                 break;
11968         case FLASH_5752VENDOR_ST_M45PE10:
11969         case FLASH_5752VENDOR_ST_M45PE20:
11970         case FLASH_5752VENDOR_ST_M45PE40:
11971                 tp->nvram_jedecnum = JEDEC_ST;
11972                 tg3_flag_set(tp, NVRAM_BUFFERED);
11973                 tg3_flag_set(tp, FLASH);
11974
11975                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11976                 case FLASH_5752VENDOR_ST_M45PE10:
11977                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11978                         break;
11979                 case FLASH_5752VENDOR_ST_M45PE20:
11980                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11981                         break;
11982                 case FLASH_5752VENDOR_ST_M45PE40:
11983                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11984                         break;
11985                 }
11986                 break;
11987         default:
11988                 tg3_flag_set(tp, NO_NVRAM);
11989                 return;
11990         }
11991
11992         tg3_nvram_get_pagesize(tp, nvcfg1);
11993         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11994                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11995 }
11996
11998 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11999 {
12000         u32 nvcfg1;
12001
12002         nvcfg1 = tr32(NVRAM_CFG1);
12003
12004         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12005         case FLASH_5717VENDOR_ATMEL_EEPROM:
12006         case FLASH_5717VENDOR_MICRO_EEPROM:
12007                 tp->nvram_jedecnum = JEDEC_ATMEL;
12008                 tg3_flag_set(tp, NVRAM_BUFFERED);
12009                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12010
12011                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12012                 tw32(NVRAM_CFG1, nvcfg1);
12013                 return;
12014         case FLASH_5717VENDOR_ATMEL_MDB011D:
12015         case FLASH_5717VENDOR_ATMEL_ADB011B:
12016         case FLASH_5717VENDOR_ATMEL_ADB011D:
12017         case FLASH_5717VENDOR_ATMEL_MDB021D:
12018         case FLASH_5717VENDOR_ATMEL_ADB021B:
12019         case FLASH_5717VENDOR_ATMEL_ADB021D:
12020         case FLASH_5717VENDOR_ATMEL_45USPT:
12021                 tp->nvram_jedecnum = JEDEC_ATMEL;
12022                 tg3_flag_set(tp, NVRAM_BUFFERED);
12023                 tg3_flag_set(tp, FLASH);
12024
12025                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12026                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12027                         /* Detect size with tg3_get_nvram_size() */
12028                         break;
12029                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12030                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12031                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12032                         break;
12033                 default:
12034                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12035                         break;
12036                 }
12037                 break;
12038         case FLASH_5717VENDOR_ST_M_M25PE10:
12039         case FLASH_5717VENDOR_ST_A_M25PE10:
12040         case FLASH_5717VENDOR_ST_M_M45PE10:
12041         case FLASH_5717VENDOR_ST_A_M45PE10:
12042         case FLASH_5717VENDOR_ST_M_M25PE20:
12043         case FLASH_5717VENDOR_ST_A_M25PE20:
12044         case FLASH_5717VENDOR_ST_M_M45PE20:
12045         case FLASH_5717VENDOR_ST_A_M45PE20:
12046         case FLASH_5717VENDOR_ST_25USPT:
12047         case FLASH_5717VENDOR_ST_45USPT:
12048                 tp->nvram_jedecnum = JEDEC_ST;
12049                 tg3_flag_set(tp, NVRAM_BUFFERED);
12050                 tg3_flag_set(tp, FLASH);
12051
12052                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12053                 case FLASH_5717VENDOR_ST_M_M25PE20:
12054                 case FLASH_5717VENDOR_ST_M_M45PE20:
12055                         /* Detect size with tg3_get_nvram_size() */
12056                         break;
12057                 case FLASH_5717VENDOR_ST_A_M25PE20:
12058                 case FLASH_5717VENDOR_ST_A_M45PE20:
12059                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12060                         break;
12061                 default:
12062                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12063                         break;
12064                 }
12065                 break;
12066         default:
12067                 tg3_flag_set(tp, NO_NVRAM);
12068                 return;
12069         }
12070
12071         tg3_nvram_get_pagesize(tp, nvcfg1);
12072         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12073                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12074 }
12075
12076 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12077 {
12078         u32 nvcfg1, nvmpinstrp;
12079
12080         nvcfg1 = tr32(NVRAM_CFG1);
12081         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12082
12083         switch (nvmpinstrp) {
12084         case FLASH_5720_EEPROM_HD:
12085         case FLASH_5720_EEPROM_LD:
12086                 tp->nvram_jedecnum = JEDEC_ATMEL;
12087                 tg3_flag_set(tp, NVRAM_BUFFERED);
12088
12089                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12090                 tw32(NVRAM_CFG1, nvcfg1);
12091                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12092                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12093                 else
12094                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12095                 return;
12096         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12097         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12098         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12099         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12100         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12101         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12102         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12103         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12104         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12105         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12106         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12107         case FLASH_5720VENDOR_ATMEL_45USPT:
12108                 tp->nvram_jedecnum = JEDEC_ATMEL;
12109                 tg3_flag_set(tp, NVRAM_BUFFERED);
12110                 tg3_flag_set(tp, FLASH);
12111
12112                 switch (nvmpinstrp) {
12113                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12114                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12115                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12116                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12117                         break;
12118                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12119                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12120                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12121                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12122                         break;
12123                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12124                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12125                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12126                         break;
12127                 default:
12128                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12129                         break;
12130                 }
12131                 break;
12132         case FLASH_5720VENDOR_M_ST_M25PE10:
12133         case FLASH_5720VENDOR_M_ST_M45PE10:
12134         case FLASH_5720VENDOR_A_ST_M25PE10:
12135         case FLASH_5720VENDOR_A_ST_M45PE10:
12136         case FLASH_5720VENDOR_M_ST_M25PE20:
12137         case FLASH_5720VENDOR_M_ST_M45PE20:
12138         case FLASH_5720VENDOR_A_ST_M25PE20:
12139         case FLASH_5720VENDOR_A_ST_M45PE20:
12140         case FLASH_5720VENDOR_M_ST_M25PE40:
12141         case FLASH_5720VENDOR_M_ST_M45PE40:
12142         case FLASH_5720VENDOR_A_ST_M25PE40:
12143         case FLASH_5720VENDOR_A_ST_M45PE40:
12144         case FLASH_5720VENDOR_M_ST_M25PE80:
12145         case FLASH_5720VENDOR_M_ST_M45PE80:
12146         case FLASH_5720VENDOR_A_ST_M25PE80:
12147         case FLASH_5720VENDOR_A_ST_M45PE80:
12148         case FLASH_5720VENDOR_ST_25USPT:
12149         case FLASH_5720VENDOR_ST_45USPT:
12150                 tp->nvram_jedecnum = JEDEC_ST;
12151                 tg3_flag_set(tp, NVRAM_BUFFERED);
12152                 tg3_flag_set(tp, FLASH);
12153
12154                 switch (nvmpinstrp) {
12155                 case FLASH_5720VENDOR_M_ST_M25PE20:
12156                 case FLASH_5720VENDOR_M_ST_M45PE20:
12157                 case FLASH_5720VENDOR_A_ST_M25PE20:
12158                 case FLASH_5720VENDOR_A_ST_M45PE20:
12159                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12160                         break;
12161                 case FLASH_5720VENDOR_M_ST_M25PE40:
12162                 case FLASH_5720VENDOR_M_ST_M45PE40:
12163                 case FLASH_5720VENDOR_A_ST_M25PE40:
12164                 case FLASH_5720VENDOR_A_ST_M45PE40:
12165                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12166                         break;
12167                 case FLASH_5720VENDOR_M_ST_M25PE80:
12168                 case FLASH_5720VENDOR_M_ST_M45PE80:
12169                 case FLASH_5720VENDOR_A_ST_M25PE80:
12170                 case FLASH_5720VENDOR_A_ST_M45PE80:
12171                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12172                         break;
12173                 default:
12174                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12175                         break;
12176                 }
12177                 break;
12178         default:
12179                 tg3_flag_set(tp, NO_NVRAM);
12180                 return;
12181         }
12182
12183         tg3_nvram_get_pagesize(tp, nvcfg1);
12184         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12185                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12186 }
12187
12188 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12189 static void __devinit tg3_nvram_init(struct tg3 *tp)
12190 {
12191         tw32_f(GRC_EEPROM_ADDR,
12192              (EEPROM_ADDR_FSM_RESET |
12193               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12194                EEPROM_ADDR_CLKPERD_SHIFT)));
12195
12196         msleep(1);
12197
12198         /* Enable seeprom accesses. */
12199         tw32_f(GRC_LOCAL_CTRL,
12200              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12201         udelay(100);
12202
12203         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12204             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12205                 tg3_flag_set(tp, NVRAM);
12206
12207                 if (tg3_nvram_lock(tp)) {
12208                         netdev_warn(tp->dev,
12209                                     "Cannot get nvram lock, %s failed\n",
12210                                     __func__);
12211                         return;
12212                 }
12213                 tg3_enable_nvram_access(tp);
12214
12215                 tp->nvram_size = 0;
12216
12217                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12218                         tg3_get_5752_nvram_info(tp);
12219                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12220                         tg3_get_5755_nvram_info(tp);
12221                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12222                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12223                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12224                         tg3_get_5787_nvram_info(tp);
12225                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12226                         tg3_get_5761_nvram_info(tp);
12227                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12228                         tg3_get_5906_nvram_info(tp);
12229                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12230                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12231                         tg3_get_57780_nvram_info(tp);
12232                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12233                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12234                         tg3_get_5717_nvram_info(tp);
12235                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12236                         tg3_get_5720_nvram_info(tp);
12237                 else
12238                         tg3_get_nvram_info(tp);
12239
12240                 if (tp->nvram_size == 0)
12241                         tg3_get_nvram_size(tp);
12242
12243                 tg3_disable_nvram_access(tp);
12244                 tg3_nvram_unlock(tp);
12245
12246         } else {
12247                 tg3_flag_clear(tp, NVRAM);
12248                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12249
12250                 tg3_get_eeprom_size(tp);
12251         }
12252 }
12253
12254 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12255                                     u32 offset, u32 len, u8 *buf)
12256 {
12257         int i, j, rc = 0;
12258         u32 val;
12259
12260         for (i = 0; i < len; i += 4) {
12261                 u32 addr;
12262                 __be32 data;
12263
12264                 addr = offset + i;
12265
12266                 memcpy(&data, buf + i, 4);
12267
12268                 /*
12269                  * The SEEPROM interface expects the data to always be opposite
12270                  * the native endian format.  We accomplish this by reversing
12271                  * all the operations that would have been performed on the
12272                  * data from a call to tg3_nvram_read_be32().
12273                  */
12274                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12275
12276                 val = tr32(GRC_EEPROM_ADDR);
12277                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12278
12279                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12280                         EEPROM_ADDR_READ);
12281                 tw32(GRC_EEPROM_ADDR, val |
12282                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12283                         (addr & EEPROM_ADDR_ADDR_MASK) |
12284                         EEPROM_ADDR_START |
12285                         EEPROM_ADDR_WRITE);
12286
12287                 for (j = 0; j < 1000; j++) {
12288                         val = tr32(GRC_EEPROM_ADDR);
12289
12290                         if (val & EEPROM_ADDR_COMPLETE)
12291                                 break;
12292                         msleep(1);
12293                 }
12294                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12295                         rc = -EBUSY;
12296                         break;
12297                 }
12298         }
12299
12300         return rc;
12301 }
12302
12303 /* offset and length are dword aligned */
12304 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12305                 u8 *buf)
12306 {
12307         int ret = 0;
12308         u32 pagesize = tp->nvram_pagesize;
12309         u32 pagemask = pagesize - 1;
12310         u32 nvram_cmd;
12311         u8 *tmp;
12312
12313         tmp = kmalloc(pagesize, GFP_KERNEL);
12314         if (tmp == NULL)
12315                 return -ENOMEM;
12316
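        /* Unbuffered flash is written a full page at a time: read the
         * page back, merge in the new data, then erase and reprogram
         * the whole page.
         */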
12317         while (len) {
12318                 int j;
12319                 u32 phy_addr, page_off, size;
12320
12321                 phy_addr = offset & ~pagemask;
12322
12323                 for (j = 0; j < pagesize; j += 4) {
12324                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12325                                                   (__be32 *) (tmp + j));
12326                         if (ret)
12327                                 break;
12328                 }
12329                 if (ret)
12330                         break;
12331
12332                 page_off = offset & pagemask;
12333                 size = pagesize;
12334                 if (len < size)
12335                         size = len;
12336
12337                 len -= size;
12338
12339                 memcpy(tmp + page_off, buf, size);
12340
12341                 offset = offset + (pagesize - page_off);
12342
12343                 tg3_enable_nvram_access(tp);
12344
12345                 /*
12346                  * Before we can erase the flash page, we need
12347                  * to issue a special "write enable" command.
12348                  */
12349                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12350
12351                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12352                         break;
12353
12354                 /* Erase the target page */
12355                 tw32(NVRAM_ADDR, phy_addr);
12356
12357                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12358                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12359
12360                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12361                         break;
12362
12363                 /* Issue another write enable to start the write. */
12364                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12365
12366                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12367                         break;
12368
12369                 for (j = 0; j < pagesize; j += 4) {
12370                         __be32 data;
12371
12372                         data = *((__be32 *) (tmp + j));
12373
12374                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12375
12376                         tw32(NVRAM_ADDR, phy_addr + j);
12377
12378                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12379                                 NVRAM_CMD_WR;
12380
12381                         if (j == 0)
12382                                 nvram_cmd |= NVRAM_CMD_FIRST;
12383                         else if (j == (pagesize - 4))
12384                                 nvram_cmd |= NVRAM_CMD_LAST;
12385                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
12386                         if (ret)
12387                                 break;
12388                 }
12389                 if (ret)
12390                         break;
12391         }
12392
12393         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12394         tg3_nvram_exec_cmd(tp, nvram_cmd);
12395
12396         kfree(tmp);
12397
12398         return ret;
12399 }
12400
12401 /* offset and length are dword aligned */
12402 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12403                 u8 *buf)
12404 {
12405         int i, ret = 0;
12406
12407         for (i = 0; i < len; i += 4, offset += 4) {
12408                 u32 page_off, phy_addr, nvram_cmd;
12409                 __be32 data;
12410
12411                 memcpy(&data, buf + i, 4);
12412                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12413
12414                 page_off = offset % tp->nvram_pagesize;
12415
12416                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12417
12418                 tw32(NVRAM_ADDR, phy_addr);
12419
12420                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12421
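                      /* Flag the first word of the transfer (or of a flash
                       * page) with NVRAM_CMD_FIRST and the final word of a
                       * page or of the transfer with NVRAM_CMD_LAST,
                       * presumably so the controller can frame each burst.
                       */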
12422                 if (page_off == 0 || i == 0)
12423                         nvram_cmd |= NVRAM_CMD_FIRST;
12424                 if (page_off == (tp->nvram_pagesize - 4))
12425                         nvram_cmd |= NVRAM_CMD_LAST;
12426
12427                 if (i == (len - 4))
12428                         nvram_cmd |= NVRAM_CMD_LAST;
12429
12430                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12431                     !tg3_flag(tp, 5755_PLUS) &&
12432                     (tp->nvram_jedecnum == JEDEC_ST) &&
12433                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12434
12435                         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_WREN |
12436                                                  NVRAM_CMD_GO |
12437                                                  NVRAM_CMD_DONE);
12438                         if (ret)
12439                                 break;
12440                 }
12441                 if (!tg3_flag(tp, FLASH)) {
12442                         /* We always do complete word writes to eeprom. */
12443                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12444                 }
12445                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
12446                 if (ret)
12447                         break;
12448         }
12449         return ret;
12450 }
12451
12452 /* offset and length are dword aligned */
12453 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12454 {
12455         int ret;
12456
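              /* If the part is nominally write protected, drive GPIO1 low
               * for the duration of the update (presumably this gates an
               * external write-protect pin) and restore it afterwards.
               */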
12457         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12458                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12459                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12460                 udelay(40);
12461         }
12462
12463         if (!tg3_flag(tp, NVRAM)) {
12464                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12465         } else {
12466                 u32 grc_mode;
12467
12468                 ret = tg3_nvram_lock(tp);
12469                 if (ret)
12470                         return ret;
12471
12472                 tg3_enable_nvram_access(tp);
12473                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12474                         tw32(NVRAM_WRITE1, 0x406);
12475
12476                 grc_mode = tr32(GRC_MODE);
12477                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12478
12479                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12480                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12481                                 buf);
12482                 } else {
12483                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12484                                 buf);
12485                 }
12486
12487                 grc_mode = tr32(GRC_MODE);
12488                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12489
12490                 tg3_disable_nvram_access(tp);
12491                 tg3_nvram_unlock(tp);
12492         }
12493
12494         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12495                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12496                 udelay(40);
12497         }
12498
12499         return ret;
12500 }
12501
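      /* Some early boards carry no usable PHY ID in NVRAM, so map their
       * PCI subsystem IDs to a known PHY ID instead.  A phy_id of 0
       * marks a serdes-only board (see tg3_phy_probe()).
       */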
12502 struct subsys_tbl_ent {
12503         u16 subsys_vendor, subsys_devid;
12504         u32 phy_id;
12505 };
12506
12507 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12508         /* Broadcom boards. */
12509         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12510           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12511         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12512           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12513         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12514           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12515         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12516           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12517         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12518           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12519         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12520           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12521         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12522           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12523         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12524           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12525         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12526           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12527         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12528           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12529         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12530           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12531
12532         /* 3com boards. */
12533         { TG3PCI_SUBVENDOR_ID_3COM,
12534           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12535         { TG3PCI_SUBVENDOR_ID_3COM,
12536           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12537         { TG3PCI_SUBVENDOR_ID_3COM,
12538           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12539         { TG3PCI_SUBVENDOR_ID_3COM,
12540           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12541         { TG3PCI_SUBVENDOR_ID_3COM,
12542           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12543
12544         /* DELL boards. */
12545         { TG3PCI_SUBVENDOR_ID_DELL,
12546           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12547         { TG3PCI_SUBVENDOR_ID_DELL,
12548           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12549         { TG3PCI_SUBVENDOR_ID_DELL,
12550           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12551         { TG3PCI_SUBVENDOR_ID_DELL,
12552           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12553
12554         /* Compaq boards. */
12555         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12556           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12557         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12558           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12559         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12560           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12561         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12562           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12563         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12564           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12565
12566         /* IBM boards. */
12567         { TG3PCI_SUBVENDOR_ID_IBM,
12568           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12569 };
12570
12571 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12572 {
12573         int i;
12574
12575         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12576                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12577                      tp->pdev->subsystem_vendor) &&
12578                     (subsys_id_to_phy_id[i].subsys_devid ==
12579                      tp->pdev->subsystem_device))
12580                         return &subsys_id_to_phy_id[i];
12581         }
12582         return NULL;
12583 }
12584
12585 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12586 {
12587         u32 val;
12588         u16 pmcsr;
12589
12590         /* On some early chips the SRAM cannot be accessed in D3hot state,
12591          * so we need to make sure we're in D0.
12592          */
12593         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12594         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12595         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12596         msleep(1);
12597
12598         /* Make sure register accesses (indirect or otherwise)
12599          * will function correctly.
12600          */
12601         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12602                                tp->misc_host_ctrl);
12603
12604         /* The memory arbiter has to be enabled in order for SRAM accesses
12605          * to succeed.  Normally on powerup the tg3 chip firmware will make
12606          * sure it is enabled, but other entities such as system netboot
12607          * code might disable it.
12608          */
12609         val = tr32(MEMARB_MODE);
12610         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12611
12612         tp->phy_id = TG3_PHY_ID_INVALID;
12613         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12614
12615         /* Assume an onboard, WOL-capable device by default.  */
12616         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12617         tg3_flag_set(tp, WOL_CAP);
12618
12619         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12620                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12621                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12622                         tg3_flag_set(tp, IS_NIC);
12623                 }
12624                 val = tr32(VCPU_CFGSHDW);
12625                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12626                         tg3_flag_set(tp, ASPM_WORKAROUND);
12627                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12628                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12629                         tg3_flag_set(tp, WOL_ENABLE);
12630                         device_set_wakeup_enable(&tp->pdev->dev, true);
12631                 }
12632                 goto done;
12633         }
12634
12635         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12636         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12637                 u32 nic_cfg, led_cfg;
12638                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12639                 int eeprom_phy_serdes = 0;
12640
12641                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12642                 tp->nic_sram_data_cfg = nic_cfg;
12643
12644                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12645                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12646                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12647                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12648                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12649                     (ver > 0) && (ver < 0x100))
12650                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12651
12652                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12653                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12654
12655                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12656                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12657                         eeprom_phy_serdes = 1;
12658
12659                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12660                 if (nic_phy_id != 0) {
12661                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12662                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12663
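                              /* Same layout tg3_phy_probe() builds from the
                               * MII PHYSID registers: the 16-bit ID1 field
                               * lands in bits 10-25, ID2's top six bits land
                               * in bits 26-31, and ID2's low ten bits (model
                               * and revision) stay in bits 0-9.
                               */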
12664                         eeprom_phy_id  = (id1 >> 16) << 10;
12665                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
12666                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
12667                 } else
12668                         eeprom_phy_id = 0;
12669
12670                 tp->phy_id = eeprom_phy_id;
12671                 if (eeprom_phy_serdes) {
12672                         if (!tg3_flag(tp, 5705_PLUS))
12673                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12674                         else
12675                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12676                 }
12677
12678                 if (tg3_flag(tp, 5750_PLUS))
12679                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12680                                     SHASTA_EXT_LED_MODE_MASK);
12681                 else
12682                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12683
12684                 switch (led_cfg) {
12685                 default:
12686                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12687                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12688                         break;
12689
12690                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12691                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12692                         break;
12693
12694                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12695                         tp->led_ctrl = LED_CTRL_MODE_MAC;
12696
12697                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is read
12698                          * back from some older 5700/5701 bootcode.
12699                          */
12700                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12701                             ASIC_REV_5700 ||
12702                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
12703                             ASIC_REV_5701)
12704                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12705
12706                         break;
12707
12708                 case SHASTA_EXT_LED_SHARED:
12709                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
12710                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12711                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12712                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12713                                                  LED_CTRL_MODE_PHY_2);
12714                         break;
12715
12716                 case SHASTA_EXT_LED_MAC:
12717                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12718                         break;
12719
12720                 case SHASTA_EXT_LED_COMBO:
12721                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
12722                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12723                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12724                                                  LED_CTRL_MODE_PHY_2);
12725                         break;
12726
12727                 }
12728
12729                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12730                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12731                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12732                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12733
12734                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12735                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12736
12737                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12738                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12739                         if ((tp->pdev->subsystem_vendor ==
12740                              PCI_VENDOR_ID_ARIMA) &&
12741                             (tp->pdev->subsystem_device == 0x205a ||
12742                              tp->pdev->subsystem_device == 0x2063))
12743                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12744                 } else {
12745                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12746                         tg3_flag_set(tp, IS_NIC);
12747                 }
12748
12749                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12750                         tg3_flag_set(tp, ENABLE_ASF);
12751                         if (tg3_flag(tp, 5750_PLUS))
12752                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12753                 }
12754
12755                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12756                     tg3_flag(tp, 5750_PLUS))
12757                         tg3_flag_set(tp, ENABLE_APE);
12758
12759                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12760                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12761                         tg3_flag_clear(tp, WOL_CAP);
12762
12763                 if (tg3_flag(tp, WOL_CAP) &&
12764                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12765                         tg3_flag_set(tp, WOL_ENABLE);
12766                         device_set_wakeup_enable(&tp->pdev->dev, true);
12767                 }
12768
12769                 if (cfg2 & (1 << 17))
12770                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12771
12772                 /* Serdes signal pre-emphasis in register 0x590 is set
12773                  * by the bootcode if bit 18 is set. */
12774                 if (cfg2 & (1 << 18))
12775                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12776
12777                 if ((tg3_flag(tp, 57765_PLUS) ||
12778                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12779                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12780                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12781                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12782
12783                 if (tg3_flag(tp, PCI_EXPRESS) &&
12784                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12785                     !tg3_flag(tp, 57765_PLUS)) {
12786                         u32 cfg3;
12787
12788                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12789                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12790                                 tg3_flag_set(tp, ASPM_WORKAROUND);
12791                 }
12792
12793                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12794                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12795                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12796                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12797                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12798                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12799         }
12800 done:
12801         if (tg3_flag(tp, WOL_CAP))
12802                 device_set_wakeup_enable(&tp->pdev->dev,
12803                                          tg3_flag(tp, WOL_ENABLE));
12804         else
12805                 device_set_wakeup_capable(&tp->pdev->dev, false);
12806 }
12807
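      /* Kick off an OTP controller command and poll OTP_STATUS for
       * completion.  Writing the command with OTP_CTRL_OTP_CMD_START set
       * and then rewriting it without the bit pulses the start strobe.
       */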
12808 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12809 {
12810         int i;
12811         u32 val;
12812
12813         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12814         tw32(OTP_CTRL, cmd);
12815
12816         /* Wait for up to 1 ms for command to execute. */
12817         for (i = 0; i < 100; i++) {
12818                 val = tr32(OTP_STATUS);
12819                 if (val & OTP_STATUS_CMD_DONE)
12820                         break;
12821                 udelay(10);
12822         }
12823
12824         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12825 }
12826
12827 /* Read the gphy configuration from the OTP region of the chip.  The gphy
12828  * configuration is a 32-bit value that straddles the alignment boundary.
12829  * We do two 32-bit reads and then shift and merge the results.
12830  */
12831 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12832 {
12833         u32 bhalf_otp, thalf_otp;
12834
12835         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12836
12837         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12838                 return 0;
12839
12840         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12841
12842         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12843                 return 0;
12844
12845         thalf_otp = tr32(OTP_READ_DATA);
12846
12847         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12848
12849         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12850                 return 0;
12851
12852         bhalf_otp = tr32(OTP_READ_DATA);
12853
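              /* The 32-bit gphy config straddles two aligned words: the
               * low 16 bits of the first word become the new high half and
               * the high 16 bits of the second word the new low half.
               */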
12854         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12855 }
12856
12857 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12858 {
12859         u32 adv = ADVERTISED_Autoneg |
12860                   ADVERTISED_Pause;
12861
12862         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12863                 adv |= ADVERTISED_1000baseT_Half |
12864                        ADVERTISED_1000baseT_Full;
12865
12866         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12867                 adv |= ADVERTISED_100baseT_Half |
12868                        ADVERTISED_100baseT_Full |
12869                        ADVERTISED_10baseT_Half |
12870                        ADVERTISED_10baseT_Full |
12871                        ADVERTISED_TP;
12872         else
12873                 adv |= ADVERTISED_FIBRE;
12874
12875         tp->link_config.advertising = adv;
12876         tp->link_config.speed = SPEED_INVALID;
12877         tp->link_config.duplex = DUPLEX_INVALID;
12878         tp->link_config.autoneg = AUTONEG_ENABLE;
12879         tp->link_config.active_speed = SPEED_INVALID;
12880         tp->link_config.active_duplex = DUPLEX_INVALID;
12881         tp->link_config.orig_speed = SPEED_INVALID;
12882         tp->link_config.orig_duplex = DUPLEX_INVALID;
12883         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12884 }
12885
12886 static int __devinit tg3_phy_probe(struct tg3 *tp)
12887 {
12888         u32 hw_phy_id_1, hw_phy_id_2;
12889         u32 hw_phy_id, hw_phy_id_masked;
12890         int err;
12891
12892         /* flow control autonegotiation is default behavior */
12893         tg3_flag_set(tp, PAUSE_AUTONEG);
12894         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12895
12896         if (tg3_flag(tp, USE_PHYLIB))
12897                 return tg3_phy_init(tp);
12898
12899         /* Reading the PHY ID register can conflict with ASF
12900          * firmware access to the PHY hardware.
12901          */
12902         err = 0;
12903         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
12904                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12905         } else {
12906                 /* Now read the physical PHY_ID from the chip and verify
12907                  * that it is sane.  If it doesn't look good, we fall back
12908                  * to the PHY_ID found in the eeprom area and, failing
12909                  * that, to the hard-coded subsystem-ID table.
12910                  */
12911                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12912                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12913
12914                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
12915                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12916                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
12917
12918                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12919         }
12920
12921         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12922                 tp->phy_id = hw_phy_id;
12923                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12924                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12925                 else
12926                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12927         } else {
12928                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12929                         /* Do nothing, phy ID already set up in
12930                          * tg3_get_eeprom_hw_cfg().
12931                          */
12932                 } else {
12933                         struct subsys_tbl_ent *p;
12934
12935                         /* No eeprom signature?  Try the hardcoded
12936                          * subsys device table.
12937                          */
12938                         p = tg3_lookup_by_subsys(tp);
12939                         if (!p)
12940                                 return -ENODEV;
12941
12942                         tp->phy_id = p->phy_id;
12943                         if (!tp->phy_id ||
12944                             tp->phy_id == TG3_PHY_ID_BCM8002)
12945                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12946                 }
12947         }
12948
12949         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12950             ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
12951               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
12952              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12953               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12954                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12955
12956         tg3_phy_init_link_config(tp);
12957
12958         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12959             !tg3_flag(tp, ENABLE_APE) &&
12960             !tg3_flag(tp, ENABLE_ASF)) {
12961                 u32 bmsr, adv_reg, tg3_ctrl, mask;
12962
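                      /* BMSR_LSTATUS is latched low, so read BMSR twice:
                       * the first read clears any stale link-down
                       * indication and the second reflects the current
                       * link state.
                       */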
12963                 tg3_readphy(tp, MII_BMSR, &bmsr);
12964                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12965                     (bmsr & BMSR_LSTATUS))
12966                         goto skip_phy_reset;
12967
12968                 err = tg3_phy_reset(tp);
12969                 if (err)
12970                         return err;
12971
12972                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
12973                            ADVERTISE_100HALF | ADVERTISE_100FULL |
12974                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
12975                 tg3_ctrl = 0;
12976                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
12977                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
12978                                     MII_TG3_CTRL_ADV_1000_FULL);
12979                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12980                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
12981                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
12982                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
12983                 }
12984
12985                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12986                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12987                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12988                 if (!tg3_copper_is_advertising_all(tp, mask)) {
12989                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12990
12991                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12992                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12993
12994                         tg3_writephy(tp, MII_BMCR,
12995                                      BMCR_ANENABLE | BMCR_ANRESTART);
12996                 }
12997                 tg3_phy_set_wirespeed(tp);
12998
12999                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
13000                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13001                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
13002         }
13003
13004 skip_phy_reset:
13005         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13006                 err = tg3_init_5401phy_dsp(tp);
13007                 if (err)
13008                         return err;
13009
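                      /* Run the DSP init a second time.  Presumably the
                       * first pass does not always stick on this PHY; the
                       * source gives no rationale for the repeat.
                       */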
13010                 err = tg3_init_5401phy_dsp(tp);
13011         }
13012
13013         return err;
13014 }
13015
13016 static void __devinit tg3_read_vpd(struct tg3 *tp)
13017 {
13018         u8 *vpd_data;
13019         unsigned int block_end, rosize, len;
13020         int j, i = 0;
13021
13022         vpd_data = (u8 *)tg3_vpd_readblock(tp);
13023         if (!vpd_data)
13024                 goto out_no_vpd;
13025
13026         i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13027                              PCI_VPD_LRDT_RO_DATA);
13028         if (i < 0)
13029                 goto out_not_found;
13030
13031         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13032         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13033         i += PCI_VPD_LRDT_TAG_SIZE;
13034
13035         if (block_end > TG3_NVM_VPD_LEN)
13036                 goto out_not_found;
13037
13038         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13039                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13040         if (j > 0) {
13041                 len = pci_vpd_info_field_size(&vpd_data[j]);
13042
13043                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13044                 if (j + len > block_end || len != 4 ||
13045                     memcmp(&vpd_data[j], "1028", 4))
13046                         goto partno;
13047
13048                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13049                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13050                 if (j < 0)
13051                         goto partno;
13052
13053                 len = pci_vpd_info_field_size(&vpd_data[j]);
13054
13055                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13056                 if (j + len > block_end)
13057                         goto partno;
13058
13059                 memcpy(tp->fw_ver, &vpd_data[j], len);
13060                 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13061         }
13062
13063 partno:
13064         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13065                                       PCI_VPD_RO_KEYWORD_PARTNO);
13066         if (i < 0)
13067                 goto out_not_found;
13068
13069         len = pci_vpd_info_field_size(&vpd_data[i]);
13070
13071         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13072         if (len > TG3_BPN_SIZE ||
13073             (len + i) > TG3_NVM_VPD_LEN)
13074                 goto out_not_found;
13075
13076         memcpy(tp->board_part_number, &vpd_data[i], len);
13077
13078 out_not_found:
13079         kfree(vpd_data);
13080         if (tp->board_part_number[0])
13081                 return;
13082
13083 out_no_vpd:
13084         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13085                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13086                         strcpy(tp->board_part_number, "BCM5717");
13087                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13088                         strcpy(tp->board_part_number, "BCM5718");
13089                 else
13090                         goto nomatch;
13091         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13092                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13093                         strcpy(tp->board_part_number, "BCM57780");
13094                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13095                         strcpy(tp->board_part_number, "BCM57760");
13096                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13097                         strcpy(tp->board_part_number, "BCM57790");
13098                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13099                         strcpy(tp->board_part_number, "BCM57788");
13100                 else
13101                         goto nomatch;
13102         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13103                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13104                         strcpy(tp->board_part_number, "BCM57761");
13105                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13106                         strcpy(tp->board_part_number, "BCM57765");
13107                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13108                         strcpy(tp->board_part_number, "BCM57781");
13109                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13110                         strcpy(tp->board_part_number, "BCM57785");
13111                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13112                         strcpy(tp->board_part_number, "BCM57791");
13113                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13114                         strcpy(tp->board_part_number, "BCM57795");
13115                 else
13116                         goto nomatch;
13117         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13118                 strcpy(tp->board_part_number, "BCM95906");
13119         } else {
13120 nomatch:
13121                 strcpy(tp->board_part_number, "none");
13122         }
13123 }
13124
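      /* A valid firmware image begins with a word whose top six bits
       * are 0x03 (i.e. (val & 0xfc000000) == 0x0c000000) followed by a
       * zero word; anything else is rejected.
       */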
13125 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13126 {
13127         u32 val;
13128
13129         if (tg3_nvram_read(tp, offset, &val) ||
13130             (val & 0xfc000000) != 0x0c000000 ||
13131             tg3_nvram_read(tp, offset + 4, &val) ||
13132             val != 0)
13133                 return 0;
13134
13135         return 1;
13136 }
13137
13138 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13139 {
13140         u32 val, offset, start, ver_offset;
13141         int i, dst_off;
13142         bool newver = false;
13143
13144         if (tg3_nvram_read(tp, 0xc, &offset) ||
13145             tg3_nvram_read(tp, 0x4, &start))
13146                 return;
13147
13148         offset = tg3_nvram_logical_addr(tp, offset);
13149
13150         if (tg3_nvram_read(tp, offset, &val))
13151                 return;
13152
13153         if ((val & 0xfc000000) == 0x0c000000) {
13154                 if (tg3_nvram_read(tp, offset + 4, &val))
13155                         return;
13156
13157                 if (val == 0)
13158                         newver = true;
13159         }
13160
13161         dst_off = strlen(tp->fw_ver);
13162
13163         if (newver) {
13164                 if (TG3_VER_SIZE - dst_off < 16 ||
13165                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13166                         return;
13167
13168                 offset = offset + ver_offset - start;
13169                 for (i = 0; i < 16; i += 4) {
13170                         __be32 v;
13171                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13172                                 return;
13173
13174                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13175                 }
13176         } else {
13177                 u32 major, minor;
13178
13179                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13180                         return;
13181
13182                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13183                         TG3_NVM_BCVER_MAJSFT;
13184                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13185                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13186                          "v%d.%02d", major, minor);
13187         }
13188 }
13189
13190 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13191 {
13192         u32 val, major, minor;
13193
13194         /* Use native endian representation */
13195         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13196                 return;
13197
13198         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13199                 TG3_NVM_HWSB_CFG1_MAJSFT;
13200         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13201                 TG3_NVM_HWSB_CFG1_MINSFT;
13202
13203         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13204 }
13205
13206 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13207 {
13208         u32 offset, major, minor, build;
13209
13210         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13211
13212         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13213                 return;
13214
13215         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13216         case TG3_EEPROM_SB_REVISION_0:
13217                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13218                 break;
13219         case TG3_EEPROM_SB_REVISION_2:
13220                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13221                 break;
13222         case TG3_EEPROM_SB_REVISION_3:
13223                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13224                 break;
13225         case TG3_EEPROM_SB_REVISION_4:
13226                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13227                 break;
13228         case TG3_EEPROM_SB_REVISION_5:
13229                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13230                 break;
13231         case TG3_EEPROM_SB_REVISION_6:
13232                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13233                 break;
13234         default:
13235                 return;
13236         }
13237
13238         if (tg3_nvram_read(tp, offset, &val))
13239                 return;
13240
13241         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13242                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13243         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13244                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13245         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13246
13247         if (minor > 99 || build > 26)
13248                 return;
13249
13250         offset = strlen(tp->fw_ver);
13251         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13252                  " v%d.%02d", major, minor);
13253
13254         if (build > 0) {
13255                 offset = strlen(tp->fw_ver);
13256                 if (offset < TG3_VER_SIZE - 1)
13257                         tp->fw_ver[offset] = 'a' + build - 1;
13258         }
13259 }
13260
13261 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13262 {
13263         u32 val, offset, start;
13264         int i, vlen;
13265
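              /* Walk the NVRAM directory for the ASF-init entry, which
               * locates the management firmware image whose version
               * string we want to report.
               */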
13266         for (offset = TG3_NVM_DIR_START;
13267              offset < TG3_NVM_DIR_END;
13268              offset += TG3_NVM_DIRENT_SIZE) {
13269                 if (tg3_nvram_read(tp, offset, &val))
13270                         return;
13271
13272                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13273                         break;
13274         }
13275
13276         if (offset == TG3_NVM_DIR_END)
13277                 return;
13278
13279         if (!tg3_flag(tp, 5705_PLUS))
13280                 start = 0x08000000;
13281         else if (tg3_nvram_read(tp, offset - 4, &start))
13282                 return;
13283
13284         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13285             !tg3_fw_img_is_valid(tp, offset) ||
13286             tg3_nvram_read(tp, offset + 8, &val))
13287                 return;
13288
13289         offset += val - start;
13290
13291         vlen = strlen(tp->fw_ver);
13292
13293         tp->fw_ver[vlen++] = ',';
13294         tp->fw_ver[vlen++] = ' ';
13295
13296         for (i = 0; i < 4; i++) {
13297                 __be32 v;
13298                 if (tg3_nvram_read_be32(tp, offset, &v))
13299                         return;
13300
13301                 offset += sizeof(v);
13302
13303                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13304                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13305                         break;
13306                 }
13307
13308                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13309                 vlen += sizeof(v);
13310         }
13311 }
13312
13313 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13314 {
13315         int vlen;
13316         u32 apedata;
13317         char *fwtype;
13318
13319         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13320                 return;
13321
13322         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13323         if (apedata != APE_SEG_SIG_MAGIC)
13324                 return;
13325
13326         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13327         if (!(apedata & APE_FW_STATUS_READY))
13328                 return;
13329
13330         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13331
13332         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13333                 tg3_flag_set(tp, APE_HAS_NCSI);
13334                 fwtype = "NCSI";
13335         } else {
13336                 fwtype = "DASH";
13337         }
13338
13339         vlen = strlen(tp->fw_ver);
13340
13341         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13342                  fwtype,
13343                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13344                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13345                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13346                  (apedata & APE_FW_VERSION_BLDMSK));
13347 }
13348
13349 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13350 {
13351         u32 val;
13352         bool vpd_vers = false;
13353
13354         if (tp->fw_ver[0] != 0)
13355                 vpd_vers = true;
13356
13357         if (tg3_flag(tp, NO_NVRAM)) {
13358                 strcat(tp->fw_ver, "sb");
13359                 return;
13360         }
13361
13362         if (tg3_nvram_read(tp, 0, &val))
13363                 return;
13364
13365         if (val == TG3_EEPROM_MAGIC)
13366                 tg3_read_bc_ver(tp);
13367         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13368                 tg3_read_sb_ver(tp, val);
13369         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13370                 tg3_read_hwsb_ver(tp);
13371         else
13372                 return;
13373
13374         if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
13375                 goto done;
13376
13377         tg3_read_mgmtfw_ver(tp);
13378
13379 done:
13380         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13381 }
13382
13383 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13384
13385 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13386 {
13387         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13388                 return TG3_RX_RET_MAX_SIZE_5717;
13389         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13390                 return TG3_RX_RET_MAX_SIZE_5700;
13391         else
13392                 return TG3_RX_RET_MAX_SIZE_5705;
13393 }
13394
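      /* Host bridges on this list are known to reorder posted writes;
       * when the NIC sits below one of them, the driver falls back to
       * flushed register writes (handled elsewhere in this file).
       */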
13395 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13396         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13397         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13398         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13399         { },
13400 };
13401
13402 static int __devinit tg3_get_invariants(struct tg3 *tp)
13403 {
13404         u32 misc_ctrl_reg;
13405         u32 pci_state_reg, grc_misc_cfg;
13406         u32 val;
13407         u16 pci_cmd;
13408         int err;
13409
13410         /* Force memory write invalidate off.  If we leave it on,
13411          * then on 5700_BX chips we have to enable a workaround.
13412          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13413          * to match the cacheline size.  The Broadcom driver has this
13414          * workaround but turns MWI off all the time, so it never uses
13415          * it.  This seems to suggest that the workaround is insufficient.
13416          */
13417         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13418         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13419         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13420
13421         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13422          * has the register indirect write enable bit set before
13423          * we try to access any of the MMIO registers.  It is also
13424          * critical that the PCI-X hw workaround situation is decided
13425          * before that as well.
13426          */
13427         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13428                               &misc_ctrl_reg);
13429
13430         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13431                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13432         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13433                 u32 prod_id_asic_rev;
13434
13435                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13436                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13437                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13438                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13439                         pci_read_config_dword(tp->pdev,
13440                                               TG3PCI_GEN2_PRODID_ASICREV,
13441                                               &prod_id_asic_rev);
13442                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13443                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13444                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13445                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13446                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13447                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13448                         pci_read_config_dword(tp->pdev,
13449                                               TG3PCI_GEN15_PRODID_ASICREV,
13450                                               &prod_id_asic_rev);
13451                 else
13452                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13453                                               &prod_id_asic_rev);
13454
13455                 tp->pci_chip_rev_id = prod_id_asic_rev;
13456         }
13457
13458         /* Wrong chip ID in 5752 A0. This code can be removed later
13459          * as A0 is not in production.
13460          */
13461         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13462                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13463
13464         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13465          * we need to disable memory and use config. cycles
13466          * only to access all registers. The 5702/03 chips
13467          * can mistakenly decode the special cycles from the
13468          * ICH chipsets as memory write cycles, causing corruption
13469          * of register and memory space. Only certain ICH bridges
13470          * will drive special cycles with non-zero data during the
13471          * address phase which can fall within the 5703's address
13472          * range. This is not an ICH bug as the PCI spec allows
13473          * non-zero address during special cycles. However, only
13474          * these ICH bridges are known to drive non-zero addresses
13475          * during special cycles.
13476          *
13477          * Since special cycles do not cross PCI bridges, we only
13478          * enable this workaround if the 5703 is on the secondary
13479          * bus of these ICH bridges.
13480          */
13481         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13482             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13483                 static struct tg3_dev_id {
13484                         u32     vendor;
13485                         u32     device;
13486                         u32     rev;
13487                 } ich_chipsets[] = {
13488                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13489                           PCI_ANY_ID },
13490                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13491                           PCI_ANY_ID },
13492                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13493                           0xa },
13494                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13495                           PCI_ANY_ID },
13496                         { },
13497                 };
13498                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13499                 struct pci_dev *bridge = NULL;
13500
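                      /* Walk every matching ICH bridge in the system and
                       * apply the workaround only if the NIC sits directly
                       * on that bridge's secondary bus.
                       */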
13501                 while (pci_id->vendor != 0) {
13502                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13503                                                 bridge);
13504                         if (!bridge) {
13505                                 pci_id++;
13506                                 continue;
13507                         }
13508                         if (pci_id->rev != PCI_ANY_ID) {
13509                                 if (bridge->revision > pci_id->rev)
13510                                         continue;
13511                         }
13512                         if (bridge->subordinate &&
13513                             (bridge->subordinate->number ==
13514                              tp->pdev->bus->number)) {
13515                                 tg3_flag_set(tp, ICH_WORKAROUND);
13516                                 pci_dev_put(bridge);
13517                                 break;
13518                         }
13519                 }
13520         }
13521
13522         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13523                 static struct tg3_dev_id {
13524                         u32     vendor;
13525                         u32     device;
13526                 } bridge_chipsets[] = {
13527                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13528                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13529                         { },
13530                 };
13531                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13532                 struct pci_dev *bridge = NULL;
13533
13534                 while (pci_id->vendor != 0) {
13535                         bridge = pci_get_device(pci_id->vendor,
13536                                                 pci_id->device,
13537                                                 bridge);
13538                         if (!bridge) {
13539                                 pci_id++;
13540                                 continue;
13541                         }
13542                         if (bridge->subordinate &&
13543                             (bridge->subordinate->number <=
13544                              tp->pdev->bus->number) &&
13545                             (bridge->subordinate->subordinate >=
13546                              tp->pdev->bus->number)) {
13547                                 tg3_flag_set(tp, 5701_DMA_BUG);
13548                                 pci_dev_put(bridge);
13549                                 break;
13550                         }
13551                 }
13552         }
13553
13554         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13555          * DMA addresses > 40-bit.  This bridge may have additional
13556          * 57xx devices behind it in some 4-port NIC designs, for example.
13557          * Any tg3 device found behind the bridge will also need the 40-bit
13558          * DMA workaround.
13559          */
13560         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13561             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13562                 tg3_flag_set(tp, 5780_CLASS);
13563                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13564                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13565         } else {
13566                 struct pci_dev *bridge = NULL;
13567
13568                 do {
13569                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13570                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13571                                                 bridge);
13572                         if (bridge && bridge->subordinate &&
13573                             (bridge->subordinate->number <=
13574                              tp->pdev->bus->number) &&
13575                             (bridge->subordinate->subordinate >=
13576                              tp->pdev->bus->number)) {
13577                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13578                                 pci_dev_put(bridge);
13579                                 break;
13580                         }
13581                 } while (bridge);
13582         }
13583
13584         /* Initialize misc host control in PCI block. */
13585         tp->misc_host_ctrl |= (misc_ctrl_reg &
13586                                MISC_HOST_CTRL_CHIPREV);
13587         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13588                                tp->misc_host_ctrl);
13589
13590         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13591             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13592             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13593             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13594                 tp->pdev_peer = tg3_find_peer(tp);
13595
13596         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13597             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13598             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13599                 tg3_flag_set(tp, 5717_PLUS);
13600
13601         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13602             tg3_flag(tp, 5717_PLUS))
13603                 tg3_flag_set(tp, 57765_PLUS);
13604
13605         /* Intentionally exclude ASIC_REV_5906 */
13606         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13607             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13608             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13609             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13610             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13611             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13612             tg3_flag(tp, 57765_PLUS))
13613                 tg3_flag_set(tp, 5755_PLUS);
13614
13615         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13616             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13617             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13618             tg3_flag(tp, 5755_PLUS) ||
13619             tg3_flag(tp, 5780_CLASS))
13620                 tg3_flag_set(tp, 5750_PLUS);
13621
13622         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13623             tg3_flag(tp, 5750_PLUS))
13624                 tg3_flag_set(tp, 5705_PLUS);
13625
13626         /* 5700 B0 chips do not support checksumming correctly due
13627          * to hardware bugs.
13628          */
13629         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
13630                 u32 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
13631
13632                 if (tg3_flag(tp, 5755_PLUS))
13633                         features |= NETIF_F_IPV6_CSUM;
13634                 tp->dev->features |= features;
13635                 tp->dev->hw_features |= features;
13636                 tp->dev->vlan_features |= features;
13637         }
13638
13639         /* Determine TSO capabilities */
13640         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13641                 ; /* Do nothing. HW bug. */
13642         else if (tg3_flag(tp, 57765_PLUS))
13643                 tg3_flag_set(tp, HW_TSO_3);
13644         else if (tg3_flag(tp, 5755_PLUS) ||
13645                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13646                 tg3_flag_set(tp, HW_TSO_2);
13647         else if (tg3_flag(tp, 5750_PLUS)) {
13648                 tg3_flag_set(tp, HW_TSO_1);
13649                 tg3_flag_set(tp, TSO_BUG);
13650                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13651                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13652                         tg3_flag_clear(tp, TSO_BUG);
13653         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13654                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13655                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13656                 tg3_flag_set(tp, TSO_BUG);
13657                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13658                         tp->fw_needed = FIRMWARE_TG3TSO5;
13659                 else
13660                         tp->fw_needed = FIRMWARE_TG3TSO;
13661         }
13662
13663         tp->irq_max = 1;
13664
13665         if (tg3_flag(tp, 5750_PLUS)) {
13666                 tg3_flag_set(tp, SUPPORT_MSI);
13667                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13668                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13669                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13670                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13671                      tp->pdev_peer == tp->pdev))
13672                         tg3_flag_clear(tp, SUPPORT_MSI);
13673
13674                 if (tg3_flag(tp, 5755_PLUS) ||
13675                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13676                         tg3_flag_set(tp, 1SHOT_MSI);
13677                 }
13678
13679                 if (tg3_flag(tp, 57765_PLUS)) {
13680                         tg3_flag_set(tp, SUPPORT_MSIX);
13681                         tp->irq_max = TG3_IRQ_MAX_VECS;
13682                 }
13683         }
13684
13685         /* All chips can get confused if TX buffers
13686          * straddle the 4GB address boundary.
13687          */
13688         tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
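
        /* Illustrative sketch only (hypothetical helper, not the driver's
         * actual test): a DMA buffer straddles the 4GB boundary exactly
         * when the upper 32 address bits of its first and last bytes
         * differ.
         */
#if 0
        static inline bool example_crosses_4g(dma_addr_t map, unsigned int len)
        {
                u64 first = (u64)map;
                u64 last = first + len - 1;

                return (first >> 32) != (last >> 32);
        }
#endif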
13689
13690         if (tg3_flag(tp, 5755_PLUS))
13691                 tg3_flag_set(tp, SHORT_DMA_BUG);
13692         else
13693                 tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
13694
13695         if (tg3_flag(tp, 5717_PLUS))
13696                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13697
13698         if (tg3_flag(tp, 57765_PLUS) &&
13699             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13700                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13701
13702         if (!tg3_flag(tp, 5705_PLUS) ||
13703             tg3_flag(tp, 5780_CLASS) ||
13704             tg3_flag(tp, USE_JUMBO_BDFLAG))
13705                 tg3_flag_set(tp, JUMBO_CAPABLE);
13706
13707         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13708                               &pci_state_reg);
13709
13710         tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13711         if (tp->pcie_cap != 0) {
13712                 u16 lnkctl;
13713
13714                 tg3_flag_set(tp, PCI_EXPRESS);
13715
13716                 tp->pcie_readrq = 4096;
13717                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13718                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13719                         tp->pcie_readrq = 2048;
13720
13721                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13722
13723                 pci_read_config_word(tp->pdev,
13724                                      tp->pcie_cap + PCI_EXP_LNKCTL,
13725                                      &lnkctl);
13726                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13727                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13728                                 tg3_flag_clear(tp, HW_TSO_2);
13729                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13730                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13731                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13732                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13733                                 tg3_flag_set(tp, CLKREQ_BUG);
13734                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13735                         tg3_flag_set(tp, L1PLLPD_EN);
13736                 }
13737         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13738                 tg3_flag_set(tp, PCI_EXPRESS);
13739         } else if (!tg3_flag(tp, 5705_PLUS) ||
13740                    tg3_flag(tp, 5780_CLASS)) {
13741                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13742                 if (!tp->pcix_cap) {
13743                         dev_err(&tp->pdev->dev,
13744                                 "Cannot find PCI-X capability, aborting\n");
13745                         return -EIO;
13746                 }
13747
13748                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13749                         tg3_flag_set(tp, PCIX_MODE);
13750         }
13751
13752         /* If we have an AMD 762 or VIA K8T800 chipset, write
13753          * reordering to the mailbox registers done by the host
13754          * controller can cause major troubles.  We read back from
13755          * every mailbox register write to force the writes to be
13756          * posted to the chip in order.
13757          */
13758         if (pci_dev_present(tg3_write_reorder_chipsets) &&
13759             !tg3_flag(tp, PCI_EXPRESS))
13760                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
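
        /* Illustrative sketch (hypothetical helper): the read-back trick
         * described above is simply a write immediately followed by a read
         * of the same register, which stalls until the host bridge has
         * retired the posted write.
         */
#if 0
        static void example_write_flush(void __iomem *reg, u32 val)
        {
                writel(val, reg);
                readl(reg);     /* read-back forces the write to post */
        }
#endif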
13761
13762         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13763                              &tp->pci_cacheline_sz);
13764         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13765                              &tp->pci_lat_timer);
13766         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13767             tp->pci_lat_timer < 64) {
13768                 tp->pci_lat_timer = 64;
13769                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13770                                       tp->pci_lat_timer);
13771         }
13772
13773         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13774                 /* 5700 BX chips need to have their TX producer index
13775                  * mailboxes written twice to work around a bug.
13776                  */
13777                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13778
13779                 /* If we are in PCI-X mode, enable register write workaround.
13780                  *
13781                  * The workaround is to use indirect register accesses
13782                  * for all chip writes not to mailbox registers.
13783                  */
13784                 if (tg3_flag(tp, PCIX_MODE)) {
13785                         u32 pm_reg;
13786
13787                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13788
13789                         /* The chip can have its power management PCI config
13790                          * space registers clobbered due to this bug.
13791                          * So explicitly force the chip into D0 here.
13792                          */
13793                         pci_read_config_dword(tp->pdev,
13794                                               tp->pm_cap + PCI_PM_CTRL,
13795                                               &pm_reg);
13796                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13797                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13798                         pci_write_config_dword(tp->pdev,
13799                                                tp->pm_cap + PCI_PM_CTRL,
13800                                                pm_reg);
13801
13802                         /* Also, force SERR#/PERR# in PCI command. */
13803                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13804                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13805                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13806                 }
13807         }
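
        /* Illustrative sketch of the indirect-access idea mentioned above,
         * assuming the TG3PCI_REG_BASE_ADDR/TG3PCI_REG_DATA config-space
         * window from tg3.h (locking omitted): the register offset goes
         * through one config dword and the data through its pair, avoiding
         * memory-mapped register writes entirely.
         */
#if 0
        static void example_write_indirect(struct tg3 *tp, u32 off, u32 val)
        {
                pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        }
#endif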
13808
13809         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13810                 tg3_flag_set(tp, PCI_HIGH_SPEED);
13811         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13812                 tg3_flag_set(tp, PCI_32BIT);
13813
13814         /* Chip-specific fixup from Broadcom driver */
13815         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13816             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13817                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13818                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13819         }
13820
13821         /* Default fast path register access methods */
13822         tp->read32 = tg3_read32;
13823         tp->write32 = tg3_write32;
13824         tp->read32_mbox = tg3_read32;
13825         tp->write32_mbox = tg3_write32;
13826         tp->write32_tx_mbox = tg3_write32;
13827         tp->write32_rx_mbox = tg3_write32;
13828
13829         /* Various workaround register access methods */
13830         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13831                 tp->write32 = tg3_write_indirect_reg32;
13832         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13833                  (tg3_flag(tp, PCI_EXPRESS) &&
13834                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13835                 /*
13836                  * Back-to-back register writes can cause problems on these
13837                  * chips; the workaround is to read back all register writes
13838                  * except those to mailbox registers.
13839                  *
13840                  * See tg3_write_flush_reg32().
13841                  */
13842                 tp->write32 = tg3_write_flush_reg32;
13843         }
13844
13845         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13846                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13847                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13848                         tp->write32_rx_mbox = tg3_write_flush_reg32;
13849         }
13850
13851         if (tg3_flag(tp, ICH_WORKAROUND)) {
13852                 tp->read32 = tg3_read_indirect_reg32;
13853                 tp->write32 = tg3_write_indirect_reg32;
13854                 tp->read32_mbox = tg3_read_indirect_mbox;
13855                 tp->write32_mbox = tg3_write_indirect_mbox;
13856                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13857                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13858
13859                 iounmap(tp->regs);
13860                 tp->regs = NULL;
13861
13862                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13863                 pci_cmd &= ~PCI_COMMAND_MEMORY;
13864                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13865         }
13866         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13867                 tp->read32_mbox = tg3_read32_mbox_5906;
13868                 tp->write32_mbox = tg3_write32_mbox_5906;
13869                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13870                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13871         }
13872
13873         if (tp->write32 == tg3_write_indirect_reg32 ||
13874             (tg3_flag(tp, PCIX_MODE) &&
13875              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13876               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13877                 tg3_flag_set(tp, SRAM_USE_CONFIG);
13878
13879         /* Get eeprom hw config before calling tg3_power_up().
13880          * In particular, the TG3_FLAG_IS_NIC flag must be
13881          * determined before calling tg3_power_up() so that
13882          * we know whether or not to switch out of Vaux power.
13883          * When the flag is set, it means that GPIO1 is used for eeprom
13884          * write protect and also implies that it is a LOM where GPIOs
13885          * are not used to switch power.
13886          */
13887         tg3_get_eeprom_hw_cfg(tp);
13888
13889         if (tg3_flag(tp, ENABLE_APE)) {
13890                 /* Allow reads and writes to the
13891                  * APE register and memory space.
13892                  */
13893                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13894                                  PCISTATE_ALLOW_APE_SHMEM_WR |
13895                                  PCISTATE_ALLOW_APE_PSPACE_WR;
13896                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13897                                        pci_state_reg);
13898         }
13899
13900         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13901             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13902             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13903             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13904             tg3_flag(tp, 57765_PLUS))
13905                 tg3_flag_set(tp, CPMU_PRESENT);
13906
13907         /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13908          * GPIO1 driven high will bring 5700's external PHY out of reset.
13909          * It is also used as eeprom write protect on LOMs.
13910          */
13911         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13912         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13913             tg3_flag(tp, EEPROM_WRITE_PROT))
13914                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13915                                        GRC_LCLCTRL_GPIO_OUTPUT1);
13916         /* Unused GPIO3 must be driven as output on 5752 because there
13917          * are no pull-up resistors on unused GPIO pins.
13918          */
13919         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13920                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13921
13922         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13923             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13924             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13925                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13926
13927         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13928             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13929                 /* Turn off the debug UART. */
13930                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13931                 if (tg3_flag(tp, IS_NIC))
13932                         /* Keep VMain power. */
13933                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13934                                               GRC_LCLCTRL_GPIO_OUTPUT0;
13935         }
13936
13937         /* Force the chip into D0. */
13938         err = tg3_power_up(tp);
13939         if (err) {
13940                 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13941                 return err;
13942         }
13943
13944         /* Derive initial jumbo mode from MTU assigned in
13945          * ether_setup() via the alloc_etherdev() call
13946          */
13947         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
13948                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13949
13950         /* Determine WakeOnLan speed to use. */
13951         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13952             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13953             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13954             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13955                 tg3_flag_clear(tp, WOL_SPEED_100MB);
13956         } else {
13957                 tg3_flag_set(tp, WOL_SPEED_100MB);
13958         }
13959
13960         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13961                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13962
13963         /* A few boards don't want the Ethernet@WireSpeed phy feature */
13964         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13965             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
13966              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13967              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13968             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13969             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13970                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13971
13972         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13973             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13974                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13975         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13976                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13977
13978         if (tg3_flag(tp, 5705_PLUS) &&
13979             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13980             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13981             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13982             !tg3_flag(tp, 57765_PLUS)) {
13983                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13984                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13985                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13986                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13987                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13988                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13989                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
13990                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13991                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
13992                 } else
13993                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
13994         }
13995
13996         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13997             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13998                 tp->phy_otp = tg3_read_otp_phycfg(tp);
13999                 if (tp->phy_otp == 0)
14000                         tp->phy_otp = TG3_OTP_DEFAULT;
14001         }
14002
14003         if (tg3_flag(tp, CPMU_PRESENT))
14004                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14005         else
14006                 tp->mi_mode = MAC_MI_MODE_BASE;
14007
14008         tp->coalesce_mode = 0;
14009         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14010             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14011                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14012
14013         /* Set these bits to enable statistics workaround. */
14014         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14015             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14016             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14017                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14018                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14019         }
14020
14021         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14022             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14023                 tg3_flag_set(tp, USE_PHYLIB);
14024
14025         err = tg3_mdio_init(tp);
14026         if (err)
14027                 return err;
14028
14029         /* Initialize data/descriptor byte/word swapping. */
14030         val = tr32(GRC_MODE);
14031         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14032                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14033                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14034                         GRC_MODE_B2HRX_ENABLE |
14035                         GRC_MODE_HTX2B_ENABLE |
14036                         GRC_MODE_HOST_STACKUP);
14037         else
14038                 val &= GRC_MODE_HOST_STACKUP;
14039
14040         tw32(GRC_MODE, val | tp->grc_mode);
14041
14042         tg3_switch_clocks(tp);
14043
14044         /* Clear this out for sanity. */
14045         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14046
14047         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14048                               &pci_state_reg);
14049         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14050             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14051                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14052
14053                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14054                     chiprevid == CHIPREV_ID_5701_B0 ||
14055                     chiprevid == CHIPREV_ID_5701_B2 ||
14056                     chiprevid == CHIPREV_ID_5701_B5) {
14057                         void __iomem *sram_base;
14058
14059                         /* Write some dummy words into the SRAM status block
14060                          * area and see if they read back correctly.  If the return
14061                          * value is bad, force-enable the PCIX workaround.
14062                          */
14063                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14064
14065                         writel(0x00000000, sram_base);
14066                         writel(0x00000000, sram_base + 4);
14067                         writel(0xffffffff, sram_base + 4);
14068                         if (readl(sram_base) != 0x00000000)
14069                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14070                 }
14071         }
14072
14073         udelay(50);
14074         tg3_nvram_init(tp);
14075
14076         grc_misc_cfg = tr32(GRC_MISC_CFG);
14077         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14078
14079         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14080             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14081              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14082                 tg3_flag_set(tp, IS_5788);
14083
14084         if (!tg3_flag(tp, IS_5788) &&
14085             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
14086                 tg3_flag_set(tp, TAGGED_STATUS);
14087         if (tg3_flag(tp, TAGGED_STATUS)) {
14088                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14089                                       HOSTCC_MODE_CLRTICK_TXBD);
14090
14091                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14092                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14093                                        tp->misc_host_ctrl);
14094         }
14095
14096         /* Preserve the APE MAC_MODE bits */
14097         if (tg3_flag(tp, ENABLE_APE))
14098                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14099         else
14100                 tp->mac_mode = TG3_DEF_MAC_MODE;
14101
14102         /* These devices are limited to 10/100 speeds only */
14103         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14104              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14105             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14106              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14107              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14108               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14109               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14110             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14111              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14112               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14113               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14114             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14115             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14116             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14117             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14118                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14119
14120         err = tg3_phy_probe(tp);
14121         if (err) {
14122                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14123                 /* ... but do not return immediately ... */
14124                 tg3_mdio_fini(tp);
14125         }
14126
14127         tg3_read_vpd(tp);
14128         tg3_read_fw_ver(tp);
14129
14130         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14131                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14132         } else {
14133                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14134                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14135                 else
14136                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14137         }
14138
14139         /* 5700 {AX,BX} chips have a broken status block link
14140          * change bit implementation, so we must use the
14141          * status register in those cases.
14142          */
14143         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14144                 tg3_flag_set(tp, USE_LINKCHG_REG);
14145         else
14146                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14147
14148         /* The led_ctrl is set during tg3_phy_probe; here we might
14149          * have to force the link status polling mechanism based
14150          * upon subsystem IDs.
14151          */
14152         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14153             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14154             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14155                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14156                 tg3_flag_set(tp, USE_LINKCHG_REG);
14157         }
14158
14159         /* For all SERDES we poll the MAC status register. */
14160         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14161                 tg3_flag_set(tp, POLL_SERDES);
14162         else
14163                 tg3_flag_clear(tp, POLL_SERDES);
14164
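        /* NET_IP_ALIGN (normally 2) offsets the 14-byte Ethernet header so
         * the IP header behind it lands on a 4-byte boundary.  The 5701 in
         * PCI-X mode cannot DMA to such offset addresses, so rx_offset must
         * stay 0 there; hosts without efficient unaligned access then raise
         * the copy threshold so every packet is copied and realigned.
         */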
14165         tp->rx_offset = NET_IP_ALIGN;
14166         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14167         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14168             tg3_flag(tp, PCIX_MODE)) {
14169                 tp->rx_offset = 0;
14170 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14171                 tp->rx_copy_thresh = ~(u16)0;
14172 #endif
14173         }
14174
14175         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14176         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14177         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14178
14179         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14180
14181         /* Increment the rx prod index on the rx std ring by at most
14182          * 8 for these chips to work around hw errata.
14183          */
14184         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14185             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14186             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14187                 tp->rx_std_max_post = 8;
14188
14189         if (tg3_flag(tp, ASPM_WORKAROUND))
14190                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14191                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14192
14193         return err;
14194 }
14195
14196 #ifdef CONFIG_SPARC
14197 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14198 {
14199         struct net_device *dev = tp->dev;
14200         struct pci_dev *pdev = tp->pdev;
14201         struct device_node *dp = pci_device_to_OF_node(pdev);
14202         const unsigned char *addr;
14203         int len;
14204
14205         addr = of_get_property(dp, "local-mac-address", &len);
14206         if (addr && len == 6) {
14207                 memcpy(dev->dev_addr, addr, 6);
14208                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14209                 return 0;
14210         }
14211         return -ENODEV;
14212 }
14213
14214 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14215 {
14216         struct net_device *dev = tp->dev;
14217
14218         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14219         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14220         return 0;
14221 }
14222 #endif
14223
14224 static int __devinit tg3_get_device_address(struct tg3 *tp)
14225 {
14226         struct net_device *dev = tp->dev;
14227         u32 hi, lo, mac_offset;
14228         int addr_ok = 0;
14229
14230 #ifdef CONFIG_SPARC
14231         if (!tg3_get_macaddr_sparc(tp))
14232                 return 0;
14233 #endif
14234
14235         mac_offset = 0x7c;
14236         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
14237             tg3_flag(tp, 5780_CLASS)) {
14238                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14239                         mac_offset = 0xcc;
14240                 if (tg3_nvram_lock(tp))
14241                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14242                 else
14243                         tg3_nvram_unlock(tp);
14244         } else if (tg3_flag(tp, 5717_PLUS)) {
14245                 if (PCI_FUNC(tp->pdev->devfn) & 1)
14246                         mac_offset = 0xcc;
14247                 if (PCI_FUNC(tp->pdev->devfn) > 1)
14248                         mac_offset += 0x18c;
14249         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14250                 mac_offset = 0x10;
14251
14252         /* First try to get it from MAC address mailbox. */
14253         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14254         if ((hi >> 16) == 0x484b) {
14255                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14256                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14257
14258                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14259                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14260                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14261                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14262                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14263
14264                 /* Some old bootcode may report a 0 MAC address in SRAM */
14265                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14266         }
14267         if (!addr_ok) {
14268                 /* Next, try NVRAM. */
14269                 if (!tg3_flag(tp, NO_NVRAM) &&
14270                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14271                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14272                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14273                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14274                 }
14275                 /* Finally just fetch it out of the MAC control regs. */
14276                 else {
14277                         hi = tr32(MAC_ADDR_0_HIGH);
14278                         lo = tr32(MAC_ADDR_0_LOW);
14279
14280                         dev->dev_addr[5] = lo & 0xff;
14281                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14282                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14283                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14284                         dev->dev_addr[1] = hi & 0xff;
14285                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14286                 }
14287         }
14288
14289         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14290 #ifdef CONFIG_SPARC
14291                 if (!tg3_get_default_macaddr_sparc(tp))
14292                         return 0;
14293 #endif
14294                 return -EINVAL;
14295         }
14296         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14297         return 0;
14298 }
14299
14300 #define BOUNDARY_SINGLE_CACHELINE       1
14301 #define BOUNDARY_MULTI_CACHELINE        2
14302
14303 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14304 {
14305         int cacheline_size;
14306         u8 byte;
14307         int goal;
14308
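        /* PCI_CACHE_LINE_SIZE is programmed in 32-bit dwords, hence the
         * multiply by 4 below; an unprogrammed (zero) register is treated
         * as the worst case, 1024 bytes.
         */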
14309         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14310         if (byte == 0)
14311                 cacheline_size = 1024;
14312         else
14313                 cacheline_size = (int) byte * 4;
14314
14315         /* On 5703 and later chips, the boundary bits have no
14316          * effect.
14317          */
14318         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14319             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14320             !tg3_flag(tp, PCI_EXPRESS))
14321                 goto out;
14322
14323 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14324         goal = BOUNDARY_MULTI_CACHELINE;
14325 #else
14326 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14327         goal = BOUNDARY_SINGLE_CACHELINE;
14328 #else
14329         goal = 0;
14330 #endif
14331 #endif
14332
14333         if (tg3_flag(tp, 57765_PLUS)) {
14334                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14335                 goto out;
14336         }
14337
14338         if (!goal)
14339                 goto out;
14340
14341         /* PCI controllers on most RISC systems tend to disconnect
14342          * when a device tries to burst across a cache-line boundary.
14343          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14344          *
14345          * Unfortunately, for PCI-E there are only limited
14346          * write-side controls for this, and thus for reads
14347          * we will still get the disconnects.  We'll also waste
14348          * these PCI cycles for both read and write for chips
14349          * other than 5700 and 5701 which do not implement the
14350          * boundary bits.
14351          */
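        /* Worked example: on plain PCI with a 64-byte cache line and a
         * host where goal == BOUNDARY_SINGLE_CACHELINE, the switches below
         * select DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64, so
         * bursts break exactly at cache-line boundaries.
         */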
14352         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14353                 switch (cacheline_size) {
14354                 case 16:
14355                 case 32:
14356                 case 64:
14357                 case 128:
14358                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14359                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14360                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14361                         } else {
14362                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14363                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14364                         }
14365                         break;
14366
14367                 case 256:
14368                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14369                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14370                         break;
14371
14372                 default:
14373                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14374                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14375                         break;
14376                 }
14377         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14378                 switch (cacheline_size) {
14379                 case 16:
14380                 case 32:
14381                 case 64:
14382                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14383                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14384                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14385                                 break;
14386                         }
14387                         /* fallthrough */
14388                 case 128:
14389                 default:
14390                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14391                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14392                         break;
14393                 }
14394         } else {
14395                 switch (cacheline_size) {
14396                 case 16:
14397                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14398                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14399                                         DMA_RWCTRL_WRITE_BNDRY_16);
14400                                 break;
14401                         }
14402                         /* fallthrough */
14403                 case 32:
14404                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14405                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14406                                         DMA_RWCTRL_WRITE_BNDRY_32);
14407                                 break;
14408                         }
14409                         /* fallthrough */
14410                 case 64:
14411                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14412                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14413                                         DMA_RWCTRL_WRITE_BNDRY_64);
14414                                 break;
14415                         }
14416                         /* fallthrough */
14417                 case 128:
14418                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14419                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14420                                         DMA_RWCTRL_WRITE_BNDRY_128);
14421                                 break;
14422                         }
14423                         /* fallthrough */
14424                 case 256:
14425                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14426                                 DMA_RWCTRL_WRITE_BNDRY_256);
14427                         break;
14428                 case 512:
14429                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14430                                 DMA_RWCTRL_WRITE_BNDRY_512);
14431                         break;
14432                 case 1024:
14433                 default:
14434                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14435                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14436                         break;
14437                 }
14438         }
14439
14440 out:
14441         return val;
14442 }
14443
14444 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14445 {
14446         struct tg3_internal_buffer_desc test_desc;
14447         u32 sram_dma_descs;
14448         int i, ret;
14449
14450         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14451
14452         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14453         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14454         tw32(RDMAC_STATUS, 0);
14455         tw32(WDMAC_STATUS, 0);
14456
14457         tw32(BUFMGR_MODE, 0);
14458         tw32(FTQ_RESET, 0);
14459
14460         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14461         test_desc.addr_lo = buf_dma & 0xffffffff;
14462         test_desc.nic_mbuf = 0x00002100;
14463         test_desc.len = size;
14464
14465         /*
14466          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14467          * the *second* time the tg3 driver was getting loaded after an
14468          * initial scan.
14469          *
14470          * Broadcom tells me:
14471          *   ...the DMA engine is connected to the GRC block and a DMA
14472          *   reset may affect the GRC block in some unpredictable way...
14473          *   The behavior of resets to individual blocks has not been tested.
14474          *
14475          * Broadcom noted the GRC reset will also reset all sub-components.
14476          */
14477         if (to_device) {
14478                 test_desc.cqid_sqid = (13 << 8) | 2;
14479
14480                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14481                 udelay(40);
14482         } else {
14483                 test_desc.cqid_sqid = (16 << 8) | 7;
14484
14485                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14486                 udelay(40);
14487         }
14488         test_desc.flags = 0x00000005;
14489
14490         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14491                 u32 val;
14492
14493                 val = *(((u32 *)&test_desc) + i);
14494                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14495                                        sram_dma_descs + (i * sizeof(u32)));
14496                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14497         }
14498         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14499
14500         if (to_device)
14501                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14502         else
14503                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14504
14505         ret = -ENODEV;
14506         for (i = 0; i < 40; i++) {
14507                 u32 val;
14508
14509                 if (to_device)
14510                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14511                 else
14512                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14513                 if ((val & 0xffff) == sram_dma_descs) {
14514                         ret = 0;
14515                         break;
14516                 }
14517
14518                 udelay(100);
14519         }
14520
14521         return ret;
14522 }
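
/* tg3_test_dma() below drives this helper in both directions: it fills
 * a buffer with a counting pattern, DMAs it to NIC SRAM, DMAs it back,
 * and verifies the pattern survived, tightening the DMA write boundary
 * if corruption is seen.
 */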
14523
14524 #define TEST_BUFFER_SIZE        0x2000
14525
14526 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14527         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14528         { },
14529 };
14530
14531 static int __devinit tg3_test_dma(struct tg3 *tp)
14532 {
14533         dma_addr_t buf_dma;
14534         u32 *buf, saved_dma_rwctrl;
14535         int ret = 0;
14536
14537         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14538                                  &buf_dma, GFP_KERNEL);
14539         if (!buf) {
14540                 ret = -ENOMEM;
14541                 goto out_nofree;
14542         }
14543
14544         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14545                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14546
14547         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14548
14549         if (tg3_flag(tp, 57765_PLUS))
14550                 goto out;
14551
14552         if (tg3_flag(tp, PCI_EXPRESS)) {
14553                 /* DMA read watermark not used on PCIE */
14554                 tp->dma_rwctrl |= 0x00180000;
14555         } else if (!tg3_flag(tp, PCIX_MODE)) {
14556                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14557                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14558                         tp->dma_rwctrl |= 0x003f0000;
14559                 else
14560                         tp->dma_rwctrl |= 0x003f000f;
14561         } else {
14562                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14563                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14564                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14565                         u32 read_water = 0x7;
14566
14567                         /* If the 5704 is behind the EPB bridge, we can
14568                          * do the less restrictive ONE_DMA workaround for
14569                          * better performance.
14570                          */
14571                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14572                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14573                                 tp->dma_rwctrl |= 0x8000;
14574                         else if (ccval == 0x6 || ccval == 0x7)
14575                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14576
14577                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14578                                 read_water = 4;
14579                         /* Set bit 23 to enable PCIX hw bug fix */
14580                         tp->dma_rwctrl |=
14581                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14582                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14583                                 (1 << 23);
14584                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14585                         /* 5780 always in PCIX mode */
14586                         tp->dma_rwctrl |= 0x00144000;
14587                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14588                         /* 5714 always in PCIX mode */
14589                         tp->dma_rwctrl |= 0x00148000;
14590                 } else {
14591                         tp->dma_rwctrl |= 0x001b000f;
14592                 }
14593         }
14594
14595         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14596             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14597                 tp->dma_rwctrl &= 0xfffffff0;
14598
14599         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14600             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14601                 /* Remove this if it causes problems for some boards. */
14602                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14603
14604                 /* On 5700/5701 chips, we need to set this bit.
14605                  * Otherwise the chip will issue cacheline transactions
14606                  * to streamable DMA memory with not all the byte
14607                  * enables turned on.  This is an error on several
14608                  * RISC PCI controllers, in particular sparc64.
14609                  *
14610                  * On 5703/5704 chips, this bit has been reassigned
14611                  * a different meaning.  In particular, it is used
14612                  * on those chips to enable a PCI-X workaround.
14613                  */
14614                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14615         }
14616
14617         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14618
14619 #if 0
14620         /* Unneeded, already done by tg3_get_invariants.  */
14621         tg3_switch_clocks(tp);
14622 #endif
14623
14624         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14625             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14626                 goto out;
14627
14628         /* It is best to perform the DMA test with maximum write burst size
14629          * to expose the 5700/5701 write DMA bug.
14630          */
14631         saved_dma_rwctrl = tp->dma_rwctrl;
14632         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14633         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14634
14635         while (1) {
14636                 u32 *p = buf, i;
14637
14638                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14639                         p[i] = i;
14640
14641                 /* Send the buffer to the chip. */
14642                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14643                 if (ret) {
14644                         dev_err(&tp->pdev->dev,
14645                                 "%s: Buffer write failed. err = %d\n",
14646                                 __func__, ret);
14647                         break;
14648                 }
14649
14650 #if 0
14651                 /* validate data reached card RAM correctly. */
14652                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14653                         u32 val;
14654                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
14655                         if (le32_to_cpu(val) != p[i]) {
14656                                 dev_err(&tp->pdev->dev,
14657                                         "%s: Buffer corrupted on device! "
14658                                         "(%d != %d)\n", __func__, le32_to_cpu(val), i);
14659                                 /* ret = -ENODEV here? */
14660                         }
14661                         p[i] = 0;
14662                 }
14663 #endif
14664                 /* Now read it back. */
14665                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14666                 if (ret) {
14667                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14668                                 "err = %d\n", __func__, ret);
14669                         break;
14670                 }
14671
14672                 /* Verify it. */
14673                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14674                         if (p[i] == i)
14675                                 continue;
14676
14677                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14678                             DMA_RWCTRL_WRITE_BNDRY_16) {
14679                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14680                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14681                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14682                                 break;
14683                         } else {
14684                                 dev_err(&tp->pdev->dev,
14685                                         "%s: Buffer corrupted on read back! "
14686                                         "(%d != %d)\n", __func__, p[i], i);
14687                                 ret = -ENODEV;
14688                                 goto out;
14689                         }
14690                 }
14691
14692                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14693                         /* Success. */
14694                         ret = 0;
14695                         break;
14696                 }
14697         }
14698         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14699             DMA_RWCTRL_WRITE_BNDRY_16) {
14700                 /* DMA test passed without adjusting DMA boundary,
14701                  * now look for chipsets that are known to expose the
14702                  * DMA bug without failing the test.
14703                  */
14704                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14705                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14706                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14707                 } else {
14708                         /* Safe to use the calculated DMA boundary. */
14709                         tp->dma_rwctrl = saved_dma_rwctrl;
14710                 }
14711
14712                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14713         }
14714
14715 out:
14716         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14717 out_nofree:
14718         return ret;
14719 }
14720
14721 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14722 {
14723         if (tg3_flag(tp, 57765_PLUS)) {
14724                 tp->bufmgr_config.mbuf_read_dma_low_water =
14725                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14726                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14727                         DEFAULT_MB_MACRX_LOW_WATER_57765;
14728                 tp->bufmgr_config.mbuf_high_water =
14729                         DEFAULT_MB_HIGH_WATER_57765;
14730
14731                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14732                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14733                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14734                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14735                 tp->bufmgr_config.mbuf_high_water_jumbo =
14736                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14737         } else if (tg3_flag(tp, 5705_PLUS)) {
14738                 tp->bufmgr_config.mbuf_read_dma_low_water =
14739                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14740                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14741                         DEFAULT_MB_MACRX_LOW_WATER_5705;
14742                 tp->bufmgr_config.mbuf_high_water =
14743                         DEFAULT_MB_HIGH_WATER_5705;
14744                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14745                         tp->bufmgr_config.mbuf_mac_rx_low_water =
14746                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
14747                         tp->bufmgr_config.mbuf_high_water =
14748                                 DEFAULT_MB_HIGH_WATER_5906;
14749                 }
14750
14751                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14752                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14753                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14754                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14755                 tp->bufmgr_config.mbuf_high_water_jumbo =
14756                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14757         } else {
14758                 tp->bufmgr_config.mbuf_read_dma_low_water =
14759                         DEFAULT_MB_RDMA_LOW_WATER;
14760                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14761                         DEFAULT_MB_MACRX_LOW_WATER;
14762                 tp->bufmgr_config.mbuf_high_water =
14763                         DEFAULT_MB_HIGH_WATER;
14764
14765                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14766                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14767                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14768                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14769                 tp->bufmgr_config.mbuf_high_water_jumbo =
14770                         DEFAULT_MB_HIGH_WATER_JUMBO;
14771         }
14772
14773         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14774         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14775 }
14776
14777 static char * __devinit tg3_phy_string(struct tg3 *tp)
14778 {
14779         switch (tp->phy_id & TG3_PHY_ID_MASK) {
14780         case TG3_PHY_ID_BCM5400:        return "5400";
14781         case TG3_PHY_ID_BCM5401:        return "5401";
14782         case TG3_PHY_ID_BCM5411:        return "5411";
14783         case TG3_PHY_ID_BCM5701:        return "5701";
14784         case TG3_PHY_ID_BCM5703:        return "5703";
14785         case TG3_PHY_ID_BCM5704:        return "5704";
14786         case TG3_PHY_ID_BCM5705:        return "5705";
14787         case TG3_PHY_ID_BCM5750:        return "5750";
14788         case TG3_PHY_ID_BCM5752:        return "5752";
14789         case TG3_PHY_ID_BCM5714:        return "5714";
14790         case TG3_PHY_ID_BCM5780:        return "5780";
14791         case TG3_PHY_ID_BCM5755:        return "5755";
14792         case TG3_PHY_ID_BCM5787:        return "5787";
14793         case TG3_PHY_ID_BCM5784:        return "5784";
14794         case TG3_PHY_ID_BCM5756:        return "5722/5756";
14795         case TG3_PHY_ID_BCM5906:        return "5906";
14796         case TG3_PHY_ID_BCM5761:        return "5761";
14797         case TG3_PHY_ID_BCM5718C:       return "5718C";
14798         case TG3_PHY_ID_BCM5718S:       return "5718S";
14799         case TG3_PHY_ID_BCM57765:       return "57765";
14800         case TG3_PHY_ID_BCM5719C:       return "5719C";
14801         case TG3_PHY_ID_BCM5720C:       return "5720C";
14802         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
14803         case 0:                 return "serdes";
14804         default:                return "unknown";
14805         }
14806 }
14807
14808 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14809 {
14810         if (tg3_flag(tp, PCI_EXPRESS)) {
14811                 strcpy(str, "PCI Express");
14812                 return str;
14813         } else if (tg3_flag(tp, PCIX_MODE)) {
14814                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14815
14816                 strcpy(str, "PCIX:");
14817
		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}

static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

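	/* devfn packs the device number in the upper bits and the function
	 * number in the low three; masking off the function lets us probe
	 * every function in our own slot for the other port.
	 */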
	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* The 5704 can be configured in single-port mode; set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
	pci_dev_put(peer);

	return peer;
}

static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

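	/* 5705 and newer chips do not implement the irq-context coalescing
	 * knobs or a separate statistics tick, so zero them out here.
	 */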
	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}

static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};

static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	u32 hw_features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, i.e. anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Selectively allow TSO based on operating conditions */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) ||
	    (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
		tg3_flag_set(tp, TSO_CAPABLE);
	else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

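	/* The 5701 A0 always needs the core firmware fix, even when TSO
	 * firmware was cleared above.
	 */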
	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
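	/* (e.g. "ethtool -K ethX tso on", where ethX is this interface) */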
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (dev->features & NETIF_F_IP_CSUM))
		hw_features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (dev->features & NETIF_F_IPV6_CSUM)
			hw_features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			hw_features |= NETIF_F_TSO_ECN;
	}

	dev->hw_features |= hw_features;
	dev->features |= hw_features;
	dev->vlan_features |= hw_features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		dev->hw_features |= NETIF_F_LOOPBACK;

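	/* 5705 A1 parts without TSO on a slow bus are presumably limited
	 * to 64 pending rx descriptors (see MAX_RXPEND_64), so default
	 * the ring to 63.
	 */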
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_iounmap;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);

		if (tg3_flag(tp, ENABLE_ASF))
			tg3_read_dash_ver(tp);
	}

	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down cleanly.  Otherwise the DMA self test below would enable
	 * the WDMAC and we would see (spurious) pending DMA on the PCI
	 * bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
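		/* The first four interrupt mailboxes are spaced 8 bytes
		 * apart, the rest only 4 (presumably the latter are not
		 * full 64-bit mailbox registers).
		 */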
		if (i < 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

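		/* Walk the tx producer mailboxes in a zig-zag: subtracting
		 * 4 steps to the other 32-bit half of the current 64-bit
		 * register, adding 0xc advances to the next register pair.
		 */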
		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

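	/* Cache the config space so the PCI error handlers can restore
	 * it after a bus reset.
	 */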
	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		cancel_work_sync(&tp->reset_task);

		/* Tear down phylib state only if it was set up. */
		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	flush_work_sync(&tp->reset_task);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

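		/* Powering down failed; restart the hardware so the
		 * device stays usable, but still report the error.
		 */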
		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * tg3_io_error_detected - called when a PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);
	tg3_flag_clear(tp, RESTART_TIMER);

	/* Want to make sure that the reset task doesn't run */
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
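	/* Clear RESTART_TIMER again in case the reset task re-set it
	 * before it was cancelled.
	 */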
	tg3_flag_clear(tp, RESTART_TIMER);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
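	/* Re-save the config space; restoring it may have invalidated
	 * the saved copy that a later reset would need.
	 */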
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err) {
		netdev_err(netdev, "Failed to restore register access.\n");
		goto done;
	}

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}

static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);