2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2012 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
48 #include <net/checksum.h>
52 #include <asm/byteorder.h>
53 #include <linux/uaccess.h>
56 #include <asm/idprom.h>
65 /* Functions & macros to verify TG3_FLAGS types */
67 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
69 return test_bit(flag, bits);
72 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
77 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
79 clear_bit(flag, bits);
82 #define tg3_flag(tp, flag) \
83 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
84 #define tg3_flag_set(tp, flag) \
85 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
86 #define tg3_flag_clear(tp, flag) \
87 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89 #define DRV_MODULE_NAME "tg3"
91 #define TG3_MIN_NUM 123
92 #define DRV_MODULE_VERSION \
93 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
94 #define DRV_MODULE_RELDATE "March 21, 2012"
96 #define RESET_KIND_SHUTDOWN 0
97 #define RESET_KIND_INIT 1
98 #define RESET_KIND_SUSPEND 2
100 #define TG3_DEF_RX_MODE 0
101 #define TG3_DEF_TX_MODE 0
102 #define TG3_DEF_MSG_ENABLE \
112 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
114 /* length of time before we decide the hardware is borked,
115 * and dev->tx_timeout() should be called to fix the problem
118 #define TG3_TX_TIMEOUT (5 * HZ)
120 /* hardware minimum and maximum for a single frame's data payload */
121 #define TG3_MIN_MTU 60
122 #define TG3_MAX_MTU(tp) \
123 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
125 /* These numbers seem to be hard coded in the NIC firmware somehow.
126 * You can't change the ring sizes, but you can change where you place
127 * them in the NIC onboard memory.
129 #define TG3_RX_STD_RING_SIZE(tp) \
130 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
131 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
132 #define TG3_DEF_RX_RING_PENDING 200
133 #define TG3_RX_JMB_RING_SIZE(tp) \
134 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
135 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
136 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
138 /* Do not place this n-ring entries value into the tp struct itself,
139 * we really want to expose these constants to GCC so that modulo et
140 * al. operations are done with shifts and masks instead of with
141 * hw multiply/modulo instructions. Another solution would be to
142 * replace things like '% foo' with '& (foo - 1)'.
145 #define TG3_TX_RING_SIZE 512
146 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
148 #define TG3_RX_STD_RING_BYTES(tp) \
149 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
150 #define TG3_RX_JMB_RING_BYTES(tp) \
151 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
152 #define TG3_RX_RCB_RING_BYTES(tp) \
153 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
154 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
156 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
158 #define TG3_DMA_BYTE_ENAB 64
160 #define TG3_RX_STD_DMA_SZ 1536
161 #define TG3_RX_JMB_DMA_SZ 9046
163 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
165 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
166 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
168 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
169 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
171 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
172 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
174 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
175 * that are at least dword aligned when used in PCIX mode. The driver
176 * works around this bug by double copying the packet. This workaround
177 * is built into the normal double copy length check for efficiency.
179 * However, the double copy is only necessary on those architectures
180 * where unaligned memory accesses are inefficient. For those architectures
181 * where unaligned memory accesses incur little penalty, we can reintegrate
182 * the 5701 in the normal rx path. Doing so saves a device structure
183 * dereference by hardcoding the double copy threshold in place.
185 #define TG3_RX_COPY_THRESHOLD 256
186 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
187 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
189 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
192 #if (NET_IP_ALIGN != 0)
193 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
195 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
198 /* minimum number of free TX descriptors required to wake up TX process */
199 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
200 #define TG3_TX_BD_DMA_MAX_2K 2048
201 #define TG3_TX_BD_DMA_MAX_4K 4096
203 #define TG3_RAW_IP_ALIGN 2
205 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
206 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
208 #define FIRMWARE_TG3 "tigon/tg3.bin"
209 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
210 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
212 static char version[] __devinitdata =
213 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
215 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
216 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
217 MODULE_LICENSE("GPL");
218 MODULE_VERSION(DRV_MODULE_VERSION);
219 MODULE_FIRMWARE(FIRMWARE_TG3);
220 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
221 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
223 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
224 module_param(tg3_debug, int, 0);
225 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
227 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
302 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
303 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
304 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
305 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
306 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
307 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
308 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
309 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
313 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
315 static const struct {
316 const char string[ETH_GSTRING_LEN];
317 } ethtool_stats_keys[] = {
320 { "rx_ucast_packets" },
321 { "rx_mcast_packets" },
322 { "rx_bcast_packets" },
324 { "rx_align_errors" },
325 { "rx_xon_pause_rcvd" },
326 { "rx_xoff_pause_rcvd" },
327 { "rx_mac_ctrl_rcvd" },
328 { "rx_xoff_entered" },
329 { "rx_frame_too_long_errors" },
331 { "rx_undersize_packets" },
332 { "rx_in_length_errors" },
333 { "rx_out_length_errors" },
334 { "rx_64_or_less_octet_packets" },
335 { "rx_65_to_127_octet_packets" },
336 { "rx_128_to_255_octet_packets" },
337 { "rx_256_to_511_octet_packets" },
338 { "rx_512_to_1023_octet_packets" },
339 { "rx_1024_to_1522_octet_packets" },
340 { "rx_1523_to_2047_octet_packets" },
341 { "rx_2048_to_4095_octet_packets" },
342 { "rx_4096_to_8191_octet_packets" },
343 { "rx_8192_to_9022_octet_packets" },
350 { "tx_flow_control" },
352 { "tx_single_collisions" },
353 { "tx_mult_collisions" },
355 { "tx_excessive_collisions" },
356 { "tx_late_collisions" },
357 { "tx_collide_2times" },
358 { "tx_collide_3times" },
359 { "tx_collide_4times" },
360 { "tx_collide_5times" },
361 { "tx_collide_6times" },
362 { "tx_collide_7times" },
363 { "tx_collide_8times" },
364 { "tx_collide_9times" },
365 { "tx_collide_10times" },
366 { "tx_collide_11times" },
367 { "tx_collide_12times" },
368 { "tx_collide_13times" },
369 { "tx_collide_14times" },
370 { "tx_collide_15times" },
371 { "tx_ucast_packets" },
372 { "tx_mcast_packets" },
373 { "tx_bcast_packets" },
374 { "tx_carrier_sense_errors" },
378 { "dma_writeq_full" },
379 { "dma_write_prioq_full" },
383 { "rx_threshold_hit" },
385 { "dma_readq_full" },
386 { "dma_read_prioq_full" },
387 { "tx_comp_queue_full" },
389 { "ring_set_send_prod_index" },
390 { "ring_status_update" },
392 { "nic_avoided_irqs" },
393 { "nic_tx_threshold_hit" },
395 { "mbuf_lwm_thresh_hit" },
398 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
401 static const struct {
402 const char string[ETH_GSTRING_LEN];
403 } ethtool_test_keys[] = {
404 { "nvram test (online) " },
405 { "link test (online) " },
406 { "register test (offline)" },
407 { "memory test (offline)" },
408 { "mac loopback test (offline)" },
409 { "phy loopback test (offline)" },
410 { "ext loopback test (offline)" },
411 { "interrupt test (offline)" },
414 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
417 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
419 writel(val, tp->regs + off);
422 static u32 tg3_read32(struct tg3 *tp, u32 off)
424 return readl(tp->regs + off);
427 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
429 writel(val, tp->aperegs + off);
432 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
434 return readl(tp->aperegs + off);
437 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
441 spin_lock_irqsave(&tp->indirect_lock, flags);
442 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
443 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
444 spin_unlock_irqrestore(&tp->indirect_lock, flags);
447 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
449 writel(val, tp->regs + off);
450 readl(tp->regs + off);
453 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
458 spin_lock_irqsave(&tp->indirect_lock, flags);
459 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
460 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
461 spin_unlock_irqrestore(&tp->indirect_lock, flags);
465 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
469 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
470 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
471 TG3_64BIT_REG_LOW, val);
474 if (off == TG3_RX_STD_PROD_IDX_REG) {
475 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
476 TG3_64BIT_REG_LOW, val);
480 spin_lock_irqsave(&tp->indirect_lock, flags);
481 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
482 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
483 spin_unlock_irqrestore(&tp->indirect_lock, flags);
485 /* In indirect mode when disabling interrupts, we also need
486 * to clear the interrupt bit in the GRC local ctrl register.
488 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
490 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
491 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
495 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
500 spin_lock_irqsave(&tp->indirect_lock, flags);
501 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
502 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
503 spin_unlock_irqrestore(&tp->indirect_lock, flags);
507 /* usec_wait specifies the wait time in usec when writing to certain registers
508 * where it is unsafe to read back the register without some delay.
509 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
510 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
512 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
514 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
515 /* Non-posted methods */
516 tp->write32(tp, off, val);
519 tg3_write32(tp, off, val);
524 /* Wait again after the read for the posted method to guarantee that
525 * the wait time is met.
531 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
533 tp->write32_mbox(tp, off, val);
534 if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
535 tp->read32_mbox(tp, off);
538 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
540 void __iomem *mbox = tp->regs + off;
542 if (tg3_flag(tp, TXD_MBOX_HWBUG))
544 if (tg3_flag(tp, MBOX_WRITE_REORDER))
548 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
550 return readl(tp->regs + off + GRCMBOX_BASE);
553 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
555 writel(val, tp->regs + off + GRCMBOX_BASE);
558 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
559 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
560 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
561 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
562 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
564 #define tw32(reg, val) tp->write32(tp, reg, val)
565 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
566 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
567 #define tr32(reg) tp->read32(tp, reg)
569 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
573 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
574 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
577 spin_lock_irqsave(&tp->indirect_lock, flags);
578 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
579 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
580 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
582 /* Always leave this as zero. */
583 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
585 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
586 tw32_f(TG3PCI_MEM_WIN_DATA, val);
588 /* Always leave this as zero. */
589 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
591 spin_unlock_irqrestore(&tp->indirect_lock, flags);
594 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
598 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
599 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
604 spin_lock_irqsave(&tp->indirect_lock, flags);
605 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
606 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
607 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
609 /* Always leave this as zero. */
610 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
612 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
613 *val = tr32(TG3PCI_MEM_WIN_DATA);
615 /* Always leave this as zero. */
616 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
618 spin_unlock_irqrestore(&tp->indirect_lock, flags);
621 static void tg3_ape_lock_init(struct tg3 *tp)
626 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
627 regbase = TG3_APE_LOCK_GRANT;
629 regbase = TG3_APE_PER_LOCK_GRANT;
631 /* Make sure the driver hasn't any stale locks. */
632 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
634 case TG3_APE_LOCK_PHY0:
635 case TG3_APE_LOCK_PHY1:
636 case TG3_APE_LOCK_PHY2:
637 case TG3_APE_LOCK_PHY3:
638 bit = APE_LOCK_GRANT_DRIVER;
642 bit = APE_LOCK_GRANT_DRIVER;
644 bit = 1 << tp->pci_fn;
646 tg3_ape_write32(tp, regbase + 4 * i, bit);
651 static int tg3_ape_lock(struct tg3 *tp, int locknum)
655 u32 status, req, gnt, bit;
657 if (!tg3_flag(tp, ENABLE_APE))
661 case TG3_APE_LOCK_GPIO:
662 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
664 case TG3_APE_LOCK_GRC:
665 case TG3_APE_LOCK_MEM:
667 bit = APE_LOCK_REQ_DRIVER;
669 bit = 1 << tp->pci_fn;
675 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
676 req = TG3_APE_LOCK_REQ;
677 gnt = TG3_APE_LOCK_GRANT;
679 req = TG3_APE_PER_LOCK_REQ;
680 gnt = TG3_APE_PER_LOCK_GRANT;
685 tg3_ape_write32(tp, req + off, bit);
687 /* Wait for up to 1 millisecond to acquire lock. */
688 for (i = 0; i < 100; i++) {
689 status = tg3_ape_read32(tp, gnt + off);
696 /* Revoke the lock request. */
697 tg3_ape_write32(tp, gnt + off, bit);
704 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
708 if (!tg3_flag(tp, ENABLE_APE))
712 case TG3_APE_LOCK_GPIO:
713 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
715 case TG3_APE_LOCK_GRC:
716 case TG3_APE_LOCK_MEM:
718 bit = APE_LOCK_GRANT_DRIVER;
720 bit = 1 << tp->pci_fn;
726 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
727 gnt = TG3_APE_LOCK_GRANT;
729 gnt = TG3_APE_PER_LOCK_GRANT;
731 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
734 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
739 /* NCSI does not support APE events */
740 if (tg3_flag(tp, APE_HAS_NCSI))
743 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
744 if (apedata != APE_SEG_SIG_MAGIC)
747 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
748 if (!(apedata & APE_FW_STATUS_READY))
751 /* Wait for up to 1 millisecond for APE to service previous event. */
752 for (i = 0; i < 10; i++) {
753 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
756 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
758 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
759 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
760 event | APE_EVENT_STATUS_EVENT_PENDING);
762 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
764 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
770 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
771 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
774 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
779 if (!tg3_flag(tp, ENABLE_APE))
783 case RESET_KIND_INIT:
784 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
785 APE_HOST_SEG_SIG_MAGIC);
786 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
787 APE_HOST_SEG_LEN_MAGIC);
788 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
789 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
790 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
791 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
792 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
793 APE_HOST_BEHAV_NO_PHYLOCK);
794 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
795 TG3_APE_HOST_DRVR_STATE_START);
797 event = APE_EVENT_STATUS_STATE_START;
799 case RESET_KIND_SHUTDOWN:
800 /* With the interface we are currently using,
801 * APE does not track driver state. Wiping
802 * out the HOST SEGMENT SIGNATURE forces
803 * the APE to assume OS absent status.
805 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
807 if (device_may_wakeup(&tp->pdev->dev) &&
808 tg3_flag(tp, WOL_ENABLE)) {
809 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
810 TG3_APE_HOST_WOL_SPEED_AUTO);
811 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
813 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
815 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
817 event = APE_EVENT_STATUS_STATE_UNLOAD;
819 case RESET_KIND_SUSPEND:
820 event = APE_EVENT_STATUS_STATE_SUSPEND;
826 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
828 tg3_ape_send_event(tp, event);
831 static void tg3_disable_ints(struct tg3 *tp)
835 tw32(TG3PCI_MISC_HOST_CTRL,
836 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
837 for (i = 0; i < tp->irq_max; i++)
838 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
841 static void tg3_enable_ints(struct tg3 *tp)
848 tw32(TG3PCI_MISC_HOST_CTRL,
849 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
851 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
852 for (i = 0; i < tp->irq_cnt; i++) {
853 struct tg3_napi *tnapi = &tp->napi[i];
855 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
856 if (tg3_flag(tp, 1SHOT_MSI))
857 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
859 tp->coal_now |= tnapi->coal_now;
862 /* Force an initial interrupt */
863 if (!tg3_flag(tp, TAGGED_STATUS) &&
864 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
865 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
867 tw32(HOSTCC_MODE, tp->coal_now);
869 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
872 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
874 struct tg3 *tp = tnapi->tp;
875 struct tg3_hw_status *sblk = tnapi->hw_status;
876 unsigned int work_exists = 0;
878 /* check for phy events */
879 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
880 if (sblk->status & SD_STATUS_LINK_CHG)
884 /* check for TX work to do */
885 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
888 /* check for RX work to do */
889 if (tnapi->rx_rcb_prod_idx &&
890 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
897 * similar to tg3_enable_ints, but it accurately determines whether there
898 * is new work pending and can return without flushing the PIO write
899 * which reenables interrupts
901 static void tg3_int_reenable(struct tg3_napi *tnapi)
903 struct tg3 *tp = tnapi->tp;
905 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
908 /* When doing tagged status, this work check is unnecessary.
909 * The last_tag we write above tells the chip which piece of
910 * work we've completed.
912 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
913 tw32(HOSTCC_MODE, tp->coalesce_mode |
914 HOSTCC_MODE_ENABLE | tnapi->coal_now);
917 static void tg3_switch_clocks(struct tg3 *tp)
922 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
925 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
927 orig_clock_ctrl = clock_ctrl;
928 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
929 CLOCK_CTRL_CLKRUN_OENABLE |
931 tp->pci_clock_ctrl = clock_ctrl;
933 if (tg3_flag(tp, 5705_PLUS)) {
934 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
935 tw32_wait_f(TG3PCI_CLOCK_CTRL,
936 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
938 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
939 tw32_wait_f(TG3PCI_CLOCK_CTRL,
941 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
943 tw32_wait_f(TG3PCI_CLOCK_CTRL,
944 clock_ctrl | (CLOCK_CTRL_ALTCLK),
947 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
950 #define PHY_BUSY_LOOPS 5000
952 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
958 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
960 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
966 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
967 MI_COM_PHY_ADDR_MASK);
968 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
969 MI_COM_REG_ADDR_MASK);
970 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
972 tw32_f(MAC_MI_COM, frame_val);
974 loops = PHY_BUSY_LOOPS;
977 frame_val = tr32(MAC_MI_COM);
979 if ((frame_val & MI_COM_BUSY) == 0) {
981 frame_val = tr32(MAC_MI_COM);
989 *val = frame_val & MI_COM_DATA_MASK;
993 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
994 tw32_f(MAC_MI_MODE, tp->mi_mode);
1001 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1007 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1008 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1011 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1013 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1017 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1018 MI_COM_PHY_ADDR_MASK);
1019 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1020 MI_COM_REG_ADDR_MASK);
1021 frame_val |= (val & MI_COM_DATA_MASK);
1022 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1024 tw32_f(MAC_MI_COM, frame_val);
1026 loops = PHY_BUSY_LOOPS;
1027 while (loops != 0) {
1029 frame_val = tr32(MAC_MI_COM);
1030 if ((frame_val & MI_COM_BUSY) == 0) {
1032 frame_val = tr32(MAC_MI_COM);
1042 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1043 tw32_f(MAC_MI_MODE, tp->mi_mode);
1050 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1054 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1058 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1062 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1063 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1067 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1073 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1077 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1081 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1085 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1086 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1090 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1096 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1100 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1102 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1107 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1111 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1113 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1118 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1122 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1123 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1124 MII_TG3_AUXCTL_SHDWSEL_MISC);
1126 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1131 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1133 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1134 set |= MII_TG3_AUXCTL_MISC_WREN;
1136 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1139 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1140 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1141 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1142 MII_TG3_AUXCTL_ACTL_TX_6DB)
1144 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1145 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1146 MII_TG3_AUXCTL_ACTL_TX_6DB);
1148 static int tg3_bmcr_reset(struct tg3 *tp)
1153 /* OK, reset it, and poll the BMCR_RESET bit until it
1154 * clears or we time out.
1156 phy_control = BMCR_RESET;
1157 err = tg3_writephy(tp, MII_BMCR, phy_control);
1163 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1167 if ((phy_control & BMCR_RESET) == 0) {
1179 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1181 struct tg3 *tp = bp->priv;
1184 spin_lock_bh(&tp->lock);
1186 if (tg3_readphy(tp, reg, &val))
1189 spin_unlock_bh(&tp->lock);
1194 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1196 struct tg3 *tp = bp->priv;
1199 spin_lock_bh(&tp->lock);
1201 if (tg3_writephy(tp, reg, val))
1204 spin_unlock_bh(&tp->lock);
1209 static int tg3_mdio_reset(struct mii_bus *bp)
1214 static void tg3_mdio_config_5785(struct tg3 *tp)
1217 struct phy_device *phydev;
1219 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1220 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1221 case PHY_ID_BCM50610:
1222 case PHY_ID_BCM50610M:
1223 val = MAC_PHYCFG2_50610_LED_MODES;
1225 case PHY_ID_BCMAC131:
1226 val = MAC_PHYCFG2_AC131_LED_MODES;
1228 case PHY_ID_RTL8211C:
1229 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1231 case PHY_ID_RTL8201E:
1232 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1238 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1239 tw32(MAC_PHYCFG2, val);
1241 val = tr32(MAC_PHYCFG1);
1242 val &= ~(MAC_PHYCFG1_RGMII_INT |
1243 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1244 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1245 tw32(MAC_PHYCFG1, val);
1250 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1251 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1252 MAC_PHYCFG2_FMODE_MASK_MASK |
1253 MAC_PHYCFG2_GMODE_MASK_MASK |
1254 MAC_PHYCFG2_ACT_MASK_MASK |
1255 MAC_PHYCFG2_QUAL_MASK_MASK |
1256 MAC_PHYCFG2_INBAND_ENABLE;
1258 tw32(MAC_PHYCFG2, val);
1260 val = tr32(MAC_PHYCFG1);
1261 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1262 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1263 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1264 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1265 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1266 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1267 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1269 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1270 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1271 tw32(MAC_PHYCFG1, val);
1273 val = tr32(MAC_EXT_RGMII_MODE);
1274 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1275 MAC_RGMII_MODE_RX_QUALITY |
1276 MAC_RGMII_MODE_RX_ACTIVITY |
1277 MAC_RGMII_MODE_RX_ENG_DET |
1278 MAC_RGMII_MODE_TX_ENABLE |
1279 MAC_RGMII_MODE_TX_LOWPWR |
1280 MAC_RGMII_MODE_TX_RESET);
1281 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1282 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1283 val |= MAC_RGMII_MODE_RX_INT_B |
1284 MAC_RGMII_MODE_RX_QUALITY |
1285 MAC_RGMII_MODE_RX_ACTIVITY |
1286 MAC_RGMII_MODE_RX_ENG_DET;
1287 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1288 val |= MAC_RGMII_MODE_TX_ENABLE |
1289 MAC_RGMII_MODE_TX_LOWPWR |
1290 MAC_RGMII_MODE_TX_RESET;
1292 tw32(MAC_EXT_RGMII_MODE, val);
1295 static void tg3_mdio_start(struct tg3 *tp)
1297 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1298 tw32_f(MAC_MI_MODE, tp->mi_mode);
1301 if (tg3_flag(tp, MDIOBUS_INITED) &&
1302 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1303 tg3_mdio_config_5785(tp);
1306 static int tg3_mdio_init(struct tg3 *tp)
1310 struct phy_device *phydev;
1312 if (tg3_flag(tp, 5717_PLUS)) {
1315 tp->phy_addr = tp->pci_fn + 1;
1317 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1318 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1320 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1321 TG3_CPMU_PHY_STRAP_IS_SERDES;
1325 tp->phy_addr = TG3_PHY_MII_ADDR;
1329 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1332 tp->mdio_bus = mdiobus_alloc();
1333 if (tp->mdio_bus == NULL)
1336 tp->mdio_bus->name = "tg3 mdio bus";
1337 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1338 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1339 tp->mdio_bus->priv = tp;
1340 tp->mdio_bus->parent = &tp->pdev->dev;
1341 tp->mdio_bus->read = &tg3_mdio_read;
1342 tp->mdio_bus->write = &tg3_mdio_write;
1343 tp->mdio_bus->reset = &tg3_mdio_reset;
1344 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1345 tp->mdio_bus->irq = &tp->mdio_irq[0];
1347 for (i = 0; i < PHY_MAX_ADDR; i++)
1348 tp->mdio_bus->irq[i] = PHY_POLL;
1350 /* The bus registration will look for all the PHYs on the mdio bus.
1351 * Unfortunately, it does not ensure the PHY is powered up before
1352 * accessing the PHY ID registers. A chip reset is the
1353 * quickest way to bring the device back to an operational state..
1355 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1358 i = mdiobus_register(tp->mdio_bus);
1360 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1361 mdiobus_free(tp->mdio_bus);
1365 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1367 if (!phydev || !phydev->drv) {
1368 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1369 mdiobus_unregister(tp->mdio_bus);
1370 mdiobus_free(tp->mdio_bus);
1374 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1375 case PHY_ID_BCM57780:
1376 phydev->interface = PHY_INTERFACE_MODE_GMII;
1377 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1379 case PHY_ID_BCM50610:
1380 case PHY_ID_BCM50610M:
1381 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1382 PHY_BRCM_RX_REFCLK_UNUSED |
1383 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1384 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1385 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1386 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1387 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1388 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1389 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1390 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1392 case PHY_ID_RTL8211C:
1393 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1395 case PHY_ID_RTL8201E:
1396 case PHY_ID_BCMAC131:
1397 phydev->interface = PHY_INTERFACE_MODE_MII;
1398 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1399 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1403 tg3_flag_set(tp, MDIOBUS_INITED);
1405 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1406 tg3_mdio_config_5785(tp);
1411 static void tg3_mdio_fini(struct tg3 *tp)
1413 if (tg3_flag(tp, MDIOBUS_INITED)) {
1414 tg3_flag_clear(tp, MDIOBUS_INITED);
1415 mdiobus_unregister(tp->mdio_bus);
1416 mdiobus_free(tp->mdio_bus);
1420 /* tp->lock is held. */
1421 static inline void tg3_generate_fw_event(struct tg3 *tp)
1425 val = tr32(GRC_RX_CPU_EVENT);
1426 val |= GRC_RX_CPU_DRIVER_EVENT;
1427 tw32_f(GRC_RX_CPU_EVENT, val);
1429 tp->last_event_jiffies = jiffies;
1432 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1434 /* tp->lock is held. */
1435 static void tg3_wait_for_event_ack(struct tg3 *tp)
1438 unsigned int delay_cnt;
1441 /* If enough time has passed, no wait is necessary. */
1442 time_remain = (long)(tp->last_event_jiffies + 1 +
1443 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1445 if (time_remain < 0)
1448 /* Check if we can shorten the wait time. */
1449 delay_cnt = jiffies_to_usecs(time_remain);
1450 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1451 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1452 delay_cnt = (delay_cnt >> 3) + 1;
1454 for (i = 0; i < delay_cnt; i++) {
1455 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1461 /* tp->lock is held. */
1462 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1467 if (!tg3_readphy(tp, MII_BMCR, ®))
1469 if (!tg3_readphy(tp, MII_BMSR, ®))
1470 val |= (reg & 0xffff);
1474 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1476 if (!tg3_readphy(tp, MII_LPA, ®))
1477 val |= (reg & 0xffff);
1481 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1482 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1484 if (!tg3_readphy(tp, MII_STAT1000, ®))
1485 val |= (reg & 0xffff);
1489 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1496 /* tp->lock is held. */
1497 static void tg3_ump_link_report(struct tg3 *tp)
1501 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1504 tg3_phy_gather_ump_data(tp, data);
1506 tg3_wait_for_event_ack(tp);
1508 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1509 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1510 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1511 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1512 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1513 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1515 tg3_generate_fw_event(tp);
1518 /* tp->lock is held. */
1519 static void tg3_stop_fw(struct tg3 *tp)
1521 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1522 /* Wait for RX cpu to ACK the previous event. */
1523 tg3_wait_for_event_ack(tp);
1525 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1527 tg3_generate_fw_event(tp);
1529 /* Wait for RX cpu to ACK this event. */
1530 tg3_wait_for_event_ack(tp);
1534 /* tp->lock is held. */
1535 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1537 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1538 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1540 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1542 case RESET_KIND_INIT:
1543 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1547 case RESET_KIND_SHUTDOWN:
1548 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1552 case RESET_KIND_SUSPEND:
1553 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1562 if (kind == RESET_KIND_INIT ||
1563 kind == RESET_KIND_SUSPEND)
1564 tg3_ape_driver_state_change(tp, kind);
1567 /* tp->lock is held. */
1568 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1570 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1572 case RESET_KIND_INIT:
1573 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1574 DRV_STATE_START_DONE);
1577 case RESET_KIND_SHUTDOWN:
1578 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1579 DRV_STATE_UNLOAD_DONE);
1587 if (kind == RESET_KIND_SHUTDOWN)
1588 tg3_ape_driver_state_change(tp, kind);
1591 /* tp->lock is held. */
1592 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1594 if (tg3_flag(tp, ENABLE_ASF)) {
1596 case RESET_KIND_INIT:
1597 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1601 case RESET_KIND_SHUTDOWN:
1602 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1606 case RESET_KIND_SUSPEND:
1607 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1617 static int tg3_poll_fw(struct tg3 *tp)
1622 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1623 /* Wait up to 20ms for init done. */
1624 for (i = 0; i < 200; i++) {
1625 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1632 /* Wait for firmware initialization to complete. */
1633 for (i = 0; i < 100000; i++) {
1634 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1635 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1640 /* Chip might not be fitted with firmware. Some Sun onboard
1641 * parts are configured like that. So don't signal the timeout
1642 * of the above loop as an error, but do report the lack of
1643 * running firmware once.
1645 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1646 tg3_flag_set(tp, NO_FWARE_REPORTED);
1648 netdev_info(tp->dev, "No firmware running\n");
1651 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1652 /* The 57765 A0 needs a little more
1653 * time to do some important work.
1661 static void tg3_link_report(struct tg3 *tp)
1663 if (!netif_carrier_ok(tp->dev)) {
1664 netif_info(tp, link, tp->dev, "Link is down\n");
1665 tg3_ump_link_report(tp);
1666 } else if (netif_msg_link(tp)) {
1667 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1668 (tp->link_config.active_speed == SPEED_1000 ?
1670 (tp->link_config.active_speed == SPEED_100 ?
1672 (tp->link_config.active_duplex == DUPLEX_FULL ?
1675 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1676 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1678 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1681 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1682 netdev_info(tp->dev, "EEE is %s\n",
1683 tp->setlpicnt ? "enabled" : "disabled");
1685 tg3_ump_link_report(tp);
1689 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1693 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1694 miireg = ADVERTISE_1000XPAUSE;
1695 else if (flow_ctrl & FLOW_CTRL_TX)
1696 miireg = ADVERTISE_1000XPSE_ASYM;
1697 else if (flow_ctrl & FLOW_CTRL_RX)
1698 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1705 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1709 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1710 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1711 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1712 if (lcladv & ADVERTISE_1000XPAUSE)
1714 if (rmtadv & ADVERTISE_1000XPAUSE)
1721 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1725 u32 old_rx_mode = tp->rx_mode;
1726 u32 old_tx_mode = tp->tx_mode;
1728 if (tg3_flag(tp, USE_PHYLIB))
1729 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1731 autoneg = tp->link_config.autoneg;
1733 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1734 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1735 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1737 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1739 flowctrl = tp->link_config.flowctrl;
1741 tp->link_config.active_flowctrl = flowctrl;
1743 if (flowctrl & FLOW_CTRL_RX)
1744 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1746 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1748 if (old_rx_mode != tp->rx_mode)
1749 tw32_f(MAC_RX_MODE, tp->rx_mode);
1751 if (flowctrl & FLOW_CTRL_TX)
1752 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1754 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1756 if (old_tx_mode != tp->tx_mode)
1757 tw32_f(MAC_TX_MODE, tp->tx_mode);
1760 static void tg3_adjust_link(struct net_device *dev)
1762 u8 oldflowctrl, linkmesg = 0;
1763 u32 mac_mode, lcl_adv, rmt_adv;
1764 struct tg3 *tp = netdev_priv(dev);
1765 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1767 spin_lock_bh(&tp->lock);
1769 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1770 MAC_MODE_HALF_DUPLEX);
1772 oldflowctrl = tp->link_config.active_flowctrl;
1778 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1779 mac_mode |= MAC_MODE_PORT_MODE_MII;
1780 else if (phydev->speed == SPEED_1000 ||
1781 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1782 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1784 mac_mode |= MAC_MODE_PORT_MODE_MII;
1786 if (phydev->duplex == DUPLEX_HALF)
1787 mac_mode |= MAC_MODE_HALF_DUPLEX;
1789 lcl_adv = mii_advertise_flowctrl(
1790 tp->link_config.flowctrl);
1793 rmt_adv = LPA_PAUSE_CAP;
1794 if (phydev->asym_pause)
1795 rmt_adv |= LPA_PAUSE_ASYM;
1798 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1800 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1802 if (mac_mode != tp->mac_mode) {
1803 tp->mac_mode = mac_mode;
1804 tw32_f(MAC_MODE, tp->mac_mode);
1808 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1809 if (phydev->speed == SPEED_10)
1811 MAC_MI_STAT_10MBPS_MODE |
1812 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1814 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1817 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1818 tw32(MAC_TX_LENGTHS,
1819 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1820 (6 << TX_LENGTHS_IPG_SHIFT) |
1821 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1823 tw32(MAC_TX_LENGTHS,
1824 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1825 (6 << TX_LENGTHS_IPG_SHIFT) |
1826 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1828 if (phydev->link != tp->old_link ||
1829 phydev->speed != tp->link_config.active_speed ||
1830 phydev->duplex != tp->link_config.active_duplex ||
1831 oldflowctrl != tp->link_config.active_flowctrl)
1834 tp->old_link = phydev->link;
1835 tp->link_config.active_speed = phydev->speed;
1836 tp->link_config.active_duplex = phydev->duplex;
1838 spin_unlock_bh(&tp->lock);
1841 tg3_link_report(tp);
1844 static int tg3_phy_init(struct tg3 *tp)
1846 struct phy_device *phydev;
1848 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1851 /* Bring the PHY back to a known state. */
1854 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1856 /* Attach the MAC to the PHY. */
1857 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1858 phydev->dev_flags, phydev->interface);
1859 if (IS_ERR(phydev)) {
1860 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1861 return PTR_ERR(phydev);
1864 /* Mask with MAC supported features. */
1865 switch (phydev->interface) {
1866 case PHY_INTERFACE_MODE_GMII:
1867 case PHY_INTERFACE_MODE_RGMII:
1868 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1869 phydev->supported &= (PHY_GBIT_FEATURES |
1871 SUPPORTED_Asym_Pause);
1875 case PHY_INTERFACE_MODE_MII:
1876 phydev->supported &= (PHY_BASIC_FEATURES |
1878 SUPPORTED_Asym_Pause);
1881 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1885 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1887 phydev->advertising = phydev->supported;
1892 static void tg3_phy_start(struct tg3 *tp)
1894 struct phy_device *phydev;
1896 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1899 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1901 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1902 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1903 phydev->speed = tp->link_config.speed;
1904 phydev->duplex = tp->link_config.duplex;
1905 phydev->autoneg = tp->link_config.autoneg;
1906 phydev->advertising = tp->link_config.advertising;
1911 phy_start_aneg(phydev);
1914 static void tg3_phy_stop(struct tg3 *tp)
1916 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1919 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1922 static void tg3_phy_fini(struct tg3 *tp)
1924 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1925 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1926 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1930 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1935 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1938 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1939 /* Cannot do read-modify-write on 5401 */
1940 err = tg3_phy_auxctl_write(tp,
1941 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1942 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1947 err = tg3_phy_auxctl_read(tp,
1948 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1952 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1953 err = tg3_phy_auxctl_write(tp,
1954 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1960 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1964 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1967 tg3_writephy(tp, MII_TG3_FET_TEST,
1968 phytest | MII_TG3_FET_SHADOW_EN);
1969 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1971 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1973 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1974 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1976 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1980 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1984 if (!tg3_flag(tp, 5705_PLUS) ||
1985 (tg3_flag(tp, 5717_PLUS) &&
1986 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1989 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1990 tg3_phy_fet_toggle_apd(tp, enable);
1994 reg = MII_TG3_MISC_SHDW_WREN |
1995 MII_TG3_MISC_SHDW_SCR5_SEL |
1996 MII_TG3_MISC_SHDW_SCR5_LPED |
1997 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1998 MII_TG3_MISC_SHDW_SCR5_SDTL |
1999 MII_TG3_MISC_SHDW_SCR5_C125OE;
2000 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2001 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2003 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2006 reg = MII_TG3_MISC_SHDW_WREN |
2007 MII_TG3_MISC_SHDW_APD_SEL |
2008 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2010 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2012 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2015 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2019 if (!tg3_flag(tp, 5705_PLUS) ||
2020 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2023 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2026 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2027 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2029 tg3_writephy(tp, MII_TG3_FET_TEST,
2030 ephy | MII_TG3_FET_SHADOW_EN);
2031 if (!tg3_readphy(tp, reg, &phy)) {
2033 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2035 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2036 tg3_writephy(tp, reg, phy);
2038 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2043 ret = tg3_phy_auxctl_read(tp,
2044 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2047 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2049 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2050 tg3_phy_auxctl_write(tp,
2051 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2056 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2061 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2064 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2066 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2067 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2070 static void tg3_phy_apply_otp(struct tg3 *tp)
2079 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2082 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2083 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2084 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2086 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2087 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2088 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2090 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2091 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2092 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2094 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2095 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2097 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2098 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2100 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2101 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2102 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2104 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2107 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2111 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2116 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2117 current_link_up == 1 &&
2118 tp->link_config.active_duplex == DUPLEX_FULL &&
2119 (tp->link_config.active_speed == SPEED_100 ||
2120 tp->link_config.active_speed == SPEED_1000)) {
2123 if (tp->link_config.active_speed == SPEED_1000)
2124 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2126 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2128 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2130 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2131 TG3_CL45_D7_EEERES_STAT, &val);
2133 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2134 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2138 if (!tp->setlpicnt) {
2139 if (current_link_up == 1 &&
2140 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2141 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2142 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2145 val = tr32(TG3_CPMU_EEE_MODE);
2146 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2150 static void tg3_phy_eee_enable(struct tg3 *tp)
2154 if (tp->link_config.active_speed == SPEED_1000 &&
2155 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2156 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2157 tg3_flag(tp, 57765_CLASS)) &&
2158 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2159 val = MII_TG3_DSP_TAP26_ALNOKO |
2160 MII_TG3_DSP_TAP26_RMRXSTO;
2161 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2162 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2165 val = tr32(TG3_CPMU_EEE_MODE);
2166 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2169 static int tg3_wait_macro_done(struct tg3 *tp)
2176 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2177 if ((tmp32 & 0x1000) == 0)
2187 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2189 static const u32 test_pat[4][6] = {
2190 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2191 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2192 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2193 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2197 for (chan = 0; chan < 4; chan++) {
2200 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2201 (chan * 0x2000) | 0x0200);
2202 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2204 for (i = 0; i < 6; i++)
2205 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2208 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2209 if (tg3_wait_macro_done(tp)) {
2214 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2215 (chan * 0x2000) | 0x0200);
2216 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2217 if (tg3_wait_macro_done(tp)) {
2222 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2223 if (tg3_wait_macro_done(tp)) {
2228 for (i = 0; i < 6; i += 2) {
2231 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2232 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2233 tg3_wait_macro_done(tp)) {
2239 if (low != test_pat[chan][i] ||
2240 high != test_pat[chan][i+1]) {
2241 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2242 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2243 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2253 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2257 for (chan = 0; chan < 4; chan++) {
2260 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2261 (chan * 0x2000) | 0x0200);
2262 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2263 for (i = 0; i < 6; i++)
2264 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2265 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2266 if (tg3_wait_macro_done(tp))
2273 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2275 u32 reg32, phy9_orig;
2276 int retries, do_phy_reset, err;
2282 err = tg3_bmcr_reset(tp);
2288 /* Disable transmitter and interrupt. */
2289 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
2293 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2295 /* Set full-duplex, 1000 mbps. */
2296 tg3_writephy(tp, MII_BMCR,
2297 BMCR_FULLDPLX | BMCR_SPEED1000);
2299 /* Set to master mode. */
2300 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2303 tg3_writephy(tp, MII_CTRL1000,
2304 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2306 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2310 /* Block the PHY control access. */
2311 tg3_phydsp_write(tp, 0x8005, 0x0800);
2313 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2316 } while (--retries);
2318 err = tg3_phy_reset_chanpat(tp);
2322 tg3_phydsp_write(tp, 0x8005, 0x0000);
2324 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2325 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2327 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2329 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2331 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
2333 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2340 /* This will reset the tigon3 PHY if there is no valid
2341 * link unless the FORCE argument is non-zero.
2343 static int tg3_phy_reset(struct tg3 *tp)
2348 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2349 val = tr32(GRC_MISC_CFG);
2350 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2353 err = tg3_readphy(tp, MII_BMSR, &val);
2354 err |= tg3_readphy(tp, MII_BMSR, &val);
2358 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2359 netif_carrier_off(tp->dev);
2360 tg3_link_report(tp);
2363 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2364 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2365 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2366 err = tg3_phy_reset_5703_4_5(tp);
2373 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2374 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2375 cpmuctrl = tr32(TG3_CPMU_CTRL);
2376 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2378 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2381 err = tg3_bmcr_reset(tp);
2385 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2386 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2387 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2389 tw32(TG3_CPMU_CTRL, cpmuctrl);
2392 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2393 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2394 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2395 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2396 CPMU_LSPD_1000MB_MACCLK_12_5) {
2397 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2399 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2403 if (tg3_flag(tp, 5717_PLUS) &&
2404 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2407 tg3_phy_apply_otp(tp);
2409 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2410 tg3_phy_toggle_apd(tp, true);
2412 tg3_phy_toggle_apd(tp, false);
2415 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2416 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2417 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2418 tg3_phydsp_write(tp, 0x000a, 0x0323);
2419 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2422 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2423 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2424 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2427 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2428 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2429 tg3_phydsp_write(tp, 0x000a, 0x310b);
2430 tg3_phydsp_write(tp, 0x201f, 0x9506);
2431 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2432 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2434 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2435 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2436 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2437 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2438 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2439 tg3_writephy(tp, MII_TG3_TEST1,
2440 MII_TG3_TEST1_TRIM_EN | 0x4);
2442 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2444 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2448 /* Set Extended packet length bit (bit 14) on all chips that */
2449 /* support jumbo frames */
2450 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2451 /* Cannot do read-modify-write on 5401 */
2452 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2453 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2454 /* Set bit 14 with read-modify-write to preserve other bits */
2455 err = tg3_phy_auxctl_read(tp,
2456 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2458 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2459 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2462 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2463 * jumbo frames transmission.
2465 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2466 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2467 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2468 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2471 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2472 /* adjust output voltage */
2473 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2476 tg3_phy_toggle_automdix(tp, 1);
2477 tg3_phy_set_wirespeed(tp);
2481 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2482 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2483 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2484 TG3_GPIO_MSG_NEED_VAUX)
2485 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2486 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2487 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2488 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2489 (TG3_GPIO_MSG_DRVR_PRES << 12))
2491 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2492 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2493 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2494 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2495 (TG3_GPIO_MSG_NEED_VAUX << 12))
2497 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2501 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2502 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2503 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2505 status = tr32(TG3_CPMU_DRV_STATUS);
2507 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2508 status &= ~(TG3_GPIO_MSG_MASK << shift);
2509 status |= (newstat << shift);
2511 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2512 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2513 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2515 tw32(TG3_CPMU_DRV_STATUS, status);
2517 return status >> TG3_APE_GPIO_MSG_SHIFT;
2520 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2522 if (!tg3_flag(tp, IS_NIC))
2525 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2526 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2527 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2528 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2531 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2533 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2534 TG3_GRC_LCLCTL_PWRSW_DELAY);
2536 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2538 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2539 TG3_GRC_LCLCTL_PWRSW_DELAY);
2545 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2549 if (!tg3_flag(tp, IS_NIC) ||
2550 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2551 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2554 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2556 tw32_wait_f(GRC_LOCAL_CTRL,
2557 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2558 TG3_GRC_LCLCTL_PWRSW_DELAY);
2560 tw32_wait_f(GRC_LOCAL_CTRL,
2562 TG3_GRC_LCLCTL_PWRSW_DELAY);
2564 tw32_wait_f(GRC_LOCAL_CTRL,
2565 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2566 TG3_GRC_LCLCTL_PWRSW_DELAY);
2569 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2571 if (!tg3_flag(tp, IS_NIC))
2574 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2575 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2576 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2577 (GRC_LCLCTRL_GPIO_OE0 |
2578 GRC_LCLCTRL_GPIO_OE1 |
2579 GRC_LCLCTRL_GPIO_OE2 |
2580 GRC_LCLCTRL_GPIO_OUTPUT0 |
2581 GRC_LCLCTRL_GPIO_OUTPUT1),
2582 TG3_GRC_LCLCTL_PWRSW_DELAY);
2583 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2584 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2585 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2586 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2587 GRC_LCLCTRL_GPIO_OE1 |
2588 GRC_LCLCTRL_GPIO_OE2 |
2589 GRC_LCLCTRL_GPIO_OUTPUT0 |
2590 GRC_LCLCTRL_GPIO_OUTPUT1 |
2592 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2593 TG3_GRC_LCLCTL_PWRSW_DELAY);
2595 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2596 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2597 TG3_GRC_LCLCTL_PWRSW_DELAY);
2599 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2600 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2601 TG3_GRC_LCLCTL_PWRSW_DELAY);
2604 u32 grc_local_ctrl = 0;
2606 /* Workaround to prevent overdrawing Amps. */
2607 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2608 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2609 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2611 TG3_GRC_LCLCTL_PWRSW_DELAY);
2614 /* On 5753 and variants, GPIO2 cannot be used. */
2615 no_gpio2 = tp->nic_sram_data_cfg &
2616 NIC_SRAM_DATA_CFG_NO_GPIO2;
2618 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2619 GRC_LCLCTRL_GPIO_OE1 |
2620 GRC_LCLCTRL_GPIO_OE2 |
2621 GRC_LCLCTRL_GPIO_OUTPUT1 |
2622 GRC_LCLCTRL_GPIO_OUTPUT2;
2624 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2625 GRC_LCLCTRL_GPIO_OUTPUT2);
2627 tw32_wait_f(GRC_LOCAL_CTRL,
2628 tp->grc_local_ctrl | grc_local_ctrl,
2629 TG3_GRC_LCLCTL_PWRSW_DELAY);
2631 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2633 tw32_wait_f(GRC_LOCAL_CTRL,
2634 tp->grc_local_ctrl | grc_local_ctrl,
2635 TG3_GRC_LCLCTL_PWRSW_DELAY);
2638 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2639 tw32_wait_f(GRC_LOCAL_CTRL,
2640 tp->grc_local_ctrl | grc_local_ctrl,
2641 TG3_GRC_LCLCTL_PWRSW_DELAY);
2646 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2650 /* Serialize power state transitions */
2651 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2654 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2655 msg = TG3_GPIO_MSG_NEED_VAUX;
2657 msg = tg3_set_function_status(tp, msg);
2659 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2662 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2663 tg3_pwrsrc_switch_to_vaux(tp);
2665 tg3_pwrsrc_die_with_vmain(tp);
2668 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2671 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2673 bool need_vaux = false;
2675 /* The GPIOs do something completely different on 57765. */
2676 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2679 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2680 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2681 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2682 tg3_frob_aux_power_5717(tp, include_wol ?
2683 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2687 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2688 struct net_device *dev_peer;
2690 dev_peer = pci_get_drvdata(tp->pdev_peer);
2692 /* remove_one() may have been run on the peer. */
2694 struct tg3 *tp_peer = netdev_priv(dev_peer);
2696 if (tg3_flag(tp_peer, INIT_COMPLETE))
2699 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2700 tg3_flag(tp_peer, ENABLE_ASF))
2705 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2706 tg3_flag(tp, ENABLE_ASF))
2710 tg3_pwrsrc_switch_to_vaux(tp);
2712 tg3_pwrsrc_die_with_vmain(tp);
2715 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2717 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2719 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2720 if (speed != SPEED_10)
2722 } else if (speed == SPEED_10)
2728 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2732 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2733 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2734 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2735 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2738 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2739 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2740 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2745 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2747 val = tr32(GRC_MISC_CFG);
2748 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2751 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2753 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2756 tg3_writephy(tp, MII_ADVERTISE, 0);
2757 tg3_writephy(tp, MII_BMCR,
2758 BMCR_ANENABLE | BMCR_ANRESTART);
2760 tg3_writephy(tp, MII_TG3_FET_TEST,
2761 phytest | MII_TG3_FET_SHADOW_EN);
2762 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2763 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2765 MII_TG3_FET_SHDW_AUXMODE4,
2768 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2771 } else if (do_low_power) {
2772 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2773 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2775 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2776 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2777 MII_TG3_AUXCTL_PCTL_VREG_11V;
2778 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2781 /* The PHY should not be powered down on some chips because
2784 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2785 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2786 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2787 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2788 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2792 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2793 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2794 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2795 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2796 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2797 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2800 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2803 /* tp->lock is held. */
2804 static int tg3_nvram_lock(struct tg3 *tp)
2806 if (tg3_flag(tp, NVRAM)) {
2809 if (tp->nvram_lock_cnt == 0) {
2810 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2811 for (i = 0; i < 8000; i++) {
2812 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2817 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2821 tp->nvram_lock_cnt++;
2826 /* tp->lock is held. */
2827 static void tg3_nvram_unlock(struct tg3 *tp)
2829 if (tg3_flag(tp, NVRAM)) {
2830 if (tp->nvram_lock_cnt > 0)
2831 tp->nvram_lock_cnt--;
2832 if (tp->nvram_lock_cnt == 0)
2833 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2837 /* tp->lock is held. */
2838 static void tg3_enable_nvram_access(struct tg3 *tp)
2840 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2841 u32 nvaccess = tr32(NVRAM_ACCESS);
2843 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2847 /* tp->lock is held. */
2848 static void tg3_disable_nvram_access(struct tg3 *tp)
2850 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2851 u32 nvaccess = tr32(NVRAM_ACCESS);
2853 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2857 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2858 u32 offset, u32 *val)
2863 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2866 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2867 EEPROM_ADDR_DEVID_MASK |
2869 tw32(GRC_EEPROM_ADDR,
2871 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2872 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2873 EEPROM_ADDR_ADDR_MASK) |
2874 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2876 for (i = 0; i < 1000; i++) {
2877 tmp = tr32(GRC_EEPROM_ADDR);
2879 if (tmp & EEPROM_ADDR_COMPLETE)
2883 if (!(tmp & EEPROM_ADDR_COMPLETE))
2886 tmp = tr32(GRC_EEPROM_DATA);
2889 * The data will always be opposite the native endian
2890 * format. Perform a blind byteswap to compensate.
2897 #define NVRAM_CMD_TIMEOUT 10000
2899 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2903 tw32(NVRAM_CMD, nvram_cmd);
2904 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2906 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2912 if (i == NVRAM_CMD_TIMEOUT)
2918 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2920 if (tg3_flag(tp, NVRAM) &&
2921 tg3_flag(tp, NVRAM_BUFFERED) &&
2922 tg3_flag(tp, FLASH) &&
2923 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2924 (tp->nvram_jedecnum == JEDEC_ATMEL))
2926 addr = ((addr / tp->nvram_pagesize) <<
2927 ATMEL_AT45DB0X1B_PAGE_POS) +
2928 (addr % tp->nvram_pagesize);
2933 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2935 if (tg3_flag(tp, NVRAM) &&
2936 tg3_flag(tp, NVRAM_BUFFERED) &&
2937 tg3_flag(tp, FLASH) &&
2938 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2939 (tp->nvram_jedecnum == JEDEC_ATMEL))
2941 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2942 tp->nvram_pagesize) +
2943 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2948 /* NOTE: Data read in from NVRAM is byteswapped according to
2949 * the byteswapping settings for all other register accesses.
2950 * tg3 devices are BE devices, so on a BE machine, the data
2951 * returned will be exactly as it is seen in NVRAM. On a LE
2952 * machine, the 32-bit value will be byteswapped.
2954 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2958 if (!tg3_flag(tp, NVRAM))
2959 return tg3_nvram_read_using_eeprom(tp, offset, val);
2961 offset = tg3_nvram_phys_addr(tp, offset);
2963 if (offset > NVRAM_ADDR_MSK)
2966 ret = tg3_nvram_lock(tp);
2970 tg3_enable_nvram_access(tp);
2972 tw32(NVRAM_ADDR, offset);
2973 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2974 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2977 *val = tr32(NVRAM_RDDATA);
2979 tg3_disable_nvram_access(tp);
2981 tg3_nvram_unlock(tp);
2986 /* Ensures NVRAM data is in bytestream format. */
2987 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2990 int res = tg3_nvram_read(tp, offset, &v);
2992 *val = cpu_to_be32(v);
2996 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
2997 u32 offset, u32 len, u8 *buf)
3002 for (i = 0; i < len; i += 4) {
3008 memcpy(&data, buf + i, 4);
3011 * The SEEPROM interface expects the data to always be opposite
3012 * the native endian format. We accomplish this by reversing
3013 * all the operations that would have been performed on the
3014 * data from a call to tg3_nvram_read_be32().
3016 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3018 val = tr32(GRC_EEPROM_ADDR);
3019 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3021 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3023 tw32(GRC_EEPROM_ADDR, val |
3024 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3025 (addr & EEPROM_ADDR_ADDR_MASK) |
3029 for (j = 0; j < 1000; j++) {
3030 val = tr32(GRC_EEPROM_ADDR);
3032 if (val & EEPROM_ADDR_COMPLETE)
3036 if (!(val & EEPROM_ADDR_COMPLETE)) {
3045 /* offset and length are dword aligned */
3046 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3050 u32 pagesize = tp->nvram_pagesize;
3051 u32 pagemask = pagesize - 1;
3055 tmp = kmalloc(pagesize, GFP_KERNEL);
3061 u32 phy_addr, page_off, size;
3063 phy_addr = offset & ~pagemask;
3065 for (j = 0; j < pagesize; j += 4) {
3066 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3067 (__be32 *) (tmp + j));
3074 page_off = offset & pagemask;
3081 memcpy(tmp + page_off, buf, size);
3083 offset = offset + (pagesize - page_off);
3085 tg3_enable_nvram_access(tp);
3088 * Before we can erase the flash page, we need
3089 * to issue a special "write enable" command.
3091 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3093 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3096 /* Erase the target page */
3097 tw32(NVRAM_ADDR, phy_addr);
3099 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3100 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3102 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3105 /* Issue another write enable to start the write. */
3106 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3108 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3111 for (j = 0; j < pagesize; j += 4) {
3114 data = *((__be32 *) (tmp + j));
3116 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3118 tw32(NVRAM_ADDR, phy_addr + j);
3120 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3124 nvram_cmd |= NVRAM_CMD_FIRST;
3125 else if (j == (pagesize - 4))
3126 nvram_cmd |= NVRAM_CMD_LAST;
3128 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3136 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3137 tg3_nvram_exec_cmd(tp, nvram_cmd);
3144 /* offset and length are dword aligned */
3145 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3150 for (i = 0; i < len; i += 4, offset += 4) {
3151 u32 page_off, phy_addr, nvram_cmd;
3154 memcpy(&data, buf + i, 4);
3155 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3157 page_off = offset % tp->nvram_pagesize;
3159 phy_addr = tg3_nvram_phys_addr(tp, offset);
3161 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3163 if (page_off == 0 || i == 0)
3164 nvram_cmd |= NVRAM_CMD_FIRST;
3165 if (page_off == (tp->nvram_pagesize - 4))
3166 nvram_cmd |= NVRAM_CMD_LAST;
3169 nvram_cmd |= NVRAM_CMD_LAST;
3171 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3172 !tg3_flag(tp, FLASH) ||
3173 !tg3_flag(tp, 57765_PLUS))
3174 tw32(NVRAM_ADDR, phy_addr);
3176 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3177 !tg3_flag(tp, 5755_PLUS) &&
3178 (tp->nvram_jedecnum == JEDEC_ST) &&
3179 (nvram_cmd & NVRAM_CMD_FIRST)) {
3182 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3183 ret = tg3_nvram_exec_cmd(tp, cmd);
3187 if (!tg3_flag(tp, FLASH)) {
3188 /* We always do complete word writes to eeprom. */
3189 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3192 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3199 /* offset and length are dword aligned */
3200 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3204 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3205 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3206 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3210 if (!tg3_flag(tp, NVRAM)) {
3211 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3215 ret = tg3_nvram_lock(tp);
3219 tg3_enable_nvram_access(tp);
3220 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3221 tw32(NVRAM_WRITE1, 0x406);
3223 grc_mode = tr32(GRC_MODE);
3224 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3226 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3227 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3230 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3234 grc_mode = tr32(GRC_MODE);
3235 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3237 tg3_disable_nvram_access(tp);
3238 tg3_nvram_unlock(tp);
3241 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3242 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3249 #define RX_CPU_SCRATCH_BASE 0x30000
3250 #define RX_CPU_SCRATCH_SIZE 0x04000
3251 #define TX_CPU_SCRATCH_BASE 0x34000
3252 #define TX_CPU_SCRATCH_SIZE 0x04000
3254 /* tp->lock is held. */
3255 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3259 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3261 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3262 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3264 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3267 if (offset == RX_CPU_BASE) {
3268 for (i = 0; i < 10000; i++) {
3269 tw32(offset + CPU_STATE, 0xffffffff);
3270 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3271 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3275 tw32(offset + CPU_STATE, 0xffffffff);
3276 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3279 for (i = 0; i < 10000; i++) {
3280 tw32(offset + CPU_STATE, 0xffffffff);
3281 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3282 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3288 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3289 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3293 /* Clear firmware's nvram arbitration. */
3294 if (tg3_flag(tp, NVRAM))
3295 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3300 unsigned int fw_base;
3301 unsigned int fw_len;
3302 const __be32 *fw_data;
3305 /* tp->lock is held. */
3306 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3307 u32 cpu_scratch_base, int cpu_scratch_size,
3308 struct fw_info *info)
3310 int err, lock_err, i;
3311 void (*write_op)(struct tg3 *, u32, u32);
3313 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3315 "%s: Trying to load TX cpu firmware which is 5705\n",
3320 if (tg3_flag(tp, 5705_PLUS))
3321 write_op = tg3_write_mem;
3323 write_op = tg3_write_indirect_reg32;
3325 /* It is possible that bootcode is still loading at this point.
3326 * Get the nvram lock first before halting the cpu.
3328 lock_err = tg3_nvram_lock(tp);
3329 err = tg3_halt_cpu(tp, cpu_base);
3331 tg3_nvram_unlock(tp);
3335 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3336 write_op(tp, cpu_scratch_base + i, 0);
3337 tw32(cpu_base + CPU_STATE, 0xffffffff);
3338 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3339 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3340 write_op(tp, (cpu_scratch_base +
3341 (info->fw_base & 0xffff) +
3343 be32_to_cpu(info->fw_data[i]));
3351 /* tp->lock is held. */
3352 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3354 struct fw_info info;
3355 const __be32 *fw_data;
3358 fw_data = (void *)tp->fw->data;
3360 /* Firmware blob starts with version numbers, followed by
3361 start address and length. We are setting complete length.
3362 length = end_address_of_bss - start_address_of_text.
3363 Remainder is the blob to be loaded contiguously
3364 from start address. */
3366 info.fw_base = be32_to_cpu(fw_data[1]);
3367 info.fw_len = tp->fw->size - 12;
3368 info.fw_data = &fw_data[3];
3370 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3371 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3376 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3377 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3382 /* Now startup only the RX cpu. */
3383 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3384 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3386 for (i = 0; i < 5; i++) {
3387 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3389 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3390 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3391 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3395 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3396 "should be %08x\n", __func__,
3397 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3400 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3401 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3406 /* tp->lock is held. */
3407 static int tg3_load_tso_firmware(struct tg3 *tp)
3409 struct fw_info info;
3410 const __be32 *fw_data;
3411 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3414 if (tg3_flag(tp, HW_TSO_1) ||
3415 tg3_flag(tp, HW_TSO_2) ||
3416 tg3_flag(tp, HW_TSO_3))
3419 fw_data = (void *)tp->fw->data;
3421 /* Firmware blob starts with version numbers, followed by
3422 start address and length. We are setting complete length.
3423 length = end_address_of_bss - start_address_of_text.
3424 Remainder is the blob to be loaded contiguously
3425 from start address. */
3427 info.fw_base = be32_to_cpu(fw_data[1]);
3428 cpu_scratch_size = tp->fw_len;
3429 info.fw_len = tp->fw->size - 12;
3430 info.fw_data = &fw_data[3];
3432 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3433 cpu_base = RX_CPU_BASE;
3434 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3436 cpu_base = TX_CPU_BASE;
3437 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3438 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3441 err = tg3_load_firmware_cpu(tp, cpu_base,
3442 cpu_scratch_base, cpu_scratch_size,
3447 /* Now startup the cpu. */
3448 tw32(cpu_base + CPU_STATE, 0xffffffff);
3449 tw32_f(cpu_base + CPU_PC, info.fw_base);
3451 for (i = 0; i < 5; i++) {
3452 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3454 tw32(cpu_base + CPU_STATE, 0xffffffff);
3455 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3456 tw32_f(cpu_base + CPU_PC, info.fw_base);
3461 "%s fails to set CPU PC, is %08x should be %08x\n",
3462 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3465 tw32(cpu_base + CPU_STATE, 0xffffffff);
3466 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3471 /* tp->lock is held. */
3472 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3474 u32 addr_high, addr_low;
3477 addr_high = ((tp->dev->dev_addr[0] << 8) |
3478 tp->dev->dev_addr[1]);
3479 addr_low = ((tp->dev->dev_addr[2] << 24) |
3480 (tp->dev->dev_addr[3] << 16) |
3481 (tp->dev->dev_addr[4] << 8) |
3482 (tp->dev->dev_addr[5] << 0));
3483 for (i = 0; i < 4; i++) {
3484 if (i == 1 && skip_mac_1)
3486 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3487 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3490 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3491 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3492 for (i = 0; i < 12; i++) {
3493 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3494 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3498 addr_high = (tp->dev->dev_addr[0] +
3499 tp->dev->dev_addr[1] +
3500 tp->dev->dev_addr[2] +
3501 tp->dev->dev_addr[3] +
3502 tp->dev->dev_addr[4] +
3503 tp->dev->dev_addr[5]) &
3504 TX_BACKOFF_SEED_MASK;
3505 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3508 static void tg3_enable_register_access(struct tg3 *tp)
3511 * Make sure register accesses (indirect or otherwise) will function
3514 pci_write_config_dword(tp->pdev,
3515 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3518 static int tg3_power_up(struct tg3 *tp)
3522 tg3_enable_register_access(tp);
3524 err = pci_set_power_state(tp->pdev, PCI_D0);
3526 /* Switch out of Vaux if it is a NIC */
3527 tg3_pwrsrc_switch_to_vmain(tp);
3529 netdev_err(tp->dev, "Transition to D0 failed\n");
3535 static int tg3_setup_phy(struct tg3 *, int);
3537 static int tg3_power_down_prepare(struct tg3 *tp)
3540 bool device_should_wake, do_low_power;
3542 tg3_enable_register_access(tp);
3544 /* Restore the CLKREQ setting. */
3545 if (tg3_flag(tp, CLKREQ_BUG)) {
3548 pci_read_config_word(tp->pdev,
3549 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3551 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3552 pci_write_config_word(tp->pdev,
3553 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3557 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3558 tw32(TG3PCI_MISC_HOST_CTRL,
3559 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3561 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3562 tg3_flag(tp, WOL_ENABLE);
3564 if (tg3_flag(tp, USE_PHYLIB)) {
3565 do_low_power = false;
3566 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3567 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3568 struct phy_device *phydev;
3569 u32 phyid, advertising;
3571 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3573 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3575 tp->link_config.speed = phydev->speed;
3576 tp->link_config.duplex = phydev->duplex;
3577 tp->link_config.autoneg = phydev->autoneg;
3578 tp->link_config.advertising = phydev->advertising;
3580 advertising = ADVERTISED_TP |
3582 ADVERTISED_Autoneg |
3583 ADVERTISED_10baseT_Half;
3585 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3586 if (tg3_flag(tp, WOL_SPEED_100MB))
3588 ADVERTISED_100baseT_Half |
3589 ADVERTISED_100baseT_Full |
3590 ADVERTISED_10baseT_Full;
3592 advertising |= ADVERTISED_10baseT_Full;
3595 phydev->advertising = advertising;
3597 phy_start_aneg(phydev);
3599 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3600 if (phyid != PHY_ID_BCMAC131) {
3601 phyid &= PHY_BCM_OUI_MASK;
3602 if (phyid == PHY_BCM_OUI_1 ||
3603 phyid == PHY_BCM_OUI_2 ||
3604 phyid == PHY_BCM_OUI_3)
3605 do_low_power = true;
3609 do_low_power = true;
3611 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3612 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3614 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3615 tg3_setup_phy(tp, 0);
3618 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3621 val = tr32(GRC_VCPU_EXT_CTRL);
3622 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3623 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3627 for (i = 0; i < 200; i++) {
3628 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3629 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3634 if (tg3_flag(tp, WOL_CAP))
3635 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3636 WOL_DRV_STATE_SHUTDOWN |
3640 if (device_should_wake) {
3643 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3645 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3646 tg3_phy_auxctl_write(tp,
3647 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3648 MII_TG3_AUXCTL_PCTL_WOL_EN |
3649 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3650 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3654 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3655 mac_mode = MAC_MODE_PORT_MODE_GMII;
3657 mac_mode = MAC_MODE_PORT_MODE_MII;
3659 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3660 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3662 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3663 SPEED_100 : SPEED_10;
3664 if (tg3_5700_link_polarity(tp, speed))
3665 mac_mode |= MAC_MODE_LINK_POLARITY;
3667 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3670 mac_mode = MAC_MODE_PORT_MODE_TBI;
3673 if (!tg3_flag(tp, 5750_PLUS))
3674 tw32(MAC_LED_CTRL, tp->led_ctrl);
3676 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3677 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3678 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3679 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3681 if (tg3_flag(tp, ENABLE_APE))
3682 mac_mode |= MAC_MODE_APE_TX_EN |
3683 MAC_MODE_APE_RX_EN |
3684 MAC_MODE_TDE_ENABLE;
3686 tw32_f(MAC_MODE, mac_mode);
3689 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3693 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3694 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3695 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3698 base_val = tp->pci_clock_ctrl;
3699 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3700 CLOCK_CTRL_TXCLK_DISABLE);
3702 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3703 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3704 } else if (tg3_flag(tp, 5780_CLASS) ||
3705 tg3_flag(tp, CPMU_PRESENT) ||
3706 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3708 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3709 u32 newbits1, newbits2;
3711 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3712 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3713 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3714 CLOCK_CTRL_TXCLK_DISABLE |
3716 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3717 } else if (tg3_flag(tp, 5705_PLUS)) {
3718 newbits1 = CLOCK_CTRL_625_CORE;
3719 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3721 newbits1 = CLOCK_CTRL_ALTCLK;
3722 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3725 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3728 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3731 if (!tg3_flag(tp, 5705_PLUS)) {
3734 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3735 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3736 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3737 CLOCK_CTRL_TXCLK_DISABLE |
3738 CLOCK_CTRL_44MHZ_CORE);
3740 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3743 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3744 tp->pci_clock_ctrl | newbits3, 40);
3748 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3749 tg3_power_down_phy(tp, do_low_power);
3751 tg3_frob_aux_power(tp, true);
3753 /* Workaround for unstable PLL clock */
3754 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3755 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3756 u32 val = tr32(0x7d00);
3758 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3760 if (!tg3_flag(tp, ENABLE_ASF)) {
3763 err = tg3_nvram_lock(tp);
3764 tg3_halt_cpu(tp, RX_CPU_BASE);
3766 tg3_nvram_unlock(tp);
3770 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3775 static void tg3_power_down(struct tg3 *tp)
3777 tg3_power_down_prepare(tp);
3779 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3780 pci_set_power_state(tp->pdev, PCI_D3hot);
3783 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3785 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3786 case MII_TG3_AUX_STAT_10HALF:
3788 *duplex = DUPLEX_HALF;
3791 case MII_TG3_AUX_STAT_10FULL:
3793 *duplex = DUPLEX_FULL;
3796 case MII_TG3_AUX_STAT_100HALF:
3798 *duplex = DUPLEX_HALF;
3801 case MII_TG3_AUX_STAT_100FULL:
3803 *duplex = DUPLEX_FULL;
3806 case MII_TG3_AUX_STAT_1000HALF:
3807 *speed = SPEED_1000;
3808 *duplex = DUPLEX_HALF;
3811 case MII_TG3_AUX_STAT_1000FULL:
3812 *speed = SPEED_1000;
3813 *duplex = DUPLEX_FULL;
3817 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3818 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3820 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3824 *speed = SPEED_UNKNOWN;
3825 *duplex = DUPLEX_UNKNOWN;
3830 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3835 new_adv = ADVERTISE_CSMA;
3836 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3837 new_adv |= mii_advertise_flowctrl(flowctrl);
3839 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3843 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3844 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3846 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3847 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3848 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3850 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3855 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3858 tw32(TG3_CPMU_EEE_MODE,
3859 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3861 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3866 /* Advertise 100-BaseTX EEE ability */
3867 if (advertise & ADVERTISED_100baseT_Full)
3868 val |= MDIO_AN_EEE_ADV_100TX;
3869 /* Advertise 1000-BaseT EEE ability */
3870 if (advertise & ADVERTISED_1000baseT_Full)
3871 val |= MDIO_AN_EEE_ADV_1000T;
3872 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3876 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3878 case ASIC_REV_57765:
3879 case ASIC_REV_57766:
3881 /* If we advertised any eee advertisements above... */
3883 val = MII_TG3_DSP_TAP26_ALNOKO |
3884 MII_TG3_DSP_TAP26_RMRXSTO |
3885 MII_TG3_DSP_TAP26_OPCSINPT;
3886 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3889 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3890 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3891 MII_TG3_DSP_CH34TP2_HIBW01);
3894 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3903 static void tg3_phy_copper_begin(struct tg3 *tp)
3905 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
3906 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3909 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3910 adv = ADVERTISED_10baseT_Half |
3911 ADVERTISED_10baseT_Full;
3912 if (tg3_flag(tp, WOL_SPEED_100MB))
3913 adv |= ADVERTISED_100baseT_Half |
3914 ADVERTISED_100baseT_Full;
3916 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
3918 adv = tp->link_config.advertising;
3919 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3920 adv &= ~(ADVERTISED_1000baseT_Half |
3921 ADVERTISED_1000baseT_Full);
3923 fc = tp->link_config.flowctrl;
3926 tg3_phy_autoneg_cfg(tp, adv, fc);
3928 tg3_writephy(tp, MII_BMCR,
3929 BMCR_ANENABLE | BMCR_ANRESTART);
3932 u32 bmcr, orig_bmcr;
3934 tp->link_config.active_speed = tp->link_config.speed;
3935 tp->link_config.active_duplex = tp->link_config.duplex;
3938 switch (tp->link_config.speed) {
3944 bmcr |= BMCR_SPEED100;
3948 bmcr |= BMCR_SPEED1000;
3952 if (tp->link_config.duplex == DUPLEX_FULL)
3953 bmcr |= BMCR_FULLDPLX;
3955 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3956 (bmcr != orig_bmcr)) {
3957 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3958 for (i = 0; i < 1500; i++) {
3962 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3963 tg3_readphy(tp, MII_BMSR, &tmp))
3965 if (!(tmp & BMSR_LSTATUS)) {
3970 tg3_writephy(tp, MII_BMCR, bmcr);
3976 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3980 /* Turn off tap power management. */
3981 /* Set Extended packet length bit */
3982 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3984 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3985 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3986 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3987 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3988 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3995 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
3997 u32 advmsk, tgtadv, advertising;
3999 advertising = tp->link_config.advertising;
4000 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4002 advmsk = ADVERTISE_ALL;
4003 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4004 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4005 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4008 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4011 if ((*lcladv & advmsk) != tgtadv)
4014 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4017 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4019 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4023 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4024 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4025 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4026 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4027 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4029 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4032 if (tg3_ctrl != tgtadv)
4039 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4043 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4046 if (tg3_readphy(tp, MII_STAT1000, &val))
4049 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4052 if (tg3_readphy(tp, MII_LPA, rmtadv))
4055 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4056 tp->link_config.rmt_adv = lpeth;
4061 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4063 int current_link_up;
4065 u32 lcl_adv, rmt_adv;
4073 (MAC_STATUS_SYNC_CHANGED |
4074 MAC_STATUS_CFG_CHANGED |
4075 MAC_STATUS_MI_COMPLETION |
4076 MAC_STATUS_LNKSTATE_CHANGED));
4079 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4081 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4085 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4087 /* Some third-party PHYs need to be reset on link going
4090 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4091 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4092 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4093 netif_carrier_ok(tp->dev)) {
4094 tg3_readphy(tp, MII_BMSR, &bmsr);
4095 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4096 !(bmsr & BMSR_LSTATUS))
4102 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4103 tg3_readphy(tp, MII_BMSR, &bmsr);
4104 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4105 !tg3_flag(tp, INIT_COMPLETE))
4108 if (!(bmsr & BMSR_LSTATUS)) {
4109 err = tg3_init_5401phy_dsp(tp);
4113 tg3_readphy(tp, MII_BMSR, &bmsr);
4114 for (i = 0; i < 1000; i++) {
4116 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4117 (bmsr & BMSR_LSTATUS)) {
4123 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4124 TG3_PHY_REV_BCM5401_B0 &&
4125 !(bmsr & BMSR_LSTATUS) &&
4126 tp->link_config.active_speed == SPEED_1000) {
4127 err = tg3_phy_reset(tp);
4129 err = tg3_init_5401phy_dsp(tp);
4134 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4135 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4136 /* 5701 {A0,B0} CRC bug workaround */
4137 tg3_writephy(tp, 0x15, 0x0a75);
4138 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4139 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4140 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4143 /* Clear pending interrupts... */
4144 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4145 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4147 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4148 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4149 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4150 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4152 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4153 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4154 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4155 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4156 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4158 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4161 current_link_up = 0;
4162 current_speed = SPEED_UNKNOWN;
4163 current_duplex = DUPLEX_UNKNOWN;
4164 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4165 tp->link_config.rmt_adv = 0;
4167 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4168 err = tg3_phy_auxctl_read(tp,
4169 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4171 if (!err && !(val & (1 << 10))) {
4172 tg3_phy_auxctl_write(tp,
4173 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4180 for (i = 0; i < 100; i++) {
4181 tg3_readphy(tp, MII_BMSR, &bmsr);
4182 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4183 (bmsr & BMSR_LSTATUS))
4188 if (bmsr & BMSR_LSTATUS) {
4191 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4192 for (i = 0; i < 2000; i++) {
4194 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4199 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4204 for (i = 0; i < 200; i++) {
4205 tg3_readphy(tp, MII_BMCR, &bmcr);
4206 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4208 if (bmcr && bmcr != 0x7fff)
4216 tp->link_config.active_speed = current_speed;
4217 tp->link_config.active_duplex = current_duplex;
4219 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4220 if ((bmcr & BMCR_ANENABLE) &&
4221 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4222 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4223 current_link_up = 1;
4225 if (!(bmcr & BMCR_ANENABLE) &&
4226 tp->link_config.speed == current_speed &&
4227 tp->link_config.duplex == current_duplex &&
4228 tp->link_config.flowctrl ==
4229 tp->link_config.active_flowctrl) {
4230 current_link_up = 1;
4234 if (current_link_up == 1 &&
4235 tp->link_config.active_duplex == DUPLEX_FULL) {
4238 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4239 reg = MII_TG3_FET_GEN_STAT;
4240 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4242 reg = MII_TG3_EXT_STAT;
4243 bit = MII_TG3_EXT_STAT_MDIX;
4246 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4247 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4249 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4254 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4255 tg3_phy_copper_begin(tp);
4257 tg3_readphy(tp, MII_BMSR, &bmsr);
4258 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4259 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4260 current_link_up = 1;
4263 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4264 if (current_link_up == 1) {
4265 if (tp->link_config.active_speed == SPEED_100 ||
4266 tp->link_config.active_speed == SPEED_10)
4267 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4269 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4270 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4271 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4273 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4275 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4276 if (tp->link_config.active_duplex == DUPLEX_HALF)
4277 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4279 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4280 if (current_link_up == 1 &&
4281 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4282 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4284 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4287 /* ??? Without this setting Netgear GA302T PHY does not
4288 * ??? send/receive packets...
4290 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4291 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4292 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4293 tw32_f(MAC_MI_MODE, tp->mi_mode);
4297 tw32_f(MAC_MODE, tp->mac_mode);
4300 tg3_phy_eee_adjust(tp, current_link_up);
4302 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4303 /* Polled via timer. */
4304 tw32_f(MAC_EVENT, 0);
4306 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4310 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4311 current_link_up == 1 &&
4312 tp->link_config.active_speed == SPEED_1000 &&
4313 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4316 (MAC_STATUS_SYNC_CHANGED |
4317 MAC_STATUS_CFG_CHANGED));
4320 NIC_SRAM_FIRMWARE_MBOX,
4321 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4324 /* Prevent send BD corruption. */
4325 if (tg3_flag(tp, CLKREQ_BUG)) {
4326 u16 oldlnkctl, newlnkctl;
4328 pci_read_config_word(tp->pdev,
4329 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4331 if (tp->link_config.active_speed == SPEED_100 ||
4332 tp->link_config.active_speed == SPEED_10)
4333 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4335 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4336 if (newlnkctl != oldlnkctl)
4337 pci_write_config_word(tp->pdev,
4338 pci_pcie_cap(tp->pdev) +
4339 PCI_EXP_LNKCTL, newlnkctl);
4342 if (current_link_up != netif_carrier_ok(tp->dev)) {
4343 if (current_link_up)
4344 netif_carrier_on(tp->dev);
4346 netif_carrier_off(tp->dev);
4347 tg3_link_report(tp);
4353 struct tg3_fiber_aneginfo {
4355 #define ANEG_STATE_UNKNOWN 0
4356 #define ANEG_STATE_AN_ENABLE 1
4357 #define ANEG_STATE_RESTART_INIT 2
4358 #define ANEG_STATE_RESTART 3
4359 #define ANEG_STATE_DISABLE_LINK_OK 4
4360 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4361 #define ANEG_STATE_ABILITY_DETECT 6
4362 #define ANEG_STATE_ACK_DETECT_INIT 7
4363 #define ANEG_STATE_ACK_DETECT 8
4364 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4365 #define ANEG_STATE_COMPLETE_ACK 10
4366 #define ANEG_STATE_IDLE_DETECT_INIT 11
4367 #define ANEG_STATE_IDLE_DETECT 12
4368 #define ANEG_STATE_LINK_OK 13
4369 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4370 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4373 #define MR_AN_ENABLE 0x00000001
4374 #define MR_RESTART_AN 0x00000002
4375 #define MR_AN_COMPLETE 0x00000004
4376 #define MR_PAGE_RX 0x00000008
4377 #define MR_NP_LOADED 0x00000010
4378 #define MR_TOGGLE_TX 0x00000020
4379 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4380 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4381 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4382 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4383 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4384 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4385 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4386 #define MR_TOGGLE_RX 0x00002000
4387 #define MR_NP_RX 0x00004000
4389 #define MR_LINK_OK 0x80000000
4391 unsigned long link_time, cur_time;
4393 u32 ability_match_cfg;
4394 int ability_match_count;
4396 char ability_match, idle_match, ack_match;
4398 u32 txconfig, rxconfig;
4399 #define ANEG_CFG_NP 0x00000080
4400 #define ANEG_CFG_ACK 0x00000040
4401 #define ANEG_CFG_RF2 0x00000020
4402 #define ANEG_CFG_RF1 0x00000010
4403 #define ANEG_CFG_PS2 0x00000001
4404 #define ANEG_CFG_PS1 0x00008000
4405 #define ANEG_CFG_HD 0x00004000
4406 #define ANEG_CFG_FD 0x00002000
4407 #define ANEG_CFG_INVAL 0x00001f06
4412 #define ANEG_TIMER_ENAB 2
4413 #define ANEG_FAILED -1
4415 #define ANEG_STATE_SETTLE_TIME 10000
4417 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4418 struct tg3_fiber_aneginfo *ap)
4421 unsigned long delta;
4425 if (ap->state == ANEG_STATE_UNKNOWN) {
4429 ap->ability_match_cfg = 0;
4430 ap->ability_match_count = 0;
4431 ap->ability_match = 0;
4437 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4438 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4440 if (rx_cfg_reg != ap->ability_match_cfg) {
4441 ap->ability_match_cfg = rx_cfg_reg;
4442 ap->ability_match = 0;
4443 ap->ability_match_count = 0;
4445 if (++ap->ability_match_count > 1) {
4446 ap->ability_match = 1;
4447 ap->ability_match_cfg = rx_cfg_reg;
4450 if (rx_cfg_reg & ANEG_CFG_ACK)
4458 ap->ability_match_cfg = 0;
4459 ap->ability_match_count = 0;
4460 ap->ability_match = 0;
4466 ap->rxconfig = rx_cfg_reg;
4469 switch (ap->state) {
4470 case ANEG_STATE_UNKNOWN:
4471 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4472 ap->state = ANEG_STATE_AN_ENABLE;
4475 case ANEG_STATE_AN_ENABLE:
4476 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4477 if (ap->flags & MR_AN_ENABLE) {
4480 ap->ability_match_cfg = 0;
4481 ap->ability_match_count = 0;
4482 ap->ability_match = 0;
4486 ap->state = ANEG_STATE_RESTART_INIT;
4488 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4492 case ANEG_STATE_RESTART_INIT:
4493 ap->link_time = ap->cur_time;
4494 ap->flags &= ~(MR_NP_LOADED);
4496 tw32(MAC_TX_AUTO_NEG, 0);
4497 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4498 tw32_f(MAC_MODE, tp->mac_mode);
4501 ret = ANEG_TIMER_ENAB;
4502 ap->state = ANEG_STATE_RESTART;
4505 case ANEG_STATE_RESTART:
4506 delta = ap->cur_time - ap->link_time;
4507 if (delta > ANEG_STATE_SETTLE_TIME)
4508 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4510 ret = ANEG_TIMER_ENAB;
4513 case ANEG_STATE_DISABLE_LINK_OK:
4517 case ANEG_STATE_ABILITY_DETECT_INIT:
4518 ap->flags &= ~(MR_TOGGLE_TX);
4519 ap->txconfig = ANEG_CFG_FD;
4520 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4521 if (flowctrl & ADVERTISE_1000XPAUSE)
4522 ap->txconfig |= ANEG_CFG_PS1;
4523 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4524 ap->txconfig |= ANEG_CFG_PS2;
4525 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4526 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4527 tw32_f(MAC_MODE, tp->mac_mode);
4530 ap->state = ANEG_STATE_ABILITY_DETECT;
4533 case ANEG_STATE_ABILITY_DETECT:
4534 if (ap->ability_match != 0 && ap->rxconfig != 0)
4535 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4538 case ANEG_STATE_ACK_DETECT_INIT:
4539 ap->txconfig |= ANEG_CFG_ACK;
4540 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4541 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4542 tw32_f(MAC_MODE, tp->mac_mode);
4545 ap->state = ANEG_STATE_ACK_DETECT;
4548 case ANEG_STATE_ACK_DETECT:
4549 if (ap->ack_match != 0) {
4550 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4551 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4552 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4554 ap->state = ANEG_STATE_AN_ENABLE;
4556 } else if (ap->ability_match != 0 &&
4557 ap->rxconfig == 0) {
4558 ap->state = ANEG_STATE_AN_ENABLE;
4562 case ANEG_STATE_COMPLETE_ACK_INIT:
4563 if (ap->rxconfig & ANEG_CFG_INVAL) {
4567 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4568 MR_LP_ADV_HALF_DUPLEX |
4569 MR_LP_ADV_SYM_PAUSE |
4570 MR_LP_ADV_ASYM_PAUSE |
4571 MR_LP_ADV_REMOTE_FAULT1 |
4572 MR_LP_ADV_REMOTE_FAULT2 |
4573 MR_LP_ADV_NEXT_PAGE |
4576 if (ap->rxconfig & ANEG_CFG_FD)
4577 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4578 if (ap->rxconfig & ANEG_CFG_HD)
4579 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4580 if (ap->rxconfig & ANEG_CFG_PS1)
4581 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4582 if (ap->rxconfig & ANEG_CFG_PS2)
4583 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4584 if (ap->rxconfig & ANEG_CFG_RF1)
4585 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4586 if (ap->rxconfig & ANEG_CFG_RF2)
4587 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4588 if (ap->rxconfig & ANEG_CFG_NP)
4589 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4591 ap->link_time = ap->cur_time;
4593 ap->flags ^= (MR_TOGGLE_TX);
4594 if (ap->rxconfig & 0x0008)
4595 ap->flags |= MR_TOGGLE_RX;
4596 if (ap->rxconfig & ANEG_CFG_NP)
4597 ap->flags |= MR_NP_RX;
4598 ap->flags |= MR_PAGE_RX;
4600 ap->state = ANEG_STATE_COMPLETE_ACK;
4601 ret = ANEG_TIMER_ENAB;
4604 case ANEG_STATE_COMPLETE_ACK:
4605 if (ap->ability_match != 0 &&
4606 ap->rxconfig == 0) {
4607 ap->state = ANEG_STATE_AN_ENABLE;
4610 delta = ap->cur_time - ap->link_time;
4611 if (delta > ANEG_STATE_SETTLE_TIME) {
4612 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4613 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4615 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4616 !(ap->flags & MR_NP_RX)) {
4617 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4625 case ANEG_STATE_IDLE_DETECT_INIT:
4626 ap->link_time = ap->cur_time;
4627 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4628 tw32_f(MAC_MODE, tp->mac_mode);
4631 ap->state = ANEG_STATE_IDLE_DETECT;
4632 ret = ANEG_TIMER_ENAB;
4635 case ANEG_STATE_IDLE_DETECT:
4636 if (ap->ability_match != 0 &&
4637 ap->rxconfig == 0) {
4638 ap->state = ANEG_STATE_AN_ENABLE;
4641 delta = ap->cur_time - ap->link_time;
4642 if (delta > ANEG_STATE_SETTLE_TIME) {
4643 /* XXX another gem from the Broadcom driver :( */
4644 ap->state = ANEG_STATE_LINK_OK;
4648 case ANEG_STATE_LINK_OK:
4649 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4653 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4654 /* ??? unimplemented */
4657 case ANEG_STATE_NEXT_PAGE_WAIT:
4658 /* ??? unimplemented */
4669 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4672 struct tg3_fiber_aneginfo aninfo;
4673 int status = ANEG_FAILED;
4677 tw32_f(MAC_TX_AUTO_NEG, 0);
4679 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4680 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4683 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4686 memset(&aninfo, 0, sizeof(aninfo));
4687 aninfo.flags |= MR_AN_ENABLE;
4688 aninfo.state = ANEG_STATE_UNKNOWN;
4689 aninfo.cur_time = 0;
4691 while (++tick < 195000) {
4692 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4693 if (status == ANEG_DONE || status == ANEG_FAILED)
4699 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4700 tw32_f(MAC_MODE, tp->mac_mode);
4703 *txflags = aninfo.txconfig;
4704 *rxflags = aninfo.flags;
4706 if (status == ANEG_DONE &&
4707 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4708 MR_LP_ADV_FULL_DUPLEX)))
4714 static void tg3_init_bcm8002(struct tg3 *tp)
4716 u32 mac_status = tr32(MAC_STATUS);
4719 /* Reset when initting first time or we have a link. */
4720 if (tg3_flag(tp, INIT_COMPLETE) &&
4721 !(mac_status & MAC_STATUS_PCS_SYNCED))
4724 /* Set PLL lock range. */
4725 tg3_writephy(tp, 0x16, 0x8007);
4728 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4730 /* Wait for reset to complete. */
4731 /* XXX schedule_timeout() ... */
4732 for (i = 0; i < 500; i++)
4735 /* Config mode; select PMA/Ch 1 regs. */
4736 tg3_writephy(tp, 0x10, 0x8411);
4738 /* Enable auto-lock and comdet, select txclk for tx. */
4739 tg3_writephy(tp, 0x11, 0x0a10);
4741 tg3_writephy(tp, 0x18, 0x00a0);
4742 tg3_writephy(tp, 0x16, 0x41ff);
4744 /* Assert and deassert POR. */
4745 tg3_writephy(tp, 0x13, 0x0400);
4747 tg3_writephy(tp, 0x13, 0x0000);
4749 tg3_writephy(tp, 0x11, 0x0a50);
4751 tg3_writephy(tp, 0x11, 0x0a10);
4753 /* Wait for signal to stabilize */
4754 /* XXX schedule_timeout() ... */
4755 for (i = 0; i < 15000; i++)
4758 /* Deselect the channel register so we can read the PHYID
4761 tg3_writephy(tp, 0x10, 0x8011);
4764 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4767 u32 sg_dig_ctrl, sg_dig_status;
4768 u32 serdes_cfg, expected_sg_dig_ctrl;
4769 int workaround, port_a;
4770 int current_link_up;
4773 expected_sg_dig_ctrl = 0;
4776 current_link_up = 0;
4778 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4779 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4781 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4784 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4785 /* preserve bits 20-23 for voltage regulator */
4786 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4789 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4791 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4792 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4794 u32 val = serdes_cfg;
4800 tw32_f(MAC_SERDES_CFG, val);
4803 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4805 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4806 tg3_setup_flow_control(tp, 0, 0);
4807 current_link_up = 1;
4812 /* Want auto-negotiation. */
4813 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4815 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4816 if (flowctrl & ADVERTISE_1000XPAUSE)
4817 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4818 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4819 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4821 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4822 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4823 tp->serdes_counter &&
4824 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4825 MAC_STATUS_RCVD_CFG)) ==
4826 MAC_STATUS_PCS_SYNCED)) {
4827 tp->serdes_counter--;
4828 current_link_up = 1;
4833 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4834 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4836 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4838 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4839 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4840 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4841 MAC_STATUS_SIGNAL_DET)) {
4842 sg_dig_status = tr32(SG_DIG_STATUS);
4843 mac_status = tr32(MAC_STATUS);
4845 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4846 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4847 u32 local_adv = 0, remote_adv = 0;
4849 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4850 local_adv |= ADVERTISE_1000XPAUSE;
4851 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4852 local_adv |= ADVERTISE_1000XPSE_ASYM;
4854 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4855 remote_adv |= LPA_1000XPAUSE;
4856 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4857 remote_adv |= LPA_1000XPAUSE_ASYM;
4859 tp->link_config.rmt_adv =
4860 mii_adv_to_ethtool_adv_x(remote_adv);
4862 tg3_setup_flow_control(tp, local_adv, remote_adv);
4863 current_link_up = 1;
4864 tp->serdes_counter = 0;
4865 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4866 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4867 if (tp->serdes_counter)
4868 tp->serdes_counter--;
4871 u32 val = serdes_cfg;
4878 tw32_f(MAC_SERDES_CFG, val);
4881 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4884 /* Link parallel detection - link is up */
4885 /* only if we have PCS_SYNC and not */
4886 /* receiving config code words */
4887 mac_status = tr32(MAC_STATUS);
4888 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4889 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4890 tg3_setup_flow_control(tp, 0, 0);
4891 current_link_up = 1;
4893 TG3_PHYFLG_PARALLEL_DETECT;
4894 tp->serdes_counter =
4895 SERDES_PARALLEL_DET_TIMEOUT;
4897 goto restart_autoneg;
4901 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4902 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4906 return current_link_up;
4909 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4911 int current_link_up = 0;
4913 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4916 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4917 u32 txflags, rxflags;
4920 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4921 u32 local_adv = 0, remote_adv = 0;
4923 if (txflags & ANEG_CFG_PS1)
4924 local_adv |= ADVERTISE_1000XPAUSE;
4925 if (txflags & ANEG_CFG_PS2)
4926 local_adv |= ADVERTISE_1000XPSE_ASYM;
4928 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4929 remote_adv |= LPA_1000XPAUSE;
4930 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4931 remote_adv |= LPA_1000XPAUSE_ASYM;
4933 tp->link_config.rmt_adv =
4934 mii_adv_to_ethtool_adv_x(remote_adv);
4936 tg3_setup_flow_control(tp, local_adv, remote_adv);
4938 current_link_up = 1;
4940 for (i = 0; i < 30; i++) {
4943 (MAC_STATUS_SYNC_CHANGED |
4944 MAC_STATUS_CFG_CHANGED));
4946 if ((tr32(MAC_STATUS) &
4947 (MAC_STATUS_SYNC_CHANGED |
4948 MAC_STATUS_CFG_CHANGED)) == 0)
4952 mac_status = tr32(MAC_STATUS);
4953 if (current_link_up == 0 &&
4954 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4955 !(mac_status & MAC_STATUS_RCVD_CFG))
4956 current_link_up = 1;
4958 tg3_setup_flow_control(tp, 0, 0);
4960 /* Forcing 1000FD link up. */
4961 current_link_up = 1;
4963 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4966 tw32_f(MAC_MODE, tp->mac_mode);
4971 return current_link_up;
4974 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4977 u16 orig_active_speed;
4978 u8 orig_active_duplex;
4980 int current_link_up;
4983 orig_pause_cfg = tp->link_config.active_flowctrl;
4984 orig_active_speed = tp->link_config.active_speed;
4985 orig_active_duplex = tp->link_config.active_duplex;
4987 if (!tg3_flag(tp, HW_AUTONEG) &&
4988 netif_carrier_ok(tp->dev) &&
4989 tg3_flag(tp, INIT_COMPLETE)) {
4990 mac_status = tr32(MAC_STATUS);
4991 mac_status &= (MAC_STATUS_PCS_SYNCED |
4992 MAC_STATUS_SIGNAL_DET |
4993 MAC_STATUS_CFG_CHANGED |
4994 MAC_STATUS_RCVD_CFG);
4995 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4996 MAC_STATUS_SIGNAL_DET)) {
4997 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4998 MAC_STATUS_CFG_CHANGED));
5003 tw32_f(MAC_TX_AUTO_NEG, 0);
5005 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5006 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5007 tw32_f(MAC_MODE, tp->mac_mode);
5010 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5011 tg3_init_bcm8002(tp);
5013 /* Enable link change event even when serdes polling. */
5014 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5017 current_link_up = 0;
5018 tp->link_config.rmt_adv = 0;
5019 mac_status = tr32(MAC_STATUS);
5021 if (tg3_flag(tp, HW_AUTONEG))
5022 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5024 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5026 tp->napi[0].hw_status->status =
5027 (SD_STATUS_UPDATED |
5028 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5030 for (i = 0; i < 100; i++) {
5031 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5032 MAC_STATUS_CFG_CHANGED));
5034 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5035 MAC_STATUS_CFG_CHANGED |
5036 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5040 mac_status = tr32(MAC_STATUS);
5041 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5042 current_link_up = 0;
5043 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5044 tp->serdes_counter == 0) {
5045 tw32_f(MAC_MODE, (tp->mac_mode |
5046 MAC_MODE_SEND_CONFIGS));
5048 tw32_f(MAC_MODE, tp->mac_mode);
5052 if (current_link_up == 1) {
5053 tp->link_config.active_speed = SPEED_1000;
5054 tp->link_config.active_duplex = DUPLEX_FULL;
5055 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5056 LED_CTRL_LNKLED_OVERRIDE |
5057 LED_CTRL_1000MBPS_ON));
5059 tp->link_config.active_speed = SPEED_UNKNOWN;
5060 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5061 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5062 LED_CTRL_LNKLED_OVERRIDE |
5063 LED_CTRL_TRAFFIC_OVERRIDE));
5066 if (current_link_up != netif_carrier_ok(tp->dev)) {
5067 if (current_link_up)
5068 netif_carrier_on(tp->dev);
5070 netif_carrier_off(tp->dev);
5071 tg3_link_report(tp);
5073 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5074 if (orig_pause_cfg != now_pause_cfg ||
5075 orig_active_speed != tp->link_config.active_speed ||
5076 orig_active_duplex != tp->link_config.active_duplex)
5077 tg3_link_report(tp);
5083 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5085 int current_link_up, err = 0;
5089 u32 local_adv, remote_adv;
5091 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5092 tw32_f(MAC_MODE, tp->mac_mode);
5098 (MAC_STATUS_SYNC_CHANGED |
5099 MAC_STATUS_CFG_CHANGED |
5100 MAC_STATUS_MI_COMPLETION |
5101 MAC_STATUS_LNKSTATE_CHANGED));
5107 current_link_up = 0;
5108 current_speed = SPEED_UNKNOWN;
5109 current_duplex = DUPLEX_UNKNOWN;
5110 tp->link_config.rmt_adv = 0;
5112 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5113 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5114 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5115 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5116 bmsr |= BMSR_LSTATUS;
5118 bmsr &= ~BMSR_LSTATUS;
5121 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5123 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5124 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5125 /* do nothing, just check for link up at the end */
5126 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5129 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5130 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5131 ADVERTISE_1000XPAUSE |
5132 ADVERTISE_1000XPSE_ASYM |
5135 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5136 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5138 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5139 tg3_writephy(tp, MII_ADVERTISE, newadv);
5140 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5141 tg3_writephy(tp, MII_BMCR, bmcr);
5143 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5144 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5145 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5152 bmcr &= ~BMCR_SPEED1000;
5153 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5155 if (tp->link_config.duplex == DUPLEX_FULL)
5156 new_bmcr |= BMCR_FULLDPLX;
5158 if (new_bmcr != bmcr) {
5159 /* BMCR_SPEED1000 is a reserved bit that needs
5160 * to be set on write.
5162 new_bmcr |= BMCR_SPEED1000;
5164 /* Force a linkdown */
5165 if (netif_carrier_ok(tp->dev)) {
5168 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5169 adv &= ~(ADVERTISE_1000XFULL |
5170 ADVERTISE_1000XHALF |
5172 tg3_writephy(tp, MII_ADVERTISE, adv);
5173 tg3_writephy(tp, MII_BMCR, bmcr |
5177 netif_carrier_off(tp->dev);
5179 tg3_writephy(tp, MII_BMCR, new_bmcr);
5181 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5182 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5183 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5185 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5186 bmsr |= BMSR_LSTATUS;
5188 bmsr &= ~BMSR_LSTATUS;
5190 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5194 if (bmsr & BMSR_LSTATUS) {
5195 current_speed = SPEED_1000;
5196 current_link_up = 1;
5197 if (bmcr & BMCR_FULLDPLX)
5198 current_duplex = DUPLEX_FULL;
5200 current_duplex = DUPLEX_HALF;
5205 if (bmcr & BMCR_ANENABLE) {
5208 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5209 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5210 common = local_adv & remote_adv;
5211 if (common & (ADVERTISE_1000XHALF |
5212 ADVERTISE_1000XFULL)) {
5213 if (common & ADVERTISE_1000XFULL)
5214 current_duplex = DUPLEX_FULL;
5216 current_duplex = DUPLEX_HALF;
5218 tp->link_config.rmt_adv =
5219 mii_adv_to_ethtool_adv_x(remote_adv);
5220 } else if (!tg3_flag(tp, 5780_CLASS)) {
5221 /* Link is up via parallel detect */
5223 current_link_up = 0;
5228 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5229 tg3_setup_flow_control(tp, local_adv, remote_adv);
5231 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5232 if (tp->link_config.active_duplex == DUPLEX_HALF)
5233 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5235 tw32_f(MAC_MODE, tp->mac_mode);
5238 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5240 tp->link_config.active_speed = current_speed;
5241 tp->link_config.active_duplex = current_duplex;
5243 if (current_link_up != netif_carrier_ok(tp->dev)) {
5244 if (current_link_up)
5245 netif_carrier_on(tp->dev);
5247 netif_carrier_off(tp->dev);
5248 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5250 tg3_link_report(tp);
5255 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5257 if (tp->serdes_counter) {
5258 /* Give autoneg time to complete. */
5259 tp->serdes_counter--;
5263 if (!netif_carrier_ok(tp->dev) &&
5264 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5267 tg3_readphy(tp, MII_BMCR, &bmcr);
5268 if (bmcr & BMCR_ANENABLE) {
5271 /* Select shadow register 0x1f */
5272 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5273 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5275 /* Select expansion interrupt status register */
5276 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5277 MII_TG3_DSP_EXP1_INT_STAT);
5278 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5279 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5281 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5282 /* We have signal detect and not receiving
5283 * config code words, link is up by parallel
5287 bmcr &= ~BMCR_ANENABLE;
5288 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5289 tg3_writephy(tp, MII_BMCR, bmcr);
5290 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5293 } else if (netif_carrier_ok(tp->dev) &&
5294 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5295 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5298 /* Select expansion interrupt status register */
5299 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5300 MII_TG3_DSP_EXP1_INT_STAT);
5301 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5305 /* Config code words received, turn on autoneg. */
5306 tg3_readphy(tp, MII_BMCR, &bmcr);
5307 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5309 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5315 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5320 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5321 err = tg3_setup_fiber_phy(tp, force_reset);
5322 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5323 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5325 err = tg3_setup_copper_phy(tp, force_reset);
5327 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5330 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5331 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5333 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5338 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5339 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5340 tw32(GRC_MISC_CFG, val);
5343 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5344 (6 << TX_LENGTHS_IPG_SHIFT);
5345 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5346 val |= tr32(MAC_TX_LENGTHS) &
5347 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5348 TX_LENGTHS_CNT_DWN_VAL_MSK);
5350 if (tp->link_config.active_speed == SPEED_1000 &&
5351 tp->link_config.active_duplex == DUPLEX_HALF)
5352 tw32(MAC_TX_LENGTHS, val |
5353 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5355 tw32(MAC_TX_LENGTHS, val |
5356 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5358 if (!tg3_flag(tp, 5705_PLUS)) {
5359 if (netif_carrier_ok(tp->dev)) {
5360 tw32(HOSTCC_STAT_COAL_TICKS,
5361 tp->coal.stats_block_coalesce_usecs);
5363 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5367 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5368 val = tr32(PCIE_PWR_MGMT_THRESH);
5369 if (!netif_carrier_ok(tp->dev))
5370 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5373 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5374 tw32(PCIE_PWR_MGMT_THRESH, val);
5380 static inline int tg3_irq_sync(struct tg3 *tp)
5382 return tp->irq_sync;
5385 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5389 dst = (u32 *)((u8 *)dst + off);
5390 for (i = 0; i < len; i += sizeof(u32))
5391 *dst++ = tr32(off + i);
5394 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5396 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5397 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5398 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5399 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5400 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5401 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5402 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5403 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5404 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5405 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5406 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5407 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5408 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5409 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5410 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5411 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5412 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5413 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5414 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5416 if (tg3_flag(tp, SUPPORT_MSIX))
5417 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5419 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5420 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5421 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5422 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5423 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5424 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5425 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5426 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5428 if (!tg3_flag(tp, 5705_PLUS)) {
5429 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5430 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5431 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5434 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5435 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5436 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5437 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5438 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5440 if (tg3_flag(tp, NVRAM))
5441 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5444 static void tg3_dump_state(struct tg3 *tp)
5449 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5451 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5455 if (tg3_flag(tp, PCI_EXPRESS)) {
5456 /* Read up to but not including private PCI registers */
5457 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5458 regs[i / sizeof(u32)] = tr32(i);
5460 tg3_dump_legacy_regs(tp, regs);
5462 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5463 if (!regs[i + 0] && !regs[i + 1] &&
5464 !regs[i + 2] && !regs[i + 3])
5467 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5469 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5474 for (i = 0; i < tp->irq_cnt; i++) {
5475 struct tg3_napi *tnapi = &tp->napi[i];
5477 /* SW status block */
5479 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5481 tnapi->hw_status->status,
5482 tnapi->hw_status->status_tag,
5483 tnapi->hw_status->rx_jumbo_consumer,
5484 tnapi->hw_status->rx_consumer,
5485 tnapi->hw_status->rx_mini_consumer,
5486 tnapi->hw_status->idx[0].rx_producer,
5487 tnapi->hw_status->idx[0].tx_consumer);
5490 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5492 tnapi->last_tag, tnapi->last_irq_tag,
5493 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5495 tnapi->prodring.rx_std_prod_idx,
5496 tnapi->prodring.rx_std_cons_idx,
5497 tnapi->prodring.rx_jmb_prod_idx,
5498 tnapi->prodring.rx_jmb_cons_idx);
5502 /* This is called whenever we suspect that the system chipset is re-
5503 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5504 * is bogus tx completions. We try to recover by setting the
5505 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5508 static void tg3_tx_recover(struct tg3 *tp)
5510 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5511 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5513 netdev_warn(tp->dev,
5514 "The system may be re-ordering memory-mapped I/O "
5515 "cycles to the network device, attempting to recover. "
5516 "Please report the problem to the driver maintainer "
5517 "and include system chipset information.\n");
5519 spin_lock(&tp->lock);
5520 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5521 spin_unlock(&tp->lock);
5524 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5526 /* Tell compiler to fetch tx indices from memory. */
5528 return tnapi->tx_pending -
5529 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5532 /* Tigon3 never reports partial packet sends. So we do not
5533 * need special logic to handle SKBs that have not had all
5534 * of their frags sent yet, like SunGEM does.
5536 static void tg3_tx(struct tg3_napi *tnapi)
5538 struct tg3 *tp = tnapi->tp;
5539 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5540 u32 sw_idx = tnapi->tx_cons;
5541 struct netdev_queue *txq;
5542 int index = tnapi - tp->napi;
5543 unsigned int pkts_compl = 0, bytes_compl = 0;
5545 if (tg3_flag(tp, ENABLE_TSS))
5548 txq = netdev_get_tx_queue(tp->dev, index);
5550 while (sw_idx != hw_idx) {
5551 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5552 struct sk_buff *skb = ri->skb;
5555 if (unlikely(skb == NULL)) {
5560 pci_unmap_single(tp->pdev,
5561 dma_unmap_addr(ri, mapping),
5567 while (ri->fragmented) {
5568 ri->fragmented = false;
5569 sw_idx = NEXT_TX(sw_idx);
5570 ri = &tnapi->tx_buffers[sw_idx];
5573 sw_idx = NEXT_TX(sw_idx);
5575 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5576 ri = &tnapi->tx_buffers[sw_idx];
5577 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5580 pci_unmap_page(tp->pdev,
5581 dma_unmap_addr(ri, mapping),
5582 skb_frag_size(&skb_shinfo(skb)->frags[i]),
5585 while (ri->fragmented) {
5586 ri->fragmented = false;
5587 sw_idx = NEXT_TX(sw_idx);
5588 ri = &tnapi->tx_buffers[sw_idx];
5591 sw_idx = NEXT_TX(sw_idx);
5595 bytes_compl += skb->len;
5599 if (unlikely(tx_bug)) {
5605 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5607 tnapi->tx_cons = sw_idx;
5609 /* Need to make the tx_cons update visible to tg3_start_xmit()
5610 * before checking for netif_queue_stopped(). Without the
5611 * memory barrier, there is a small possibility that tg3_start_xmit()
5612 * will miss it and cause the queue to be stopped forever.
5616 if (unlikely(netif_tx_queue_stopped(txq) &&
5617 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5618 __netif_tx_lock(txq, smp_processor_id());
5619 if (netif_tx_queue_stopped(txq) &&
5620 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5621 netif_tx_wake_queue(txq);
5622 __netif_tx_unlock(txq);
5626 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5631 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5632 map_sz, PCI_DMA_FROMDEVICE);
5637 /* Returns size of skb allocated or < 0 on error.
5639 * We only need to fill in the address because the other members
5640 * of the RX descriptor are invariant, see tg3_init_rings.
5642 * Note the purposeful assymetry of cpu vs. chip accesses. For
5643 * posting buffers we only dirty the first cache line of the RX
5644 * descriptor (containing the address). Whereas for the RX status
5645 * buffers the cpu only reads the last cacheline of the RX descriptor
5646 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5648 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5649 u32 opaque_key, u32 dest_idx_unmasked)
5651 struct tg3_rx_buffer_desc *desc;
5652 struct ring_info *map;
5655 int skb_size, data_size, dest_idx;
5657 switch (opaque_key) {
5658 case RXD_OPAQUE_RING_STD:
5659 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5660 desc = &tpr->rx_std[dest_idx];
5661 map = &tpr->rx_std_buffers[dest_idx];
5662 data_size = tp->rx_pkt_map_sz;
5665 case RXD_OPAQUE_RING_JUMBO:
5666 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5667 desc = &tpr->rx_jmb[dest_idx].std;
5668 map = &tpr->rx_jmb_buffers[dest_idx];
5669 data_size = TG3_RX_JMB_MAP_SZ;
5676 /* Do not overwrite any of the map or rp information
5677 * until we are sure we can commit to a new buffer.
5679 * Callers depend upon this behavior and assume that
5680 * we leave everything unchanged if we fail.
5682 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5683 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5684 data = kmalloc(skb_size, GFP_ATOMIC);
5688 mapping = pci_map_single(tp->pdev,
5689 data + TG3_RX_OFFSET(tp),
5691 PCI_DMA_FROMDEVICE);
5692 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5698 dma_unmap_addr_set(map, mapping, mapping);
5700 desc->addr_hi = ((u64)mapping >> 32);
5701 desc->addr_lo = ((u64)mapping & 0xffffffff);
5706 /* We only need to move over in the address because the other
5707 * members of the RX descriptor are invariant. See notes above
5708 * tg3_alloc_rx_data for full details.
5710 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5711 struct tg3_rx_prodring_set *dpr,
5712 u32 opaque_key, int src_idx,
5713 u32 dest_idx_unmasked)
5715 struct tg3 *tp = tnapi->tp;
5716 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5717 struct ring_info *src_map, *dest_map;
5718 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5721 switch (opaque_key) {
5722 case RXD_OPAQUE_RING_STD:
5723 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5724 dest_desc = &dpr->rx_std[dest_idx];
5725 dest_map = &dpr->rx_std_buffers[dest_idx];
5726 src_desc = &spr->rx_std[src_idx];
5727 src_map = &spr->rx_std_buffers[src_idx];
5730 case RXD_OPAQUE_RING_JUMBO:
5731 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5732 dest_desc = &dpr->rx_jmb[dest_idx].std;
5733 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5734 src_desc = &spr->rx_jmb[src_idx].std;
5735 src_map = &spr->rx_jmb_buffers[src_idx];
5742 dest_map->data = src_map->data;
5743 dma_unmap_addr_set(dest_map, mapping,
5744 dma_unmap_addr(src_map, mapping));
5745 dest_desc->addr_hi = src_desc->addr_hi;
5746 dest_desc->addr_lo = src_desc->addr_lo;
5748 /* Ensure that the update to the skb happens after the physical
5749 * addresses have been transferred to the new BD location.
5753 src_map->data = NULL;
5756 /* The RX ring scheme is composed of multiple rings which post fresh
5757 * buffers to the chip, and one special ring the chip uses to report
5758 * status back to the host.
5760 * The special ring reports the status of received packets to the
5761 * host. The chip does not write into the original descriptor the
5762 * RX buffer was obtained from. The chip simply takes the original
5763 * descriptor as provided by the host, updates the status and length
5764 * field, then writes this into the next status ring entry.
5766 * Each ring the host uses to post buffers to the chip is described
5767 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
5768 * it is first placed into the on-chip ram. When the packet's length
5769 * is known, it walks down the TG3_BDINFO entries to select the ring.
5770 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5771 * which is within the range of the new packet's length is chosen.
5773 * The "separate ring for rx status" scheme may sound queer, but it makes
5774 * sense from a cache coherency perspective. If only the host writes
5775 * to the buffer post rings, and only the chip writes to the rx status
5776 * rings, then cache lines never move beyond shared-modified state.
5777 * If both the host and chip were to write into the same ring, cache line
5778 * eviction could occur since both entities want it in an exclusive state.
5780 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5782 struct tg3 *tp = tnapi->tp;
5783 u32 work_mask, rx_std_posted = 0;
5784 u32 std_prod_idx, jmb_prod_idx;
5785 u32 sw_idx = tnapi->rx_rcb_ptr;
5788 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5790 hw_idx = *(tnapi->rx_rcb_prod_idx);
5792 * We need to order the read of hw_idx and the read of
5793 * the opaque cookie.
5798 std_prod_idx = tpr->rx_std_prod_idx;
5799 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5800 while (sw_idx != hw_idx && budget > 0) {
5801 struct ring_info *ri;
5802 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5804 struct sk_buff *skb;
5805 dma_addr_t dma_addr;
5806 u32 opaque_key, desc_idx, *post_ptr;
5809 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5810 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5811 if (opaque_key == RXD_OPAQUE_RING_STD) {
5812 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5813 dma_addr = dma_unmap_addr(ri, mapping);
5815 post_ptr = &std_prod_idx;
5817 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5818 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5819 dma_addr = dma_unmap_addr(ri, mapping);
5821 post_ptr = &jmb_prod_idx;
5823 goto next_pkt_nopost;
5825 work_mask |= opaque_key;
5827 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5828 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5830 tg3_recycle_rx(tnapi, tpr, opaque_key,
5831 desc_idx, *post_ptr);
5833 /* Other statistics kept track of by card. */
5838 prefetch(data + TG3_RX_OFFSET(tp));
5839 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5842 if (len > TG3_RX_COPY_THRESH(tp)) {
5845 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5850 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5851 PCI_DMA_FROMDEVICE);
5853 skb = build_skb(data);
5856 goto drop_it_no_recycle;
5858 skb_reserve(skb, TG3_RX_OFFSET(tp));
5859 /* Ensure that the update to the data happens
5860 * after the usage of the old DMA mapping.
5867 tg3_recycle_rx(tnapi, tpr, opaque_key,
5868 desc_idx, *post_ptr);
5870 skb = netdev_alloc_skb(tp->dev,
5871 len + TG3_RAW_IP_ALIGN);
5873 goto drop_it_no_recycle;
5875 skb_reserve(skb, TG3_RAW_IP_ALIGN);
5876 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5878 data + TG3_RX_OFFSET(tp),
5880 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5884 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5885 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5886 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5887 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5888 skb->ip_summed = CHECKSUM_UNNECESSARY;
5890 skb_checksum_none_assert(skb);
5892 skb->protocol = eth_type_trans(skb, tp->dev);
5894 if (len > (tp->dev->mtu + ETH_HLEN) &&
5895 skb->protocol != htons(ETH_P_8021Q)) {
5897 goto drop_it_no_recycle;
5900 if (desc->type_flags & RXD_FLAG_VLAN &&
5901 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5902 __vlan_hwaccel_put_tag(skb,
5903 desc->err_vlan & RXD_VLAN_MASK);
5905 napi_gro_receive(&tnapi->napi, skb);
5913 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5914 tpr->rx_std_prod_idx = std_prod_idx &
5915 tp->rx_std_ring_mask;
5916 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5917 tpr->rx_std_prod_idx);
5918 work_mask &= ~RXD_OPAQUE_RING_STD;
5923 sw_idx &= tp->rx_ret_ring_mask;
5925 /* Refresh hw_idx to see if there is new work */
5926 if (sw_idx == hw_idx) {
5927 hw_idx = *(tnapi->rx_rcb_prod_idx);
5932 /* ACK the status ring. */
5933 tnapi->rx_rcb_ptr = sw_idx;
5934 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5936 /* Refill RX ring(s). */
5937 if (!tg3_flag(tp, ENABLE_RSS)) {
5938 /* Sync BD data before updating mailbox */
5941 if (work_mask & RXD_OPAQUE_RING_STD) {
5942 tpr->rx_std_prod_idx = std_prod_idx &
5943 tp->rx_std_ring_mask;
5944 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5945 tpr->rx_std_prod_idx);
5947 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5948 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5949 tp->rx_jmb_ring_mask;
5950 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5951 tpr->rx_jmb_prod_idx);
5954 } else if (work_mask) {
5955 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5956 * updated before the producer indices can be updated.
5960 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5961 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5963 if (tnapi != &tp->napi[1]) {
5964 tp->rx_refill = true;
5965 napi_schedule(&tp->napi[1].napi);
5972 static void tg3_poll_link(struct tg3 *tp)
5974 /* handle link change and other phy events */
5975 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5976 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5978 if (sblk->status & SD_STATUS_LINK_CHG) {
5979 sblk->status = SD_STATUS_UPDATED |
5980 (sblk->status & ~SD_STATUS_LINK_CHG);
5981 spin_lock(&tp->lock);
5982 if (tg3_flag(tp, USE_PHYLIB)) {
5984 (MAC_STATUS_SYNC_CHANGED |
5985 MAC_STATUS_CFG_CHANGED |
5986 MAC_STATUS_MI_COMPLETION |
5987 MAC_STATUS_LNKSTATE_CHANGED));
5990 tg3_setup_phy(tp, 0);
5991 spin_unlock(&tp->lock);
5996 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5997 struct tg3_rx_prodring_set *dpr,
5998 struct tg3_rx_prodring_set *spr)
6000 u32 si, di, cpycnt, src_prod_idx;
6004 src_prod_idx = spr->rx_std_prod_idx;
6006 /* Make sure updates to the rx_std_buffers[] entries and the
6007 * standard producer index are seen in the correct order.
6011 if (spr->rx_std_cons_idx == src_prod_idx)
6014 if (spr->rx_std_cons_idx < src_prod_idx)
6015 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6017 cpycnt = tp->rx_std_ring_mask + 1 -
6018 spr->rx_std_cons_idx;
6020 cpycnt = min(cpycnt,
6021 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6023 si = spr->rx_std_cons_idx;
6024 di = dpr->rx_std_prod_idx;
6026 for (i = di; i < di + cpycnt; i++) {
6027 if (dpr->rx_std_buffers[i].data) {
6037 /* Ensure that updates to the rx_std_buffers ring and the
6038 * shadowed hardware producer ring from tg3_recycle_skb() are
6039 * ordered correctly WRT the skb check above.
6043 memcpy(&dpr->rx_std_buffers[di],
6044 &spr->rx_std_buffers[si],
6045 cpycnt * sizeof(struct ring_info));
6047 for (i = 0; i < cpycnt; i++, di++, si++) {
6048 struct tg3_rx_buffer_desc *sbd, *dbd;
6049 sbd = &spr->rx_std[si];
6050 dbd = &dpr->rx_std[di];
6051 dbd->addr_hi = sbd->addr_hi;
6052 dbd->addr_lo = sbd->addr_lo;
6055 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6056 tp->rx_std_ring_mask;
6057 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6058 tp->rx_std_ring_mask;
6062 src_prod_idx = spr->rx_jmb_prod_idx;
6064 /* Make sure updates to the rx_jmb_buffers[] entries and
6065 * the jumbo producer index are seen in the correct order.
6069 if (spr->rx_jmb_cons_idx == src_prod_idx)
6072 if (spr->rx_jmb_cons_idx < src_prod_idx)
6073 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6075 cpycnt = tp->rx_jmb_ring_mask + 1 -
6076 spr->rx_jmb_cons_idx;
6078 cpycnt = min(cpycnt,
6079 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6081 si = spr->rx_jmb_cons_idx;
6082 di = dpr->rx_jmb_prod_idx;
6084 for (i = di; i < di + cpycnt; i++) {
6085 if (dpr->rx_jmb_buffers[i].data) {
6095 /* Ensure that updates to the rx_jmb_buffers ring and the
6096 * shadowed hardware producer ring from tg3_recycle_skb() are
6097 * ordered correctly WRT the skb check above.
6101 memcpy(&dpr->rx_jmb_buffers[di],
6102 &spr->rx_jmb_buffers[si],
6103 cpycnt * sizeof(struct ring_info));
6105 for (i = 0; i < cpycnt; i++, di++, si++) {
6106 struct tg3_rx_buffer_desc *sbd, *dbd;
6107 sbd = &spr->rx_jmb[si].std;
6108 dbd = &dpr->rx_jmb[di].std;
6109 dbd->addr_hi = sbd->addr_hi;
6110 dbd->addr_lo = sbd->addr_lo;
6113 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6114 tp->rx_jmb_ring_mask;
6115 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6116 tp->rx_jmb_ring_mask;
6122 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6124 struct tg3 *tp = tnapi->tp;
6126 /* run TX completion thread */
6127 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6129 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6133 if (!tnapi->rx_rcb_prod_idx)
6136 /* run RX thread, within the bounds set by NAPI.
6137 * All RX "locking" is done by ensuring outside
6138 * code synchronizes with tg3->napi.poll()
6140 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6141 work_done += tg3_rx(tnapi, budget - work_done);
6143 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6144 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6146 u32 std_prod_idx = dpr->rx_std_prod_idx;
6147 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6149 tp->rx_refill = false;
6150 for (i = 1; i < tp->irq_cnt; i++)
6151 err |= tg3_rx_prodring_xfer(tp, dpr,
6152 &tp->napi[i].prodring);
6156 if (std_prod_idx != dpr->rx_std_prod_idx)
6157 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6158 dpr->rx_std_prod_idx);
6160 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6161 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6162 dpr->rx_jmb_prod_idx);
6167 tw32_f(HOSTCC_MODE, tp->coal_now);
6173 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6175 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6176 schedule_work(&tp->reset_task);
6179 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6181 cancel_work_sync(&tp->reset_task);
6182 tg3_flag_clear(tp, RESET_TASK_PENDING);
6183 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6186 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6188 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6189 struct tg3 *tp = tnapi->tp;
6191 struct tg3_hw_status *sblk = tnapi->hw_status;
6194 work_done = tg3_poll_work(tnapi, work_done, budget);
6196 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6199 if (unlikely(work_done >= budget))
6202 /* tp->last_tag is used in tg3_int_reenable() below
6203 * to tell the hw how much work has been processed,
6204 * so we must read it before checking for more work.
6206 tnapi->last_tag = sblk->status_tag;
6207 tnapi->last_irq_tag = tnapi->last_tag;
6210 /* check for RX/TX work to do */
6211 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6212 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6214 /* This test here is not race free, but will reduce
6215 * the number of interrupts by looping again.
6217 if (tnapi == &tp->napi[1] && tp->rx_refill)
6220 napi_complete(napi);
6221 /* Reenable interrupts. */
6222 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6224 /* This test here is synchronized by napi_schedule()
6225 * and napi_complete() to close the race condition.
6227 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6228 tw32(HOSTCC_MODE, tp->coalesce_mode |
6229 HOSTCC_MODE_ENABLE |
6240 /* work_done is guaranteed to be less than budget. */
6241 napi_complete(napi);
6242 tg3_reset_task_schedule(tp);
6246 static void tg3_process_error(struct tg3 *tp)
6249 bool real_error = false;
6251 if (tg3_flag(tp, ERROR_PROCESSED))
6254 /* Check Flow Attention register */
6255 val = tr32(HOSTCC_FLOW_ATTN);
6256 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6257 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6261 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6262 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6266 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6267 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6276 tg3_flag_set(tp, ERROR_PROCESSED);
6277 tg3_reset_task_schedule(tp);
6280 static int tg3_poll(struct napi_struct *napi, int budget)
6282 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6283 struct tg3 *tp = tnapi->tp;
6285 struct tg3_hw_status *sblk = tnapi->hw_status;
6288 if (sblk->status & SD_STATUS_ERROR)
6289 tg3_process_error(tp);
6293 work_done = tg3_poll_work(tnapi, work_done, budget);
6295 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6298 if (unlikely(work_done >= budget))
6301 if (tg3_flag(tp, TAGGED_STATUS)) {
6302 /* tp->last_tag is used in tg3_int_reenable() below
6303 * to tell the hw how much work has been processed,
6304 * so we must read it before checking for more work.
6306 tnapi->last_tag = sblk->status_tag;
6307 tnapi->last_irq_tag = tnapi->last_tag;
6310 sblk->status &= ~SD_STATUS_UPDATED;
6312 if (likely(!tg3_has_work(tnapi))) {
6313 napi_complete(napi);
6314 tg3_int_reenable(tnapi);
6322 /* work_done is guaranteed to be less than budget. */
6323 napi_complete(napi);
6324 tg3_reset_task_schedule(tp);
6328 static void tg3_napi_disable(struct tg3 *tp)
6332 for (i = tp->irq_cnt - 1; i >= 0; i--)
6333 napi_disable(&tp->napi[i].napi);
6336 static void tg3_napi_enable(struct tg3 *tp)
6340 for (i = 0; i < tp->irq_cnt; i++)
6341 napi_enable(&tp->napi[i].napi);
6344 static void tg3_napi_init(struct tg3 *tp)
6348 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6349 for (i = 1; i < tp->irq_cnt; i++)
6350 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6353 static void tg3_napi_fini(struct tg3 *tp)
6357 for (i = 0; i < tp->irq_cnt; i++)
6358 netif_napi_del(&tp->napi[i].napi);
6361 static inline void tg3_netif_stop(struct tg3 *tp)
6363 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6364 tg3_napi_disable(tp);
6365 netif_tx_disable(tp->dev);
6368 static inline void tg3_netif_start(struct tg3 *tp)
6370 /* NOTE: unconditional netif_tx_wake_all_queues is only
6371 * appropriate so long as all callers are assured to
6372 * have free tx slots (such as after tg3_init_hw)
6374 netif_tx_wake_all_queues(tp->dev);
6376 tg3_napi_enable(tp);
6377 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6378 tg3_enable_ints(tp);
6381 static void tg3_irq_quiesce(struct tg3 *tp)
6385 BUG_ON(tp->irq_sync);
6390 for (i = 0; i < tp->irq_cnt; i++)
6391 synchronize_irq(tp->napi[i].irq_vec);
6394 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6395 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6396 * with as well. Most of the time, this is not necessary except when
6397 * shutting down the device.
6399 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6401 spin_lock_bh(&tp->lock);
6403 tg3_irq_quiesce(tp);
6406 static inline void tg3_full_unlock(struct tg3 *tp)
6408 spin_unlock_bh(&tp->lock);
6411 /* One-shot MSI handler - Chip automatically disables interrupt
6412 * after sending MSI so driver doesn't have to do it.
6414 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6416 struct tg3_napi *tnapi = dev_id;
6417 struct tg3 *tp = tnapi->tp;
6419 prefetch(tnapi->hw_status);
6421 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6423 if (likely(!tg3_irq_sync(tp)))
6424 napi_schedule(&tnapi->napi);
6429 /* MSI ISR - No need to check for interrupt sharing and no need to
6430 * flush status block and interrupt mailbox. PCI ordering rules
6431 * guarantee that MSI will arrive after the status block.
6433 static irqreturn_t tg3_msi(int irq, void *dev_id)
6435 struct tg3_napi *tnapi = dev_id;
6436 struct tg3 *tp = tnapi->tp;
6438 prefetch(tnapi->hw_status);
6440 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6442 * Writing any value to intr-mbox-0 clears PCI INTA# and
6443 * chip-internal interrupt pending events.
6444 * Writing non-zero to intr-mbox-0 additional tells the
6445 * NIC to stop sending us irqs, engaging "in-intr-handler"
6448 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6449 if (likely(!tg3_irq_sync(tp)))
6450 napi_schedule(&tnapi->napi);
6452 return IRQ_RETVAL(1);
6455 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6457 struct tg3_napi *tnapi = dev_id;
6458 struct tg3 *tp = tnapi->tp;
6459 struct tg3_hw_status *sblk = tnapi->hw_status;
6460 unsigned int handled = 1;
6462 /* In INTx mode, it is possible for the interrupt to arrive at
6463 * the CPU before the status block posted prior to the interrupt.
6464 * Reading the PCI State register will confirm whether the
6465 * interrupt is ours and will flush the status block.
6467 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6468 if (tg3_flag(tp, CHIP_RESETTING) ||
6469 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6476 * Writing any value to intr-mbox-0 clears PCI INTA# and
6477 * chip-internal interrupt pending events.
6478 * Writing non-zero to intr-mbox-0 additional tells the
6479 * NIC to stop sending us irqs, engaging "in-intr-handler"
6482 * Flush the mailbox to de-assert the IRQ immediately to prevent
6483 * spurious interrupts. The flush impacts performance but
6484 * excessive spurious interrupts can be worse in some cases.
6486 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6487 if (tg3_irq_sync(tp))
6489 sblk->status &= ~SD_STATUS_UPDATED;
6490 if (likely(tg3_has_work(tnapi))) {
6491 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6492 napi_schedule(&tnapi->napi);
6494 /* No work, shared interrupt perhaps? re-enable
6495 * interrupts, and flush that PCI write
6497 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6501 return IRQ_RETVAL(handled);
6504 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6506 struct tg3_napi *tnapi = dev_id;
6507 struct tg3 *tp = tnapi->tp;
6508 struct tg3_hw_status *sblk = tnapi->hw_status;
6509 unsigned int handled = 1;
6511 /* In INTx mode, it is possible for the interrupt to arrive at
6512 * the CPU before the status block posted prior to the interrupt.
6513 * Reading the PCI State register will confirm whether the
6514 * interrupt is ours and will flush the status block.
6516 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6517 if (tg3_flag(tp, CHIP_RESETTING) ||
6518 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6525 * writing any value to intr-mbox-0 clears PCI INTA# and
6526 * chip-internal interrupt pending events.
6527 * writing non-zero to intr-mbox-0 additional tells the
6528 * NIC to stop sending us irqs, engaging "in-intr-handler"
6531 * Flush the mailbox to de-assert the IRQ immediately to prevent
6532 * spurious interrupts. The flush impacts performance but
6533 * excessive spurious interrupts can be worse in some cases.
6535 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6538 * In a shared interrupt configuration, sometimes other devices'
6539 * interrupts will scream. We record the current status tag here
6540 * so that the above check can report that the screaming interrupts
6541 * are unhandled. Eventually they will be silenced.
6543 tnapi->last_irq_tag = sblk->status_tag;
6545 if (tg3_irq_sync(tp))
6548 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6550 napi_schedule(&tnapi->napi);
6553 return IRQ_RETVAL(handled);
6556 /* ISR for interrupt test */
6557 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6559 struct tg3_napi *tnapi = dev_id;
6560 struct tg3 *tp = tnapi->tp;
6561 struct tg3_hw_status *sblk = tnapi->hw_status;
6563 if ((sblk->status & SD_STATUS_UPDATED) ||
6564 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6565 tg3_disable_ints(tp);
6566 return IRQ_RETVAL(1);
6568 return IRQ_RETVAL(0);
6571 #ifdef CONFIG_NET_POLL_CONTROLLER
6572 static void tg3_poll_controller(struct net_device *dev)
6575 struct tg3 *tp = netdev_priv(dev);
6577 for (i = 0; i < tp->irq_cnt; i++)
6578 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6582 static void tg3_tx_timeout(struct net_device *dev)
6584 struct tg3 *tp = netdev_priv(dev);
6586 if (netif_msg_tx_err(tp)) {
6587 netdev_err(dev, "transmit timed out, resetting\n");
6591 tg3_reset_task_schedule(tp);
6594 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6595 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6597 u32 base = (u32) mapping & 0xffffffff;
6599 return (base > 0xffffdcc0) && (base + len + 8 < base);
6602 /* Test for DMA addresses > 40-bit */
6603 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6606 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6607 if (tg3_flag(tp, 40BIT_DMA_BUG))
6608 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6615 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6616 dma_addr_t mapping, u32 len, u32 flags,
6619 txbd->addr_hi = ((u64) mapping >> 32);
6620 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6621 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6622 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6625 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6626 dma_addr_t map, u32 len, u32 flags,
6629 struct tg3 *tp = tnapi->tp;
6632 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6635 if (tg3_4g_overflow_test(map, len))
6638 if (tg3_40bit_overflow_test(tp, map, len))
6641 if (tp->dma_limit) {
6642 u32 prvidx = *entry;
6643 u32 tmp_flag = flags & ~TXD_FLAG_END;
6644 while (len > tp->dma_limit && *budget) {
6645 u32 frag_len = tp->dma_limit;
6646 len -= tp->dma_limit;
6648 /* Avoid the 8byte DMA problem */
6650 len += tp->dma_limit / 2;
6651 frag_len = tp->dma_limit / 2;
6654 tnapi->tx_buffers[*entry].fragmented = true;
6656 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6657 frag_len, tmp_flag, mss, vlan);
6660 *entry = NEXT_TX(*entry);
6667 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6668 len, flags, mss, vlan);
6670 *entry = NEXT_TX(*entry);
6673 tnapi->tx_buffers[prvidx].fragmented = false;
6677 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6678 len, flags, mss, vlan);
6679 *entry = NEXT_TX(*entry);
6685 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6688 struct sk_buff *skb;
6689 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6694 pci_unmap_single(tnapi->tp->pdev,
6695 dma_unmap_addr(txb, mapping),
6699 while (txb->fragmented) {
6700 txb->fragmented = false;
6701 entry = NEXT_TX(entry);
6702 txb = &tnapi->tx_buffers[entry];
6705 for (i = 0; i <= last; i++) {
6706 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6708 entry = NEXT_TX(entry);
6709 txb = &tnapi->tx_buffers[entry];
6711 pci_unmap_page(tnapi->tp->pdev,
6712 dma_unmap_addr(txb, mapping),
6713 skb_frag_size(frag), PCI_DMA_TODEVICE);
6715 while (txb->fragmented) {
6716 txb->fragmented = false;
6717 entry = NEXT_TX(entry);
6718 txb = &tnapi->tx_buffers[entry];
6723 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6724 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6725 struct sk_buff **pskb,
6726 u32 *entry, u32 *budget,
6727 u32 base_flags, u32 mss, u32 vlan)
6729 struct tg3 *tp = tnapi->tp;
6730 struct sk_buff *new_skb, *skb = *pskb;
6731 dma_addr_t new_addr = 0;
6734 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6735 new_skb = skb_copy(skb, GFP_ATOMIC);
6737 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6739 new_skb = skb_copy_expand(skb,
6740 skb_headroom(skb) + more_headroom,
6741 skb_tailroom(skb), GFP_ATOMIC);
6747 /* New SKB is guaranteed to be linear. */
6748 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6750 /* Make sure the mapping succeeded */
6751 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6752 dev_kfree_skb(new_skb);
6755 u32 save_entry = *entry;
6757 base_flags |= TXD_FLAG_END;
6759 tnapi->tx_buffers[*entry].skb = new_skb;
6760 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6763 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6764 new_skb->len, base_flags,
6766 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6767 dev_kfree_skb(new_skb);
6778 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6780 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6781 * TSO header is greater than 80 bytes.
6783 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6785 struct sk_buff *segs, *nskb;
6786 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6788 /* Estimate the number of fragments in the worst case */
6789 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6790 netif_stop_queue(tp->dev);
6792 /* netif_tx_stop_queue() must be done before checking
6793 * checking tx index in tg3_tx_avail() below, because in
6794 * tg3_tx(), we update tx index before checking for
6795 * netif_tx_queue_stopped().
6798 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6799 return NETDEV_TX_BUSY;
6801 netif_wake_queue(tp->dev);
6804 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6806 goto tg3_tso_bug_end;
6812 tg3_start_xmit(nskb, tp->dev);
6818 return NETDEV_TX_OK;
6821 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6822 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6824 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6826 struct tg3 *tp = netdev_priv(dev);
6827 u32 len, entry, base_flags, mss, vlan = 0;
6829 int i = -1, would_hit_hwbug;
6831 struct tg3_napi *tnapi;
6832 struct netdev_queue *txq;
6835 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6836 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6837 if (tg3_flag(tp, ENABLE_TSS))
6840 budget = tg3_tx_avail(tnapi);
6842 /* We are running in BH disabled context with netif_tx_lock
6843 * and TX reclaim runs via tp->napi.poll inside of a software
6844 * interrupt. Furthermore, IRQ processing runs lockless so we have
6845 * no IRQ context deadlocks to worry about either. Rejoice!
6847 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6848 if (!netif_tx_queue_stopped(txq)) {
6849 netif_tx_stop_queue(txq);
6851 /* This is a hard error, log it. */
6853 "BUG! Tx Ring full when queue awake!\n");
6855 return NETDEV_TX_BUSY;
6858 entry = tnapi->tx_prod;
6860 if (skb->ip_summed == CHECKSUM_PARTIAL)
6861 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6863 mss = skb_shinfo(skb)->gso_size;
6866 u32 tcp_opt_len, hdr_len;
6868 if (skb_header_cloned(skb) &&
6869 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6873 tcp_opt_len = tcp_optlen(skb);
6875 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6877 if (!skb_is_gso_v6(skb)) {
6879 iph->tot_len = htons(mss + hdr_len);
6882 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6883 tg3_flag(tp, TSO_BUG))
6884 return tg3_tso_bug(tp, skb);
6886 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6887 TXD_FLAG_CPU_POST_DMA);
6889 if (tg3_flag(tp, HW_TSO_1) ||
6890 tg3_flag(tp, HW_TSO_2) ||
6891 tg3_flag(tp, HW_TSO_3)) {
6892 tcp_hdr(skb)->check = 0;
6893 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6895 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6900 if (tg3_flag(tp, HW_TSO_3)) {
6901 mss |= (hdr_len & 0xc) << 12;
6903 base_flags |= 0x00000010;
6904 base_flags |= (hdr_len & 0x3e0) << 5;
6905 } else if (tg3_flag(tp, HW_TSO_2))
6906 mss |= hdr_len << 9;
6907 else if (tg3_flag(tp, HW_TSO_1) ||
6908 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6909 if (tcp_opt_len || iph->ihl > 5) {
6912 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6913 mss |= (tsflags << 11);
6916 if (tcp_opt_len || iph->ihl > 5) {
6919 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6920 base_flags |= tsflags << 12;
6925 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6926 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6927 base_flags |= TXD_FLAG_JMB_PKT;
6929 if (vlan_tx_tag_present(skb)) {
6930 base_flags |= TXD_FLAG_VLAN;
6931 vlan = vlan_tx_tag_get(skb);
6934 len = skb_headlen(skb);
6936 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6937 if (pci_dma_mapping_error(tp->pdev, mapping))
6941 tnapi->tx_buffers[entry].skb = skb;
6942 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6944 would_hit_hwbug = 0;
6946 if (tg3_flag(tp, 5701_DMA_BUG))
6947 would_hit_hwbug = 1;
6949 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6950 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6952 would_hit_hwbug = 1;
6953 } else if (skb_shinfo(skb)->nr_frags > 0) {
6956 if (!tg3_flag(tp, HW_TSO_1) &&
6957 !tg3_flag(tp, HW_TSO_2) &&
6958 !tg3_flag(tp, HW_TSO_3))
6961 /* Now loop through additional data
6962 * fragments, and queue them.
6964 last = skb_shinfo(skb)->nr_frags - 1;
6965 for (i = 0; i <= last; i++) {
6966 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6968 len = skb_frag_size(frag);
6969 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6970 len, DMA_TO_DEVICE);
6972 tnapi->tx_buffers[entry].skb = NULL;
6973 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6975 if (dma_mapping_error(&tp->pdev->dev, mapping))
6979 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6981 ((i == last) ? TXD_FLAG_END : 0),
6983 would_hit_hwbug = 1;
6989 if (would_hit_hwbug) {
6990 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6992 /* If the workaround fails due to memory/mapping
6993 * failure, silently drop this packet.
6995 entry = tnapi->tx_prod;
6996 budget = tg3_tx_avail(tnapi);
6997 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6998 base_flags, mss, vlan))
7002 skb_tx_timestamp(skb);
7003 netdev_tx_sent_queue(txq, skb->len);
7005 /* Sync BD data before updating mailbox */
7008 /* Packets are ready, update Tx producer idx local and on card. */
7009 tw32_tx_mbox(tnapi->prodmbox, entry);
7011 tnapi->tx_prod = entry;
7012 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7013 netif_tx_stop_queue(txq);
7015 /* netif_tx_stop_queue() must be done before checking
7016 * checking tx index in tg3_tx_avail() below, because in
7017 * tg3_tx(), we update tx index before checking for
7018 * netif_tx_queue_stopped().
7021 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7022 netif_tx_wake_queue(txq);
7026 return NETDEV_TX_OK;
7029 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7030 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7035 return NETDEV_TX_OK;
7038 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7041 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7042 MAC_MODE_PORT_MODE_MASK);
7044 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7046 if (!tg3_flag(tp, 5705_PLUS))
7047 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7049 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7050 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7052 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7054 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7056 if (tg3_flag(tp, 5705_PLUS) ||
7057 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7058 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7059 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7062 tw32(MAC_MODE, tp->mac_mode);
7066 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7068 u32 val, bmcr, mac_mode, ptest = 0;
7070 tg3_phy_toggle_apd(tp, false);
7071 tg3_phy_toggle_automdix(tp, 0);
7073 if (extlpbk && tg3_phy_set_extloopbk(tp))
7076 bmcr = BMCR_FULLDPLX;
7081 bmcr |= BMCR_SPEED100;
7085 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7087 bmcr |= BMCR_SPEED100;
7090 bmcr |= BMCR_SPEED1000;
7095 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7096 tg3_readphy(tp, MII_CTRL1000, &val);
7097 val |= CTL1000_AS_MASTER |
7098 CTL1000_ENABLE_MASTER;
7099 tg3_writephy(tp, MII_CTRL1000, val);
7101 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7102 MII_TG3_FET_PTEST_TRIM_2;
7103 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7106 bmcr |= BMCR_LOOPBACK;
7108 tg3_writephy(tp, MII_BMCR, bmcr);
7110 /* The write needs to be flushed for the FETs */
7111 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7112 tg3_readphy(tp, MII_BMCR, &bmcr);
7116 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7117 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7118 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7119 MII_TG3_FET_PTEST_FRC_TX_LINK |
7120 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7122 /* The write needs to be flushed for the AC131 */
7123 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7126 /* Reset to prevent losing 1st rx packet intermittently */
7127 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7128 tg3_flag(tp, 5780_CLASS)) {
7129 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7131 tw32_f(MAC_RX_MODE, tp->rx_mode);
7134 mac_mode = tp->mac_mode &
7135 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7136 if (speed == SPEED_1000)
7137 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7139 mac_mode |= MAC_MODE_PORT_MODE_MII;
7141 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7142 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7144 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7145 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7146 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7147 mac_mode |= MAC_MODE_LINK_POLARITY;
7149 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7150 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7153 tw32(MAC_MODE, mac_mode);
7159 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7161 struct tg3 *tp = netdev_priv(dev);
7163 if (features & NETIF_F_LOOPBACK) {
7164 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7167 spin_lock_bh(&tp->lock);
7168 tg3_mac_loopback(tp, true);
7169 netif_carrier_on(tp->dev);
7170 spin_unlock_bh(&tp->lock);
7171 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7173 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7176 spin_lock_bh(&tp->lock);
7177 tg3_mac_loopback(tp, false);
7178 /* Force link status check */
7179 tg3_setup_phy(tp, 1);
7180 spin_unlock_bh(&tp->lock);
7181 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7185 static netdev_features_t tg3_fix_features(struct net_device *dev,
7186 netdev_features_t features)
7188 struct tg3 *tp = netdev_priv(dev);
7190 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7191 features &= ~NETIF_F_ALL_TSO;
7196 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7198 netdev_features_t changed = dev->features ^ features;
7200 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7201 tg3_set_loopback(dev, features);
7206 static void tg3_rx_prodring_free(struct tg3 *tp,
7207 struct tg3_rx_prodring_set *tpr)
7211 if (tpr != &tp->napi[0].prodring) {
7212 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7213 i = (i + 1) & tp->rx_std_ring_mask)
7214 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7217 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7218 for (i = tpr->rx_jmb_cons_idx;
7219 i != tpr->rx_jmb_prod_idx;
7220 i = (i + 1) & tp->rx_jmb_ring_mask) {
7221 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7229 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7230 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7233 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7234 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7235 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7240 /* Initialize rx rings for packet processing.
7242 * The chip has been shut down and the driver detached from
7243 * the networking, so no interrupts or new tx packets will
7244 * end up in the driver. tp->{tx,}lock are held and thus
7247 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7248 struct tg3_rx_prodring_set *tpr)
7250 u32 i, rx_pkt_dma_sz;
7252 tpr->rx_std_cons_idx = 0;
7253 tpr->rx_std_prod_idx = 0;
7254 tpr->rx_jmb_cons_idx = 0;
7255 tpr->rx_jmb_prod_idx = 0;
7257 if (tpr != &tp->napi[0].prodring) {
7258 memset(&tpr->rx_std_buffers[0], 0,
7259 TG3_RX_STD_BUFF_RING_SIZE(tp));
7260 if (tpr->rx_jmb_buffers)
7261 memset(&tpr->rx_jmb_buffers[0], 0,
7262 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7266 /* Zero out all descriptors. */
7267 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7269 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7270 if (tg3_flag(tp, 5780_CLASS) &&
7271 tp->dev->mtu > ETH_DATA_LEN)
7272 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7273 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7275 /* Initialize invariants of the rings, we only set this
7276 * stuff once. This works because the card does not
7277 * write into the rx buffer posting rings.
7279 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7280 struct tg3_rx_buffer_desc *rxd;
7282 rxd = &tpr->rx_std[i];
7283 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7284 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7285 rxd->opaque = (RXD_OPAQUE_RING_STD |
7286 (i << RXD_OPAQUE_INDEX_SHIFT));
7289 /* Now allocate fresh SKBs for each rx ring. */
7290 for (i = 0; i < tp->rx_pending; i++) {
7291 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7292 netdev_warn(tp->dev,
7293 "Using a smaller RX standard ring. Only "
7294 "%d out of %d buffers were allocated "
7295 "successfully\n", i, tp->rx_pending);
7303 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7306 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7308 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7311 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7312 struct tg3_rx_buffer_desc *rxd;
7314 rxd = &tpr->rx_jmb[i].std;
7315 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7316 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7318 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7319 (i << RXD_OPAQUE_INDEX_SHIFT));
7322 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7323 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7324 netdev_warn(tp->dev,
7325 "Using a smaller RX jumbo ring. Only %d "
7326 "out of %d buffers were allocated "
7327 "successfully\n", i, tp->rx_jumbo_pending);
7330 tp->rx_jumbo_pending = i;
7339 tg3_rx_prodring_free(tp, tpr);
7343 static void tg3_rx_prodring_fini(struct tg3 *tp,
7344 struct tg3_rx_prodring_set *tpr)
7346 kfree(tpr->rx_std_buffers);
7347 tpr->rx_std_buffers = NULL;
7348 kfree(tpr->rx_jmb_buffers);
7349 tpr->rx_jmb_buffers = NULL;
7351 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7352 tpr->rx_std, tpr->rx_std_mapping);
7356 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7357 tpr->rx_jmb, tpr->rx_jmb_mapping);
7362 static int tg3_rx_prodring_init(struct tg3 *tp,
7363 struct tg3_rx_prodring_set *tpr)
7365 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7367 if (!tpr->rx_std_buffers)
7370 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7371 TG3_RX_STD_RING_BYTES(tp),
7372 &tpr->rx_std_mapping,
7377 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7378 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7380 if (!tpr->rx_jmb_buffers)
7383 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7384 TG3_RX_JMB_RING_BYTES(tp),
7385 &tpr->rx_jmb_mapping,
7394 tg3_rx_prodring_fini(tp, tpr);
7398 /* Free up pending packets in all rx/tx rings.
7400 * The chip has been shut down and the driver detached from
7401 * the networking, so no interrupts or new tx packets will
7402 * end up in the driver. tp->{tx,}lock is not held and we are not
7403 * in an interrupt context and thus may sleep.
7405 static void tg3_free_rings(struct tg3 *tp)
7409 for (j = 0; j < tp->irq_cnt; j++) {
7410 struct tg3_napi *tnapi = &tp->napi[j];
7412 tg3_rx_prodring_free(tp, &tnapi->prodring);
7414 if (!tnapi->tx_buffers)
7417 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7418 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7423 tg3_tx_skb_unmap(tnapi, i,
7424 skb_shinfo(skb)->nr_frags - 1);
7426 dev_kfree_skb_any(skb);
7428 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7432 /* Initialize tx/rx rings for packet processing.
7434 * The chip has been shut down and the driver detached from
7435 * the networking, so no interrupts or new tx packets will
7436 * end up in the driver. tp->{tx,}lock are held and thus
7439 static int tg3_init_rings(struct tg3 *tp)
7443 /* Free up all the SKBs. */
7446 for (i = 0; i < tp->irq_cnt; i++) {
7447 struct tg3_napi *tnapi = &tp->napi[i];
7449 tnapi->last_tag = 0;
7450 tnapi->last_irq_tag = 0;
7451 tnapi->hw_status->status = 0;
7452 tnapi->hw_status->status_tag = 0;
7453 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7458 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7460 tnapi->rx_rcb_ptr = 0;
7462 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7464 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7474 * Must not be invoked with interrupt sources disabled and
7475 * the hardware shutdown down.
7477 static void tg3_free_consistent(struct tg3 *tp)
7481 for (i = 0; i < tp->irq_cnt; i++) {
7482 struct tg3_napi *tnapi = &tp->napi[i];
7484 if (tnapi->tx_ring) {
7485 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7486 tnapi->tx_ring, tnapi->tx_desc_mapping);
7487 tnapi->tx_ring = NULL;
7490 kfree(tnapi->tx_buffers);
7491 tnapi->tx_buffers = NULL;
7493 if (tnapi->rx_rcb) {
7494 dma_free_coherent(&tp->pdev->dev,
7495 TG3_RX_RCB_RING_BYTES(tp),
7497 tnapi->rx_rcb_mapping);
7498 tnapi->rx_rcb = NULL;
7501 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7503 if (tnapi->hw_status) {
7504 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7506 tnapi->status_mapping);
7507 tnapi->hw_status = NULL;
7512 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7513 tp->hw_stats, tp->stats_mapping);
7514 tp->hw_stats = NULL;
7519 * Must not be invoked with interrupt sources disabled and
7520 * the hardware shutdown down. Can sleep.
7522 static int tg3_alloc_consistent(struct tg3 *tp)
7526 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7527 sizeof(struct tg3_hw_stats),
7533 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7535 for (i = 0; i < tp->irq_cnt; i++) {
7536 struct tg3_napi *tnapi = &tp->napi[i];
7537 struct tg3_hw_status *sblk;
7539 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7541 &tnapi->status_mapping,
7543 if (!tnapi->hw_status)
7546 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7547 sblk = tnapi->hw_status;
7549 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7552 /* If multivector TSS is enabled, vector 0 does not handle
7553 * tx interrupts. Don't allocate any resources for it.
7555 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7556 (i && tg3_flag(tp, ENABLE_TSS))) {
7557 tnapi->tx_buffers = kzalloc(
7558 sizeof(struct tg3_tx_ring_info) *
7559 TG3_TX_RING_SIZE, GFP_KERNEL);
7560 if (!tnapi->tx_buffers)
7563 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7565 &tnapi->tx_desc_mapping,
7567 if (!tnapi->tx_ring)
7572 * When RSS is enabled, the status block format changes
7573 * slightly. The "rx_jumbo_consumer", "reserved",
7574 * and "rx_mini_consumer" members get mapped to the
7575 * other three rx return ring producer indexes.
7579 if (tg3_flag(tp, ENABLE_RSS)) {
7580 tnapi->rx_rcb_prod_idx = NULL;
7585 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7588 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7591 tnapi->rx_rcb_prod_idx = &sblk->reserved;
7594 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7599 * If multivector RSS is enabled, vector 0 does not handle
7600 * rx or tx interrupts. Don't allocate any resources for it.
7602 if (!i && tg3_flag(tp, ENABLE_RSS))
7605 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7606 TG3_RX_RCB_RING_BYTES(tp),
7607 &tnapi->rx_rcb_mapping,
7612 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7618 tg3_free_consistent(tp);
7622 #define MAX_WAIT_CNT 1000
7624 /* To stop a block, clear the enable bit and poll till it
7625 * clears. tp->lock is held.
7627 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7632 if (tg3_flag(tp, 5705_PLUS)) {
7639 /* We can't enable/disable these bits of the
7640 * 5705/5750, just say success.
7653 for (i = 0; i < MAX_WAIT_CNT; i++) {
7656 if ((val & enable_bit) == 0)
7660 if (i == MAX_WAIT_CNT && !silent) {
7661 dev_err(&tp->pdev->dev,
7662 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7670 /* tp->lock is held. */
7671 static int tg3_abort_hw(struct tg3 *tp, int silent)
7675 tg3_disable_ints(tp);
7677 tp->rx_mode &= ~RX_MODE_ENABLE;
7678 tw32_f(MAC_RX_MODE, tp->rx_mode);
7681 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7682 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7683 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7684 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7685 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7686 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7688 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7689 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7690 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7691 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7692 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7693 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7694 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7696 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7697 tw32_f(MAC_MODE, tp->mac_mode);
7700 tp->tx_mode &= ~TX_MODE_ENABLE;
7701 tw32_f(MAC_TX_MODE, tp->tx_mode);
7703 for (i = 0; i < MAX_WAIT_CNT; i++) {
7705 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7708 if (i >= MAX_WAIT_CNT) {
7709 dev_err(&tp->pdev->dev,
7710 "%s timed out, TX_MODE_ENABLE will not clear "
7711 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7715 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7716 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7717 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7719 tw32(FTQ_RESET, 0xffffffff);
7720 tw32(FTQ_RESET, 0x00000000);
7722 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7723 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7725 for (i = 0; i < tp->irq_cnt; i++) {
7726 struct tg3_napi *tnapi = &tp->napi[i];
7727 if (tnapi->hw_status)
7728 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7734 /* Save PCI command register before chip reset */
7735 static void tg3_save_pci_state(struct tg3 *tp)
7737 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7740 /* Restore PCI state after chip reset */
7741 static void tg3_restore_pci_state(struct tg3 *tp)
7745 /* Re-enable indirect register accesses. */
7746 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7747 tp->misc_host_ctrl);
7749 /* Set MAX PCI retry to zero. */
7750 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7751 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7752 tg3_flag(tp, PCIX_MODE))
7753 val |= PCISTATE_RETRY_SAME_DMA;
7754 /* Allow reads and writes to the APE register and memory space. */
7755 if (tg3_flag(tp, ENABLE_APE))
7756 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7757 PCISTATE_ALLOW_APE_SHMEM_WR |
7758 PCISTATE_ALLOW_APE_PSPACE_WR;
7759 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7761 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7763 if (!tg3_flag(tp, PCI_EXPRESS)) {
7764 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7765 tp->pci_cacheline_sz);
7766 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7770 /* Make sure PCI-X relaxed ordering bit is clear. */
7771 if (tg3_flag(tp, PCIX_MODE)) {
7774 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7776 pcix_cmd &= ~PCI_X_CMD_ERO;
7777 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7781 if (tg3_flag(tp, 5780_CLASS)) {
7783 /* Chip reset on 5780 will reset MSI enable bit,
7784 * so need to restore it.
7786 if (tg3_flag(tp, USING_MSI)) {
7789 pci_read_config_word(tp->pdev,
7790 tp->msi_cap + PCI_MSI_FLAGS,
7792 pci_write_config_word(tp->pdev,
7793 tp->msi_cap + PCI_MSI_FLAGS,
7794 ctrl | PCI_MSI_FLAGS_ENABLE);
7795 val = tr32(MSGINT_MODE);
7796 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7801 /* tp->lock is held. */
7802 static int tg3_chip_reset(struct tg3 *tp)
7805 void (*write_op)(struct tg3 *, u32, u32);
7810 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7812 /* No matching tg3_nvram_unlock() after this because
7813 * chip reset below will undo the nvram lock.
7815 tp->nvram_lock_cnt = 0;
7817 /* GRC_MISC_CFG core clock reset will clear the memory
7818 * enable bit in PCI register 4 and the MSI enable bit
7819 * on some chips, so we save relevant registers here.
7821 tg3_save_pci_state(tp);
7823 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7824 tg3_flag(tp, 5755_PLUS))
7825 tw32(GRC_FASTBOOT_PC, 0);
7828 * We must avoid the readl() that normally takes place.
7829 * It locks machines, causes machine checks, and other
7830 * fun things. So, temporarily disable the 5701
7831 * hardware workaround, while we do the reset.
7833 write_op = tp->write32;
7834 if (write_op == tg3_write_flush_reg32)
7835 tp->write32 = tg3_write32;
7837 /* Prevent the irq handler from reading or writing PCI registers
7838 * during chip reset when the memory enable bit in the PCI command
7839 * register may be cleared. The chip does not generate interrupt
7840 * at this time, but the irq handler may still be called due to irq
7841 * sharing or irqpoll.
7843 tg3_flag_set(tp, CHIP_RESETTING);
7844 for (i = 0; i < tp->irq_cnt; i++) {
7845 struct tg3_napi *tnapi = &tp->napi[i];
7846 if (tnapi->hw_status) {
7847 tnapi->hw_status->status = 0;
7848 tnapi->hw_status->status_tag = 0;
7850 tnapi->last_tag = 0;
7851 tnapi->last_irq_tag = 0;
7855 for (i = 0; i < tp->irq_cnt; i++)
7856 synchronize_irq(tp->napi[i].irq_vec);
7858 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7859 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7860 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7864 val = GRC_MISC_CFG_CORECLK_RESET;
7866 if (tg3_flag(tp, PCI_EXPRESS)) {
7867 /* Force PCIe 1.0a mode */
7868 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7869 !tg3_flag(tp, 57765_PLUS) &&
7870 tr32(TG3_PCIE_PHY_TSTCTL) ==
7871 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7872 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7874 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7875 tw32(GRC_MISC_CFG, (1 << 29));
7880 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7881 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7882 tw32(GRC_VCPU_EXT_CTRL,
7883 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7886 /* Manage gphy power for all CPMU absent PCIe devices. */
7887 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7888 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7890 tw32(GRC_MISC_CFG, val);
7892 /* restore 5701 hardware bug workaround write method */
7893 tp->write32 = write_op;
7895 /* Unfortunately, we have to delay before the PCI read back.
7896 * Some 575X chips even will not respond to a PCI cfg access
7897 * when the reset command is given to the chip.
7899 * How do these hardware designers expect things to work
7900 * properly if the PCI write is posted for a long period
7901 * of time? It is always necessary to have some method by
7902 * which a register read back can occur to push the write
7903 * out which does the reset.
7905 * For most tg3 variants the trick below was working.
7910 /* Flush PCI posted writes. The normal MMIO registers
7911 * are inaccessible at this time so this is the only
7912 * way to make this reliably (actually, this is no longer
7913 * the case, see above). I tried to use indirect
7914 * register read/write but this upset some 5701 variants.
7916 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7920 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7923 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7927 /* Wait for link training to complete. */
7928 for (i = 0; i < 5000; i++)
7931 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7932 pci_write_config_dword(tp->pdev, 0xc4,
7933 cfg_val | (1 << 15));
7936 /* Clear the "no snoop" and "relaxed ordering" bits. */
7937 pci_read_config_word(tp->pdev,
7938 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7940 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7941 PCI_EXP_DEVCTL_NOSNOOP_EN);
7943 * Older PCIe devices only support the 128 byte
7944 * MPS setting. Enforce the restriction.
7946 if (!tg3_flag(tp, CPMU_PRESENT))
7947 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7948 pci_write_config_word(tp->pdev,
7949 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7952 /* Clear error status */
7953 pci_write_config_word(tp->pdev,
7954 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7955 PCI_EXP_DEVSTA_CED |
7956 PCI_EXP_DEVSTA_NFED |
7957 PCI_EXP_DEVSTA_FED |
7958 PCI_EXP_DEVSTA_URD);
7961 tg3_restore_pci_state(tp);
7963 tg3_flag_clear(tp, CHIP_RESETTING);
7964 tg3_flag_clear(tp, ERROR_PROCESSED);
7967 if (tg3_flag(tp, 5780_CLASS))
7968 val = tr32(MEMARB_MODE);
7969 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7971 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7973 tw32(0x5000, 0x400);
7976 tw32(GRC_MODE, tp->grc_mode);
7978 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7981 tw32(0xc4, val | (1 << 15));
7984 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7985 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7986 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7987 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7988 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7989 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7992 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7993 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7995 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7996 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8001 tw32_f(MAC_MODE, val);
8004 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8006 err = tg3_poll_fw(tp);
8012 if (tg3_flag(tp, PCI_EXPRESS) &&
8013 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8014 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8015 !tg3_flag(tp, 57765_PLUS)) {
8018 tw32(0x7c00, val | (1 << 25));
8021 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8022 val = tr32(TG3_CPMU_CLCK_ORIDE);
8023 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8026 /* Reprobe ASF enable state. */
8027 tg3_flag_clear(tp, ENABLE_ASF);
8028 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8029 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8030 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8033 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8034 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8035 tg3_flag_set(tp, ENABLE_ASF);
8036 tp->last_event_jiffies = jiffies;
8037 if (tg3_flag(tp, 5750_PLUS))
8038 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8045 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8046 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8048 /* tp->lock is held. */
8049 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8055 tg3_write_sig_pre_reset(tp, kind);
8057 tg3_abort_hw(tp, silent);
8058 err = tg3_chip_reset(tp);
8060 __tg3_set_mac_addr(tp, 0);
8062 tg3_write_sig_legacy(tp, kind);
8063 tg3_write_sig_post_reset(tp, kind);
8066 /* Save the stats across chip resets... */
8067 tg3_get_nstats(tp, &tp->net_stats_prev);
8068 tg3_get_estats(tp, &tp->estats_prev);
8070 /* And make sure the next sample is new data */
8071 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8080 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8082 struct tg3 *tp = netdev_priv(dev);
8083 struct sockaddr *addr = p;
8084 int err = 0, skip_mac_1 = 0;
8086 if (!is_valid_ether_addr(addr->sa_data))
8087 return -EADDRNOTAVAIL;
8089 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8091 if (!netif_running(dev))
8094 if (tg3_flag(tp, ENABLE_ASF)) {
8095 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8097 addr0_high = tr32(MAC_ADDR_0_HIGH);
8098 addr0_low = tr32(MAC_ADDR_0_LOW);
8099 addr1_high = tr32(MAC_ADDR_1_HIGH);
8100 addr1_low = tr32(MAC_ADDR_1_LOW);
8102 /* Skip MAC addr 1 if ASF is using it. */
8103 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8104 !(addr1_high == 0 && addr1_low == 0))
8107 spin_lock_bh(&tp->lock);
8108 __tg3_set_mac_addr(tp, skip_mac_1);
8109 spin_unlock_bh(&tp->lock);
8114 /* tp->lock is held. */
8115 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8116 dma_addr_t mapping, u32 maxlen_flags,
8120 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8121 ((u64) mapping >> 32));
8123 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8124 ((u64) mapping & 0xffffffff));
8126 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8129 if (!tg3_flag(tp, 5705_PLUS))
8131 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8135 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8139 if (!tg3_flag(tp, ENABLE_TSS)) {
8140 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8141 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8142 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8144 tw32(HOSTCC_TXCOL_TICKS, 0);
8145 tw32(HOSTCC_TXMAX_FRAMES, 0);
8146 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8149 if (!tg3_flag(tp, ENABLE_RSS)) {
8150 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8151 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8152 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8154 tw32(HOSTCC_RXCOL_TICKS, 0);
8155 tw32(HOSTCC_RXMAX_FRAMES, 0);
8156 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8159 if (!tg3_flag(tp, 5705_PLUS)) {
8160 u32 val = ec->stats_block_coalesce_usecs;
8162 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8163 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8165 if (!netif_carrier_ok(tp->dev))
8168 tw32(HOSTCC_STAT_COAL_TICKS, val);
8171 for (i = 0; i < tp->irq_cnt - 1; i++) {
8174 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8175 tw32(reg, ec->rx_coalesce_usecs);
8176 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8177 tw32(reg, ec->rx_max_coalesced_frames);
8178 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8179 tw32(reg, ec->rx_max_coalesced_frames_irq);
8181 if (tg3_flag(tp, ENABLE_TSS)) {
8182 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8183 tw32(reg, ec->tx_coalesce_usecs);
8184 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8185 tw32(reg, ec->tx_max_coalesced_frames);
8186 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8187 tw32(reg, ec->tx_max_coalesced_frames_irq);
8191 for (; i < tp->irq_max - 1; i++) {
8192 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8193 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8194 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8196 if (tg3_flag(tp, ENABLE_TSS)) {
8197 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8198 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8199 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8204 /* tp->lock is held. */
8205 static void tg3_rings_reset(struct tg3 *tp)
8208 u32 stblk, txrcb, rxrcb, limit;
8209 struct tg3_napi *tnapi = &tp->napi[0];
8211 /* Disable all transmit rings but the first. */
8212 if (!tg3_flag(tp, 5705_PLUS))
8213 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8214 else if (tg3_flag(tp, 5717_PLUS))
8215 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8216 else if (tg3_flag(tp, 57765_CLASS))
8217 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8219 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8221 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8222 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8223 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8224 BDINFO_FLAGS_DISABLED);
8227 /* Disable all receive return rings but the first. */
8228 if (tg3_flag(tp, 5717_PLUS))
8229 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8230 else if (!tg3_flag(tp, 5705_PLUS))
8231 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8232 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8233 tg3_flag(tp, 57765_CLASS))
8234 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8236 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8238 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8239 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8240 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8241 BDINFO_FLAGS_DISABLED);
8243 /* Disable interrupts */
8244 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8245 tp->napi[0].chk_msi_cnt = 0;
8246 tp->napi[0].last_rx_cons = 0;
8247 tp->napi[0].last_tx_cons = 0;
8249 /* Zero mailbox registers. */
8250 if (tg3_flag(tp, SUPPORT_MSIX)) {
8251 for (i = 1; i < tp->irq_max; i++) {
8252 tp->napi[i].tx_prod = 0;
8253 tp->napi[i].tx_cons = 0;
8254 if (tg3_flag(tp, ENABLE_TSS))
8255 tw32_mailbox(tp->napi[i].prodmbox, 0);
8256 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8257 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8258 tp->napi[i].chk_msi_cnt = 0;
8259 tp->napi[i].last_rx_cons = 0;
8260 tp->napi[i].last_tx_cons = 0;
8262 if (!tg3_flag(tp, ENABLE_TSS))
8263 tw32_mailbox(tp->napi[0].prodmbox, 0);
8265 tp->napi[0].tx_prod = 0;
8266 tp->napi[0].tx_cons = 0;
8267 tw32_mailbox(tp->napi[0].prodmbox, 0);
8268 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8271 /* Make sure the NIC-based send BD rings are disabled. */
8272 if (!tg3_flag(tp, 5705_PLUS)) {
8273 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8274 for (i = 0; i < 16; i++)
8275 tw32_tx_mbox(mbox + i * 8, 0);
8278 txrcb = NIC_SRAM_SEND_RCB;
8279 rxrcb = NIC_SRAM_RCV_RET_RCB;
8281 /* Clear status block in ram. */
8282 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8284 /* Set status block DMA address */
8285 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8286 ((u64) tnapi->status_mapping >> 32));
8287 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8288 ((u64) tnapi->status_mapping & 0xffffffff));
8290 if (tnapi->tx_ring) {
8291 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8292 (TG3_TX_RING_SIZE <<
8293 BDINFO_FLAGS_MAXLEN_SHIFT),
8294 NIC_SRAM_TX_BUFFER_DESC);
8295 txrcb += TG3_BDINFO_SIZE;
8298 if (tnapi->rx_rcb) {
8299 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8300 (tp->rx_ret_ring_mask + 1) <<
8301 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8302 rxrcb += TG3_BDINFO_SIZE;
8305 stblk = HOSTCC_STATBLCK_RING1;
8307 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8308 u64 mapping = (u64)tnapi->status_mapping;
8309 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8310 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8312 /* Clear status block in ram. */
8313 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8315 if (tnapi->tx_ring) {
8316 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8317 (TG3_TX_RING_SIZE <<
8318 BDINFO_FLAGS_MAXLEN_SHIFT),
8319 NIC_SRAM_TX_BUFFER_DESC);
8320 txrcb += TG3_BDINFO_SIZE;
8323 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8324 ((tp->rx_ret_ring_mask + 1) <<
8325 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8328 rxrcb += TG3_BDINFO_SIZE;
8332 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8334 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8336 if (!tg3_flag(tp, 5750_PLUS) ||
8337 tg3_flag(tp, 5780_CLASS) ||
8338 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8339 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8340 tg3_flag(tp, 57765_PLUS))
8341 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8342 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8343 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8344 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8346 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8348 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8349 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8351 val = min(nic_rep_thresh, host_rep_thresh);
8352 tw32(RCVBDI_STD_THRESH, val);
8354 if (tg3_flag(tp, 57765_PLUS))
8355 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8357 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8360 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8362 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8364 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8365 tw32(RCVBDI_JUMBO_THRESH, val);
8367 if (tg3_flag(tp, 57765_PLUS))
8368 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8371 static inline u32 calc_crc(unsigned char *buf, int len)
8379 for (j = 0; j < len; j++) {
8382 for (k = 0; k < 8; k++) {
8395 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8397 /* accept or reject all multicast frames */
8398 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8399 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8400 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8401 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8404 static void __tg3_set_rx_mode(struct net_device *dev)
8406 struct tg3 *tp = netdev_priv(dev);
8409 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8410 RX_MODE_KEEP_VLAN_TAG);
8412 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8413 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8416 if (!tg3_flag(tp, ENABLE_ASF))
8417 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8420 if (dev->flags & IFF_PROMISC) {
8421 /* Promiscuous mode. */
8422 rx_mode |= RX_MODE_PROMISC;
8423 } else if (dev->flags & IFF_ALLMULTI) {
8424 /* Accept all multicast. */
8425 tg3_set_multi(tp, 1);
8426 } else if (netdev_mc_empty(dev)) {
8427 /* Reject all multicast. */
8428 tg3_set_multi(tp, 0);
8430 /* Accept one or more multicast(s). */
8431 struct netdev_hw_addr *ha;
8432 u32 mc_filter[4] = { 0, };
8437 netdev_for_each_mc_addr(ha, dev) {
8438 crc = calc_crc(ha->addr, ETH_ALEN);
8440 regidx = (bit & 0x60) >> 5;
8442 mc_filter[regidx] |= (1 << bit);
8445 tw32(MAC_HASH_REG_0, mc_filter[0]);
8446 tw32(MAC_HASH_REG_1, mc_filter[1]);
8447 tw32(MAC_HASH_REG_2, mc_filter[2]);
8448 tw32(MAC_HASH_REG_3, mc_filter[3]);
8451 if (rx_mode != tp->rx_mode) {
8452 tp->rx_mode = rx_mode;
8453 tw32_f(MAC_RX_MODE, rx_mode);
8458 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8462 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8463 tp->rss_ind_tbl[i] =
8464 ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8467 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8471 if (!tg3_flag(tp, SUPPORT_MSIX))
8474 if (tp->irq_cnt <= 2) {
8475 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8479 /* Validate table against current IRQ count */
8480 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8481 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8485 if (i != TG3_RSS_INDIR_TBL_SIZE)
8486 tg3_rss_init_dflt_indir_tbl(tp);
8489 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8492 u32 reg = MAC_RSS_INDIR_TBL_0;
8494 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8495 u32 val = tp->rss_ind_tbl[i];
8497 for (; i % 8; i++) {
8499 val |= tp->rss_ind_tbl[i];
8506 /* tp->lock is held. */
8507 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8509 u32 val, rdmac_mode;
8511 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8513 tg3_disable_ints(tp);
8517 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8519 if (tg3_flag(tp, INIT_COMPLETE))
8520 tg3_abort_hw(tp, 1);
8522 /* Enable MAC control of LPI */
8523 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8524 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8525 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8526 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8528 tw32_f(TG3_CPMU_EEE_CTRL,
8529 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8531 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8532 TG3_CPMU_EEEMD_LPI_IN_TX |
8533 TG3_CPMU_EEEMD_LPI_IN_RX |
8534 TG3_CPMU_EEEMD_EEE_ENABLE;
8536 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8537 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8539 if (tg3_flag(tp, ENABLE_APE))
8540 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8542 tw32_f(TG3_CPMU_EEE_MODE, val);
8544 tw32_f(TG3_CPMU_EEE_DBTMR1,
8545 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8546 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8548 tw32_f(TG3_CPMU_EEE_DBTMR2,
8549 TG3_CPMU_DBTMR2_APE_TX_2047US |
8550 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8556 err = tg3_chip_reset(tp);
8560 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8562 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8563 val = tr32(TG3_CPMU_CTRL);
8564 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8565 tw32(TG3_CPMU_CTRL, val);
8567 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8568 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8569 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8570 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8572 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8573 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8574 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8575 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8577 val = tr32(TG3_CPMU_HST_ACC);
8578 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8579 val |= CPMU_HST_ACC_MACCLK_6_25;
8580 tw32(TG3_CPMU_HST_ACC, val);
8583 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8584 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8585 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8586 PCIE_PWR_MGMT_L1_THRESH_4MS;
8587 tw32(PCIE_PWR_MGMT_THRESH, val);
8589 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8590 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8592 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8594 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8595 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8598 if (tg3_flag(tp, L1PLLPD_EN)) {
8599 u32 grc_mode = tr32(GRC_MODE);
8601 /* Access the lower 1K of PL PCIE block registers. */
8602 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8603 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8605 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8606 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8607 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8609 tw32(GRC_MODE, grc_mode);
8612 if (tg3_flag(tp, 57765_CLASS)) {
8613 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8614 u32 grc_mode = tr32(GRC_MODE);
8616 /* Access the lower 1K of PL PCIE block registers. */
8617 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8618 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8620 val = tr32(TG3_PCIE_TLDLPL_PORT +
8621 TG3_PCIE_PL_LO_PHYCTL5);
8622 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8623 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8625 tw32(GRC_MODE, grc_mode);
8628 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8629 u32 grc_mode = tr32(GRC_MODE);
8631 /* Access the lower 1K of DL PCIE block registers. */
8632 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8633 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8635 val = tr32(TG3_PCIE_TLDLPL_PORT +
8636 TG3_PCIE_DL_LO_FTSMAX);
8637 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8638 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8639 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8641 tw32(GRC_MODE, grc_mode);
8644 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8645 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8646 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8647 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8650 /* This works around an issue with Athlon chipsets on
8651 * B3 tigon3 silicon. This bit has no effect on any
8652 * other revision. But do not set this on PCI Express
8653 * chips and don't even touch the clocks if the CPMU is present.
8655 if (!tg3_flag(tp, CPMU_PRESENT)) {
8656 if (!tg3_flag(tp, PCI_EXPRESS))
8657 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8658 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8661 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8662 tg3_flag(tp, PCIX_MODE)) {
8663 val = tr32(TG3PCI_PCISTATE);
8664 val |= PCISTATE_RETRY_SAME_DMA;
8665 tw32(TG3PCI_PCISTATE, val);
8668 if (tg3_flag(tp, ENABLE_APE)) {
8669 /* Allow reads and writes to the
8670 * APE register and memory space.
8672 val = tr32(TG3PCI_PCISTATE);
8673 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8674 PCISTATE_ALLOW_APE_SHMEM_WR |
8675 PCISTATE_ALLOW_APE_PSPACE_WR;
8676 tw32(TG3PCI_PCISTATE, val);
8679 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8680 /* Enable some hw fixes. */
8681 val = tr32(TG3PCI_MSI_DATA);
8682 val |= (1 << 26) | (1 << 28) | (1 << 29);
8683 tw32(TG3PCI_MSI_DATA, val);
8686 /* Descriptor ring init may make accesses to the
8687 * NIC SRAM area to setup the TX descriptors, so we
8688 * can only do this after the hardware has been
8689 * successfully reset.
8691 err = tg3_init_rings(tp);
8695 if (tg3_flag(tp, 57765_PLUS)) {
8696 val = tr32(TG3PCI_DMA_RW_CTRL) &
8697 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8698 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8699 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8700 if (!tg3_flag(tp, 57765_CLASS) &&
8701 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8702 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8703 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8704 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8705 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8706 /* This value is determined during the probe time DMA
8707 * engine test, tg3_test_dma.
8709 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8712 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8713 GRC_MODE_4X_NIC_SEND_RINGS |
8714 GRC_MODE_NO_TX_PHDR_CSUM |
8715 GRC_MODE_NO_RX_PHDR_CSUM);
8716 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8718 /* Pseudo-header checksum is done by hardware logic and not
8719 * the offload processers, so make the chip do the pseudo-
8720 * header checksums on receive. For transmit it is more
8721 * convenient to do the pseudo-header checksum in software
8722 * as Linux does that on transmit for us in all cases.
8724 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8728 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8730 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8731 val = tr32(GRC_MISC_CFG);
8733 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8734 tw32(GRC_MISC_CFG, val);
8736 /* Initialize MBUF/DESC pool. */
8737 if (tg3_flag(tp, 5750_PLUS)) {
8739 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8740 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8741 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8742 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8744 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8745 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8746 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8747 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8750 fw_len = tp->fw_len;
8751 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8752 tw32(BUFMGR_MB_POOL_ADDR,
8753 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8754 tw32(BUFMGR_MB_POOL_SIZE,
8755 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8758 if (tp->dev->mtu <= ETH_DATA_LEN) {
8759 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8760 tp->bufmgr_config.mbuf_read_dma_low_water);
8761 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8762 tp->bufmgr_config.mbuf_mac_rx_low_water);
8763 tw32(BUFMGR_MB_HIGH_WATER,
8764 tp->bufmgr_config.mbuf_high_water);
8766 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8767 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8768 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8769 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8770 tw32(BUFMGR_MB_HIGH_WATER,
8771 tp->bufmgr_config.mbuf_high_water_jumbo);
8773 tw32(BUFMGR_DMA_LOW_WATER,
8774 tp->bufmgr_config.dma_low_water);
8775 tw32(BUFMGR_DMA_HIGH_WATER,
8776 tp->bufmgr_config.dma_high_water);
8778 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8779 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8780 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8781 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8782 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8783 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8784 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8785 tw32(BUFMGR_MODE, val);
8786 for (i = 0; i < 2000; i++) {
8787 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8792 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8796 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8797 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8799 tg3_setup_rxbd_thresholds(tp);
8801 /* Initialize TG3_BDINFO's at:
8802 * RCVDBDI_STD_BD: standard eth size rx ring
8803 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8804 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8807 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8808 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8809 * ring attribute flags
8810 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8812 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8813 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8815 * The size of each ring is fixed in the firmware, but the location is
8818 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8819 ((u64) tpr->rx_std_mapping >> 32));
8820 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8821 ((u64) tpr->rx_std_mapping & 0xffffffff));
8822 if (!tg3_flag(tp, 5717_PLUS))
8823 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8824 NIC_SRAM_RX_BUFFER_DESC);
8826 /* Disable the mini ring */
8827 if (!tg3_flag(tp, 5705_PLUS))
8828 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8829 BDINFO_FLAGS_DISABLED);
8831 /* Program the jumbo buffer descriptor ring control
8832 * blocks on those devices that have them.
8834 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8835 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8837 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8838 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8839 ((u64) tpr->rx_jmb_mapping >> 32));
8840 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8841 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8842 val = TG3_RX_JMB_RING_SIZE(tp) <<
8843 BDINFO_FLAGS_MAXLEN_SHIFT;
8844 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8845 val | BDINFO_FLAGS_USE_EXT_RECV);
8846 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8847 tg3_flag(tp, 57765_CLASS))
8848 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8849 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8851 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8852 BDINFO_FLAGS_DISABLED);
8855 if (tg3_flag(tp, 57765_PLUS)) {
8856 val = TG3_RX_STD_RING_SIZE(tp);
8857 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8858 val |= (TG3_RX_STD_DMA_SZ << 2);
8860 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8862 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8864 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8866 tpr->rx_std_prod_idx = tp->rx_pending;
8867 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8869 tpr->rx_jmb_prod_idx =
8870 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8871 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8873 tg3_rings_reset(tp);
8875 /* Initialize MAC address and backoff seed. */
8876 __tg3_set_mac_addr(tp, 0);
8878 /* MTU + ethernet header + FCS + optional VLAN tag */
8879 tw32(MAC_RX_MTU_SIZE,
8880 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8882 /* The slot time is changed by tg3_setup_phy if we
8883 * run at gigabit with half duplex.
8885 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8886 (6 << TX_LENGTHS_IPG_SHIFT) |
8887 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8889 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8890 val |= tr32(MAC_TX_LENGTHS) &
8891 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8892 TX_LENGTHS_CNT_DWN_VAL_MSK);
8894 tw32(MAC_TX_LENGTHS, val);
8896 /* Receive rules. */
8897 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8898 tw32(RCVLPC_CONFIG, 0x0181);
8900 /* Calculate RDMAC_MODE setting early, we need it to determine
8901 * the RCVLPC_STATE_ENABLE mask.
8903 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8904 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8905 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8906 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8907 RDMAC_MODE_LNGREAD_ENAB);
8909 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8910 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8912 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8913 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8914 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8915 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8916 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8917 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8919 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8920 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8921 if (tg3_flag(tp, TSO_CAPABLE) &&
8922 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8923 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8924 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8925 !tg3_flag(tp, IS_5788)) {
8926 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8930 if (tg3_flag(tp, PCI_EXPRESS))
8931 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8933 if (tg3_flag(tp, HW_TSO_1) ||
8934 tg3_flag(tp, HW_TSO_2) ||
8935 tg3_flag(tp, HW_TSO_3))
8936 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8938 if (tg3_flag(tp, 57765_PLUS) ||
8939 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8940 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8941 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8943 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8944 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8946 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8947 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8948 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8949 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8950 tg3_flag(tp, 57765_PLUS)) {
8951 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8952 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
8953 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8954 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8955 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8956 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8957 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8958 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8960 tw32(TG3_RDMA_RSRVCTRL_REG,
8961 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8964 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8965 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8966 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8967 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8968 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8969 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8972 /* Receive/send statistics. */
8973 if (tg3_flag(tp, 5750_PLUS)) {
8974 val = tr32(RCVLPC_STATS_ENABLE);
8975 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8976 tw32(RCVLPC_STATS_ENABLE, val);
8977 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8978 tg3_flag(tp, TSO_CAPABLE)) {
8979 val = tr32(RCVLPC_STATS_ENABLE);
8980 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8981 tw32(RCVLPC_STATS_ENABLE, val);
8983 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8985 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8986 tw32(SNDDATAI_STATSENAB, 0xffffff);
8987 tw32(SNDDATAI_STATSCTRL,
8988 (SNDDATAI_SCTRL_ENABLE |
8989 SNDDATAI_SCTRL_FASTUPD));
8991 /* Setup host coalescing engine. */
8992 tw32(HOSTCC_MODE, 0);
8993 for (i = 0; i < 2000; i++) {
8994 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8999 __tg3_set_coalesce(tp, &tp->coal);
9001 if (!tg3_flag(tp, 5705_PLUS)) {
9002 /* Status/statistics block address. See tg3_timer,
9003 * the tg3_periodic_fetch_stats call there, and
9004 * tg3_get_stats to see how this works for 5705/5750 chips.
9006 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9007 ((u64) tp->stats_mapping >> 32));
9008 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9009 ((u64) tp->stats_mapping & 0xffffffff));
9010 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9012 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9014 /* Clear statistics and status block memory areas */
9015 for (i = NIC_SRAM_STATS_BLK;
9016 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9018 tg3_write_mem(tp, i, 0);
9023 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9025 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9026 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9027 if (!tg3_flag(tp, 5705_PLUS))
9028 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9030 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9031 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9032 /* reset to prevent losing 1st rx packet intermittently */
9033 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9037 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9038 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9039 MAC_MODE_FHDE_ENABLE;
9040 if (tg3_flag(tp, ENABLE_APE))
9041 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9042 if (!tg3_flag(tp, 5705_PLUS) &&
9043 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9044 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9045 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9046 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9049 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9050 * If TG3_FLAG_IS_NIC is zero, we should read the
9051 * register to preserve the GPIO settings for LOMs. The GPIOs,
9052 * whether used as inputs or outputs, are set by boot code after
9055 if (!tg3_flag(tp, IS_NIC)) {
9058 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9059 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9060 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9062 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9063 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9064 GRC_LCLCTRL_GPIO_OUTPUT3;
9066 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9067 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9069 tp->grc_local_ctrl &= ~gpio_mask;
9070 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9072 /* GPIO1 must be driven high for eeprom write protect */
9073 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9074 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9075 GRC_LCLCTRL_GPIO_OUTPUT1);
9077 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9080 if (tg3_flag(tp, USING_MSIX)) {
9081 val = tr32(MSGINT_MODE);
9082 val |= MSGINT_MODE_ENABLE;
9083 if (tp->irq_cnt > 1)
9084 val |= MSGINT_MODE_MULTIVEC_EN;
9085 if (!tg3_flag(tp, 1SHOT_MSI))
9086 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9087 tw32(MSGINT_MODE, val);
9090 if (!tg3_flag(tp, 5705_PLUS)) {
9091 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9095 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9096 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9097 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9098 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9099 WDMAC_MODE_LNGREAD_ENAB);
9101 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9102 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9103 if (tg3_flag(tp, TSO_CAPABLE) &&
9104 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9105 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9107 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9108 !tg3_flag(tp, IS_5788)) {
9109 val |= WDMAC_MODE_RX_ACCEL;
9113 /* Enable host coalescing bug fix */
9114 if (tg3_flag(tp, 5755_PLUS))
9115 val |= WDMAC_MODE_STATUS_TAG_FIX;
9117 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9118 val |= WDMAC_MODE_BURST_ALL_DATA;
9120 tw32_f(WDMAC_MODE, val);
9123 if (tg3_flag(tp, PCIX_MODE)) {
9126 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9128 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9129 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9130 pcix_cmd |= PCI_X_CMD_READ_2K;
9131 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9132 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9133 pcix_cmd |= PCI_X_CMD_READ_2K;
9135 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9139 tw32_f(RDMAC_MODE, rdmac_mode);
9142 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9143 if (!tg3_flag(tp, 5705_PLUS))
9144 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9146 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9148 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9150 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9152 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9153 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9154 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9155 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9156 val |= RCVDBDI_MODE_LRG_RING_SZ;
9157 tw32(RCVDBDI_MODE, val);
9158 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9159 if (tg3_flag(tp, HW_TSO_1) ||
9160 tg3_flag(tp, HW_TSO_2) ||
9161 tg3_flag(tp, HW_TSO_3))
9162 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9163 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9164 if (tg3_flag(tp, ENABLE_TSS))
9165 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9166 tw32(SNDBDI_MODE, val);
9167 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9169 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9170 err = tg3_load_5701_a0_firmware_fix(tp);
9175 if (tg3_flag(tp, TSO_CAPABLE)) {
9176 err = tg3_load_tso_firmware(tp);
9181 tp->tx_mode = TX_MODE_ENABLE;
9183 if (tg3_flag(tp, 5755_PLUS) ||
9184 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9185 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9187 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9188 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9189 tp->tx_mode &= ~val;
9190 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9193 tw32_f(MAC_TX_MODE, tp->tx_mode);
9196 if (tg3_flag(tp, ENABLE_RSS)) {
9197 tg3_rss_write_indir_tbl(tp);
9199 /* Setup the "secret" hash key. */
9200 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9201 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9202 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9203 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9204 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9205 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9206 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9207 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9208 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9209 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9212 tp->rx_mode = RX_MODE_ENABLE;
9213 if (tg3_flag(tp, 5755_PLUS))
9214 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9216 if (tg3_flag(tp, ENABLE_RSS))
9217 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9218 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9219 RX_MODE_RSS_IPV6_HASH_EN |
9220 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9221 RX_MODE_RSS_IPV4_HASH_EN |
9222 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9224 tw32_f(MAC_RX_MODE, tp->rx_mode);
9227 tw32(MAC_LED_CTRL, tp->led_ctrl);
9229 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9230 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9231 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9234 tw32_f(MAC_RX_MODE, tp->rx_mode);
9237 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9238 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9239 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9240 /* Set drive transmission level to 1.2V */
9241 /* only if the signal pre-emphasis bit is not set */
9242 val = tr32(MAC_SERDES_CFG);
9245 tw32(MAC_SERDES_CFG, val);
9247 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9248 tw32(MAC_SERDES_CFG, 0x616000);
9251 /* Prevent chip from dropping frames when flow control
9254 if (tg3_flag(tp, 57765_CLASS))
9258 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9260 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9261 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9262 /* Use hardware link auto-negotiation */
9263 tg3_flag_set(tp, HW_AUTONEG);
9266 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9267 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9270 tmp = tr32(SERDES_RX_CTRL);
9271 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9272 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9273 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9274 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9277 if (!tg3_flag(tp, USE_PHYLIB)) {
9278 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9279 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9281 err = tg3_setup_phy(tp, 0);
9285 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9286 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9289 /* Clear CRC stats. */
9290 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9291 tg3_writephy(tp, MII_TG3_TEST1,
9292 tmp | MII_TG3_TEST1_CRC_EN);
9293 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9298 __tg3_set_rx_mode(tp->dev);
9300 /* Initialize receive rules. */
9301 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9302 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9303 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9304 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9306 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9310 if (tg3_flag(tp, ENABLE_ASF))
9314 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9316 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9318 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9320 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9322 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9324 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9326 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9328 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9330 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9332 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9334 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9336 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9338 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9340 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9348 if (tg3_flag(tp, ENABLE_APE))
9349 /* Write our heartbeat update interval to APE. */
9350 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9351 APE_HOST_HEARTBEAT_INT_DISABLE);
9353 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9358 /* Called at device open time to get the chip ready for
9359 * packet processing. Invoked with tp->lock held.
9361 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9363 tg3_switch_clocks(tp);
9365 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9367 return tg3_reset_hw(tp, reset_phy);
9370 #define TG3_STAT_ADD32(PSTAT, REG) \
9371 do { u32 __val = tr32(REG); \
9372 (PSTAT)->low += __val; \
9373 if ((PSTAT)->low < __val) \
9374 (PSTAT)->high += 1; \
9377 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9379 struct tg3_hw_stats *sp = tp->hw_stats;
9381 if (!netif_carrier_ok(tp->dev))
9384 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9385 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9386 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9387 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9388 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9389 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9390 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9391 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9392 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9393 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9394 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9395 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9396 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9398 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9399 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9400 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9401 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9402 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9403 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9404 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9405 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9406 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9407 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9408 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9409 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9410 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9411 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9413 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9414 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9415 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9416 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9417 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9419 u32 val = tr32(HOSTCC_FLOW_ATTN);
9420 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9422 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9423 sp->rx_discards.low += val;
9424 if (sp->rx_discards.low < val)
9425 sp->rx_discards.high += 1;
9427 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9429 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9432 static void tg3_chk_missed_msi(struct tg3 *tp)
9436 for (i = 0; i < tp->irq_cnt; i++) {
9437 struct tg3_napi *tnapi = &tp->napi[i];
9439 if (tg3_has_work(tnapi)) {
9440 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9441 tnapi->last_tx_cons == tnapi->tx_cons) {
9442 if (tnapi->chk_msi_cnt < 1) {
9443 tnapi->chk_msi_cnt++;
9449 tnapi->chk_msi_cnt = 0;
9450 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9451 tnapi->last_tx_cons = tnapi->tx_cons;
9455 static void tg3_timer(unsigned long __opaque)
9457 struct tg3 *tp = (struct tg3 *) __opaque;
9459 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9462 spin_lock(&tp->lock);
9464 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9465 tg3_flag(tp, 57765_CLASS))
9466 tg3_chk_missed_msi(tp);
9468 if (!tg3_flag(tp, TAGGED_STATUS)) {
9469 /* All of this garbage is because when using non-tagged
9470 * IRQ status the mailbox/status_block protocol the chip
9471 * uses with the cpu is race prone.
9473 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9474 tw32(GRC_LOCAL_CTRL,
9475 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9477 tw32(HOSTCC_MODE, tp->coalesce_mode |
9478 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9481 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9482 spin_unlock(&tp->lock);
9483 tg3_reset_task_schedule(tp);
9488 /* This part only runs once per second. */
9489 if (!--tp->timer_counter) {
9490 if (tg3_flag(tp, 5705_PLUS))
9491 tg3_periodic_fetch_stats(tp);
9493 if (tp->setlpicnt && !--tp->setlpicnt)
9494 tg3_phy_eee_enable(tp);
9496 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9500 mac_stat = tr32(MAC_STATUS);
9503 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9504 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9506 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9510 tg3_setup_phy(tp, 0);
9511 } else if (tg3_flag(tp, POLL_SERDES)) {
9512 u32 mac_stat = tr32(MAC_STATUS);
9515 if (netif_carrier_ok(tp->dev) &&
9516 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9519 if (!netif_carrier_ok(tp->dev) &&
9520 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9521 MAC_STATUS_SIGNAL_DET))) {
9525 if (!tp->serdes_counter) {
9528 ~MAC_MODE_PORT_MODE_MASK));
9530 tw32_f(MAC_MODE, tp->mac_mode);
9533 tg3_setup_phy(tp, 0);
9535 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9536 tg3_flag(tp, 5780_CLASS)) {
9537 tg3_serdes_parallel_detect(tp);
9540 tp->timer_counter = tp->timer_multiplier;
9543 /* Heartbeat is only sent once every 2 seconds.
9545 * The heartbeat is to tell the ASF firmware that the host
9546 * driver is still alive. In the event that the OS crashes,
9547 * ASF needs to reset the hardware to free up the FIFO space
9548 * that may be filled with rx packets destined for the host.
9549 * If the FIFO is full, ASF will no longer function properly.
9551 * Unintended resets have been reported on real time kernels
9552 * where the timer doesn't run on time. Netpoll will also have
9555 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9556 * to check the ring condition when the heartbeat is expiring
9557 * before doing the reset. This will prevent most unintended
9560 if (!--tp->asf_counter) {
9561 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9562 tg3_wait_for_event_ack(tp);
9564 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9565 FWCMD_NICDRV_ALIVE3);
9566 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9567 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9568 TG3_FW_UPDATE_TIMEOUT_SEC);
9570 tg3_generate_fw_event(tp);
9572 tp->asf_counter = tp->asf_multiplier;
9575 spin_unlock(&tp->lock);
9578 tp->timer.expires = jiffies + tp->timer_offset;
9579 add_timer(&tp->timer);
9582 static void __devinit tg3_timer_init(struct tg3 *tp)
9584 if (tg3_flag(tp, TAGGED_STATUS) &&
9585 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9586 !tg3_flag(tp, 57765_CLASS))
9587 tp->timer_offset = HZ;
9589 tp->timer_offset = HZ / 10;
9591 BUG_ON(tp->timer_offset > HZ);
9593 tp->timer_multiplier = (HZ / tp->timer_offset);
9594 tp->asf_multiplier = (HZ / tp->timer_offset) *
9595 TG3_FW_UPDATE_FREQ_SEC;
9597 init_timer(&tp->timer);
9598 tp->timer.data = (unsigned long) tp;
9599 tp->timer.function = tg3_timer;
9602 static void tg3_timer_start(struct tg3 *tp)
9604 tp->asf_counter = tp->asf_multiplier;
9605 tp->timer_counter = tp->timer_multiplier;
9607 tp->timer.expires = jiffies + tp->timer_offset;
9608 add_timer(&tp->timer);
9611 static void tg3_timer_stop(struct tg3 *tp)
9613 del_timer_sync(&tp->timer);
9616 /* Restart hardware after configuration changes, self-test, etc.
9617 * Invoked with tp->lock held.
9619 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9620 __releases(tp->lock)
9621 __acquires(tp->lock)
9625 err = tg3_init_hw(tp, reset_phy);
9628 "Failed to re-initialize device, aborting\n");
9629 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9630 tg3_full_unlock(tp);
9633 tg3_napi_enable(tp);
9635 tg3_full_lock(tp, 0);
9640 static void tg3_reset_task(struct work_struct *work)
9642 struct tg3 *tp = container_of(work, struct tg3, reset_task);
9645 tg3_full_lock(tp, 0);
9647 if (!netif_running(tp->dev)) {
9648 tg3_flag_clear(tp, RESET_TASK_PENDING);
9649 tg3_full_unlock(tp);
9653 tg3_full_unlock(tp);
9659 tg3_full_lock(tp, 1);
9661 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9662 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9663 tp->write32_rx_mbox = tg3_write_flush_reg32;
9664 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9665 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9668 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9669 err = tg3_init_hw(tp, 1);
9673 tg3_netif_start(tp);
9676 tg3_full_unlock(tp);
9681 tg3_flag_clear(tp, RESET_TASK_PENDING);
9684 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9687 unsigned long flags;
9689 struct tg3_napi *tnapi = &tp->napi[irq_num];
9691 if (tp->irq_cnt == 1)
9692 name = tp->dev->name;
9694 name = &tnapi->irq_lbl[0];
9695 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9696 name[IFNAMSIZ-1] = 0;
9699 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9701 if (tg3_flag(tp, 1SHOT_MSI))
9706 if (tg3_flag(tp, TAGGED_STATUS))
9707 fn = tg3_interrupt_tagged;
9708 flags = IRQF_SHARED;
9711 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9714 static int tg3_test_interrupt(struct tg3 *tp)
9716 struct tg3_napi *tnapi = &tp->napi[0];
9717 struct net_device *dev = tp->dev;
9718 int err, i, intr_ok = 0;
9721 if (!netif_running(dev))
9724 tg3_disable_ints(tp);
9726 free_irq(tnapi->irq_vec, tnapi);
9729 * Turn off MSI one shot mode. Otherwise this test has no
9730 * observable way to know whether the interrupt was delivered.
9732 if (tg3_flag(tp, 57765_PLUS)) {
9733 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9734 tw32(MSGINT_MODE, val);
9737 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9738 IRQF_SHARED, dev->name, tnapi);
9742 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9743 tg3_enable_ints(tp);
9745 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9748 for (i = 0; i < 5; i++) {
9749 u32 int_mbox, misc_host_ctrl;
9751 int_mbox = tr32_mailbox(tnapi->int_mbox);
9752 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9754 if ((int_mbox != 0) ||
9755 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9760 if (tg3_flag(tp, 57765_PLUS) &&
9761 tnapi->hw_status->status_tag != tnapi->last_tag)
9762 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9767 tg3_disable_ints(tp);
9769 free_irq(tnapi->irq_vec, tnapi);
9771 err = tg3_request_irq(tp, 0);
9777 /* Reenable MSI one shot mode. */
9778 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9779 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9780 tw32(MSGINT_MODE, val);
9788 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9789 * successfully restored
9791 static int tg3_test_msi(struct tg3 *tp)
9796 if (!tg3_flag(tp, USING_MSI))
9799 /* Turn off SERR reporting in case MSI terminates with Master
9802 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9803 pci_write_config_word(tp->pdev, PCI_COMMAND,
9804 pci_cmd & ~PCI_COMMAND_SERR);
9806 err = tg3_test_interrupt(tp);
9808 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9813 /* other failures */
9817 /* MSI test failed, go back to INTx mode */
9818 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9819 "to INTx mode. Please report this failure to the PCI "
9820 "maintainer and include system chipset information\n");
9822 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9824 pci_disable_msi(tp->pdev);
9826 tg3_flag_clear(tp, USING_MSI);
9827 tp->napi[0].irq_vec = tp->pdev->irq;
9829 err = tg3_request_irq(tp, 0);
9833 /* Need to reset the chip because the MSI cycle may have terminated
9834 * with Master Abort.
9836 tg3_full_lock(tp, 1);
9838 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9839 err = tg3_init_hw(tp, 1);
9841 tg3_full_unlock(tp);
9844 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9849 static int tg3_request_firmware(struct tg3 *tp)
9851 const __be32 *fw_data;
9853 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9854 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9859 fw_data = (void *)tp->fw->data;
9861 /* Firmware blob starts with version numbers, followed by
9862 * start address and _full_ length including BSS sections
9863 * (which must be longer than the actual data, of course
9866 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9867 if (tp->fw_len < (tp->fw->size - 12)) {
9868 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9869 tp->fw_len, tp->fw_needed);
9870 release_firmware(tp->fw);
9875 /* We no longer need firmware; we have it. */
9876 tp->fw_needed = NULL;
9880 static bool tg3_enable_msix(struct tg3 *tp)
9883 struct msix_entry msix_ent[tp->irq_max];
9885 tp->irq_cnt = num_online_cpus();
9886 if (tp->irq_cnt > 1) {
9887 /* We want as many rx rings enabled as there are cpus.
9888 * In multiqueue MSI-X mode, the first MSI-X vector
9889 * only deals with link interrupts, etc, so we add
9890 * one to the number of vectors we are requesting.
9892 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9895 for (i = 0; i < tp->irq_max; i++) {
9896 msix_ent[i].entry = i;
9897 msix_ent[i].vector = 0;
9900 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9903 } else if (rc != 0) {
9904 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9906 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9911 for (i = 0; i < tp->irq_max; i++)
9912 tp->napi[i].irq_vec = msix_ent[i].vector;
9914 netif_set_real_num_tx_queues(tp->dev, 1);
9915 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9916 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9917 pci_disable_msix(tp->pdev);
9921 if (tp->irq_cnt > 1) {
9922 tg3_flag_set(tp, ENABLE_RSS);
9924 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9925 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9926 tg3_flag_set(tp, ENABLE_TSS);
9927 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9934 static void tg3_ints_init(struct tg3 *tp)
9936 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9937 !tg3_flag(tp, TAGGED_STATUS)) {
9938 /* All MSI supporting chips should support tagged
9939 * status. Assert that this is the case.
9941 netdev_warn(tp->dev,
9942 "MSI without TAGGED_STATUS? Not using MSI\n");
9946 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9947 tg3_flag_set(tp, USING_MSIX);
9948 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9949 tg3_flag_set(tp, USING_MSI);
9951 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9952 u32 msi_mode = tr32(MSGINT_MODE);
9953 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9954 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9955 if (!tg3_flag(tp, 1SHOT_MSI))
9956 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9957 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9960 if (!tg3_flag(tp, USING_MSIX)) {
9962 tp->napi[0].irq_vec = tp->pdev->irq;
9963 netif_set_real_num_tx_queues(tp->dev, 1);
9964 netif_set_real_num_rx_queues(tp->dev, 1);
9968 static void tg3_ints_fini(struct tg3 *tp)
9970 if (tg3_flag(tp, USING_MSIX))
9971 pci_disable_msix(tp->pdev);
9972 else if (tg3_flag(tp, USING_MSI))
9973 pci_disable_msi(tp->pdev);
9974 tg3_flag_clear(tp, USING_MSI);
9975 tg3_flag_clear(tp, USING_MSIX);
9976 tg3_flag_clear(tp, ENABLE_RSS);
9977 tg3_flag_clear(tp, ENABLE_TSS);
9980 static int tg3_open(struct net_device *dev)
9982 struct tg3 *tp = netdev_priv(dev);
9985 if (tp->fw_needed) {
9986 err = tg3_request_firmware(tp);
9987 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9991 netdev_warn(tp->dev, "TSO capability disabled\n");
9992 tg3_flag_clear(tp, TSO_CAPABLE);
9993 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9994 netdev_notice(tp->dev, "TSO capability restored\n");
9995 tg3_flag_set(tp, TSO_CAPABLE);
9999 netif_carrier_off(tp->dev);
10001 err = tg3_power_up(tp);
10005 tg3_full_lock(tp, 0);
10007 tg3_disable_ints(tp);
10008 tg3_flag_clear(tp, INIT_COMPLETE);
10010 tg3_full_unlock(tp);
10013 * Setup interrupts first so we know how
10014 * many NAPI resources to allocate
10018 tg3_rss_check_indir_tbl(tp);
10020 /* The placement of this call is tied
10021 * to the setup and use of Host TX descriptors.
10023 err = tg3_alloc_consistent(tp);
10029 tg3_napi_enable(tp);
10031 for (i = 0; i < tp->irq_cnt; i++) {
10032 struct tg3_napi *tnapi = &tp->napi[i];
10033 err = tg3_request_irq(tp, i);
10035 for (i--; i >= 0; i--) {
10036 tnapi = &tp->napi[i];
10037 free_irq(tnapi->irq_vec, tnapi);
10043 tg3_full_lock(tp, 0);
10045 err = tg3_init_hw(tp, 1);
10047 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10048 tg3_free_rings(tp);
10051 tg3_full_unlock(tp);
10056 if (tg3_flag(tp, USING_MSI)) {
10057 err = tg3_test_msi(tp);
10060 tg3_full_lock(tp, 0);
10061 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10062 tg3_free_rings(tp);
10063 tg3_full_unlock(tp);
10068 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10069 u32 val = tr32(PCIE_TRANSACTION_CFG);
10071 tw32(PCIE_TRANSACTION_CFG,
10072 val | PCIE_TRANS_CFG_1SHOT_MSI);
10078 tg3_full_lock(tp, 0);
10080 tg3_timer_start(tp);
10081 tg3_flag_set(tp, INIT_COMPLETE);
10082 tg3_enable_ints(tp);
10084 tg3_full_unlock(tp);
10086 netif_tx_start_all_queues(dev);
10089 * Reset loopback feature if it was turned on while the device was down
10090 * make sure that it's installed properly now.
10092 if (dev->features & NETIF_F_LOOPBACK)
10093 tg3_set_loopback(dev, dev->features);
10098 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10099 struct tg3_napi *tnapi = &tp->napi[i];
10100 free_irq(tnapi->irq_vec, tnapi);
10104 tg3_napi_disable(tp);
10106 tg3_free_consistent(tp);
10110 tg3_frob_aux_power(tp, false);
10111 pci_set_power_state(tp->pdev, PCI_D3hot);
10115 static int tg3_close(struct net_device *dev)
10118 struct tg3 *tp = netdev_priv(dev);
10120 tg3_napi_disable(tp);
10121 tg3_reset_task_cancel(tp);
10123 netif_tx_stop_all_queues(dev);
10125 tg3_timer_stop(tp);
10129 tg3_full_lock(tp, 1);
10131 tg3_disable_ints(tp);
10133 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10134 tg3_free_rings(tp);
10135 tg3_flag_clear(tp, INIT_COMPLETE);
10137 tg3_full_unlock(tp);
10139 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10140 struct tg3_napi *tnapi = &tp->napi[i];
10141 free_irq(tnapi->irq_vec, tnapi);
10146 /* Clear stats across close / open calls */
10147 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10148 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10152 tg3_free_consistent(tp);
10154 tg3_power_down(tp);
10156 netif_carrier_off(tp->dev);
10161 static inline u64 get_stat64(tg3_stat64_t *val)
10163 return ((u64)val->high << 32) | ((u64)val->low);
10166 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10168 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10170 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10171 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10172 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10175 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10176 tg3_writephy(tp, MII_TG3_TEST1,
10177 val | MII_TG3_TEST1_CRC_EN);
10178 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10182 tp->phy_crc_errors += val;
10184 return tp->phy_crc_errors;
10187 return get_stat64(&hw_stats->rx_fcs_errors);
10190 #define ESTAT_ADD(member) \
10191 estats->member = old_estats->member + \
10192 get_stat64(&hw_stats->member)
10194 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10196 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10197 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10199 ESTAT_ADD(rx_octets);
10200 ESTAT_ADD(rx_fragments);
10201 ESTAT_ADD(rx_ucast_packets);
10202 ESTAT_ADD(rx_mcast_packets);
10203 ESTAT_ADD(rx_bcast_packets);
10204 ESTAT_ADD(rx_fcs_errors);
10205 ESTAT_ADD(rx_align_errors);
10206 ESTAT_ADD(rx_xon_pause_rcvd);
10207 ESTAT_ADD(rx_xoff_pause_rcvd);
10208 ESTAT_ADD(rx_mac_ctrl_rcvd);
10209 ESTAT_ADD(rx_xoff_entered);
10210 ESTAT_ADD(rx_frame_too_long_errors);
10211 ESTAT_ADD(rx_jabbers);
10212 ESTAT_ADD(rx_undersize_packets);
10213 ESTAT_ADD(rx_in_length_errors);
10214 ESTAT_ADD(rx_out_length_errors);
10215 ESTAT_ADD(rx_64_or_less_octet_packets);
10216 ESTAT_ADD(rx_65_to_127_octet_packets);
10217 ESTAT_ADD(rx_128_to_255_octet_packets);
10218 ESTAT_ADD(rx_256_to_511_octet_packets);
10219 ESTAT_ADD(rx_512_to_1023_octet_packets);
10220 ESTAT_ADD(rx_1024_to_1522_octet_packets);
10221 ESTAT_ADD(rx_1523_to_2047_octet_packets);
10222 ESTAT_ADD(rx_2048_to_4095_octet_packets);
10223 ESTAT_ADD(rx_4096_to_8191_octet_packets);
10224 ESTAT_ADD(rx_8192_to_9022_octet_packets);
10226 ESTAT_ADD(tx_octets);
10227 ESTAT_ADD(tx_collisions);
10228 ESTAT_ADD(tx_xon_sent);
10229 ESTAT_ADD(tx_xoff_sent);
10230 ESTAT_ADD(tx_flow_control);
10231 ESTAT_ADD(tx_mac_errors);
10232 ESTAT_ADD(tx_single_collisions);
10233 ESTAT_ADD(tx_mult_collisions);
10234 ESTAT_ADD(tx_deferred);
10235 ESTAT_ADD(tx_excessive_collisions);
10236 ESTAT_ADD(tx_late_collisions);
10237 ESTAT_ADD(tx_collide_2times);
10238 ESTAT_ADD(tx_collide_3times);
10239 ESTAT_ADD(tx_collide_4times);
10240 ESTAT_ADD(tx_collide_5times);
10241 ESTAT_ADD(tx_collide_6times);
10242 ESTAT_ADD(tx_collide_7times);
10243 ESTAT_ADD(tx_collide_8times);
10244 ESTAT_ADD(tx_collide_9times);
10245 ESTAT_ADD(tx_collide_10times);
10246 ESTAT_ADD(tx_collide_11times);
10247 ESTAT_ADD(tx_collide_12times);
10248 ESTAT_ADD(tx_collide_13times);
10249 ESTAT_ADD(tx_collide_14times);
10250 ESTAT_ADD(tx_collide_15times);
10251 ESTAT_ADD(tx_ucast_packets);
10252 ESTAT_ADD(tx_mcast_packets);
10253 ESTAT_ADD(tx_bcast_packets);
10254 ESTAT_ADD(tx_carrier_sense_errors);
10255 ESTAT_ADD(tx_discards);
10256 ESTAT_ADD(tx_errors);
10258 ESTAT_ADD(dma_writeq_full);
10259 ESTAT_ADD(dma_write_prioq_full);
10260 ESTAT_ADD(rxbds_empty);
10261 ESTAT_ADD(rx_discards);
10262 ESTAT_ADD(rx_errors);
10263 ESTAT_ADD(rx_threshold_hit);
10265 ESTAT_ADD(dma_readq_full);
10266 ESTAT_ADD(dma_read_prioq_full);
10267 ESTAT_ADD(tx_comp_queue_full);
10269 ESTAT_ADD(ring_set_send_prod_index);
10270 ESTAT_ADD(ring_status_update);
10271 ESTAT_ADD(nic_irqs);
10272 ESTAT_ADD(nic_avoided_irqs);
10273 ESTAT_ADD(nic_tx_threshold_hit);
10275 ESTAT_ADD(mbuf_lwm_thresh_hit);
10278 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10280 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10281 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10283 stats->rx_packets = old_stats->rx_packets +
10284 get_stat64(&hw_stats->rx_ucast_packets) +
10285 get_stat64(&hw_stats->rx_mcast_packets) +
10286 get_stat64(&hw_stats->rx_bcast_packets);
10288 stats->tx_packets = old_stats->tx_packets +
10289 get_stat64(&hw_stats->tx_ucast_packets) +
10290 get_stat64(&hw_stats->tx_mcast_packets) +
10291 get_stat64(&hw_stats->tx_bcast_packets);
10293 stats->rx_bytes = old_stats->rx_bytes +
10294 get_stat64(&hw_stats->rx_octets);
10295 stats->tx_bytes = old_stats->tx_bytes +
10296 get_stat64(&hw_stats->tx_octets);
10298 stats->rx_errors = old_stats->rx_errors +
10299 get_stat64(&hw_stats->rx_errors);
10300 stats->tx_errors = old_stats->tx_errors +
10301 get_stat64(&hw_stats->tx_errors) +
10302 get_stat64(&hw_stats->tx_mac_errors) +
10303 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10304 get_stat64(&hw_stats->tx_discards);
10306 stats->multicast = old_stats->multicast +
10307 get_stat64(&hw_stats->rx_mcast_packets);
10308 stats->collisions = old_stats->collisions +
10309 get_stat64(&hw_stats->tx_collisions);
10311 stats->rx_length_errors = old_stats->rx_length_errors +
10312 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10313 get_stat64(&hw_stats->rx_undersize_packets);
10315 stats->rx_over_errors = old_stats->rx_over_errors +
10316 get_stat64(&hw_stats->rxbds_empty);
10317 stats->rx_frame_errors = old_stats->rx_frame_errors +
10318 get_stat64(&hw_stats->rx_align_errors);
10319 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10320 get_stat64(&hw_stats->tx_discards);
10321 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10322 get_stat64(&hw_stats->tx_carrier_sense_errors);
10324 stats->rx_crc_errors = old_stats->rx_crc_errors +
10325 tg3_calc_crc_errors(tp);
10327 stats->rx_missed_errors = old_stats->rx_missed_errors +
10328 get_stat64(&hw_stats->rx_discards);
10330 stats->rx_dropped = tp->rx_dropped;
10331 stats->tx_dropped = tp->tx_dropped;
10334 static int tg3_get_regs_len(struct net_device *dev)
10336 return TG3_REG_BLK_SIZE;
10339 static void tg3_get_regs(struct net_device *dev,
10340 struct ethtool_regs *regs, void *_p)
10342 struct tg3 *tp = netdev_priv(dev);
10346 memset(_p, 0, TG3_REG_BLK_SIZE);
10348 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10351 tg3_full_lock(tp, 0);
10353 tg3_dump_legacy_regs(tp, (u32 *)_p);
10355 tg3_full_unlock(tp);
10358 static int tg3_get_eeprom_len(struct net_device *dev)
10360 struct tg3 *tp = netdev_priv(dev);
10362 return tp->nvram_size;
10365 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10367 struct tg3 *tp = netdev_priv(dev);
10370 u32 i, offset, len, b_offset, b_count;
10373 if (tg3_flag(tp, NO_NVRAM))
10376 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10379 offset = eeprom->offset;
10383 eeprom->magic = TG3_EEPROM_MAGIC;
10386 /* adjustments to start on required 4 byte boundary */
10387 b_offset = offset & 3;
10388 b_count = 4 - b_offset;
10389 if (b_count > len) {
10390 /* i.e. offset=1 len=2 */
10393 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10396 memcpy(data, ((char *)&val) + b_offset, b_count);
10399 eeprom->len += b_count;
10402 /* read bytes up to the last 4 byte boundary */
10403 pd = &data[eeprom->len];
10404 for (i = 0; i < (len - (len & 3)); i += 4) {
10405 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10410 memcpy(pd + i, &val, 4);
10415 /* read last bytes not ending on 4 byte boundary */
10416 pd = &data[eeprom->len];
10418 b_offset = offset + len - b_count;
10419 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10422 memcpy(pd, &val, b_count);
10423 eeprom->len += b_count;
10428 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10430 struct tg3 *tp = netdev_priv(dev);
10432 u32 offset, len, b_offset, odd_len;
10436 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10439 if (tg3_flag(tp, NO_NVRAM) ||
10440 eeprom->magic != TG3_EEPROM_MAGIC)
10443 offset = eeprom->offset;
10446 if ((b_offset = (offset & 3))) {
10447 /* adjustments to start on required 4 byte boundary */
10448 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10459 /* adjustments to end on required 4 byte boundary */
10461 len = (len + 3) & ~3;
10462 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10468 if (b_offset || odd_len) {
10469 buf = kmalloc(len, GFP_KERNEL);
10473 memcpy(buf, &start, 4);
10475 memcpy(buf+len-4, &end, 4);
10476 memcpy(buf + b_offset, data, eeprom->len);
10479 ret = tg3_nvram_write_block(tp, offset, len, buf);
10487 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10489 struct tg3 *tp = netdev_priv(dev);
10491 if (tg3_flag(tp, USE_PHYLIB)) {
10492 struct phy_device *phydev;
10493 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10495 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10496 return phy_ethtool_gset(phydev, cmd);
10499 cmd->supported = (SUPPORTED_Autoneg);
10501 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10502 cmd->supported |= (SUPPORTED_1000baseT_Half |
10503 SUPPORTED_1000baseT_Full);
10505 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10506 cmd->supported |= (SUPPORTED_100baseT_Half |
10507 SUPPORTED_100baseT_Full |
10508 SUPPORTED_10baseT_Half |
10509 SUPPORTED_10baseT_Full |
10511 cmd->port = PORT_TP;
10513 cmd->supported |= SUPPORTED_FIBRE;
10514 cmd->port = PORT_FIBRE;
10517 cmd->advertising = tp->link_config.advertising;
10518 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10519 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10520 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10521 cmd->advertising |= ADVERTISED_Pause;
10523 cmd->advertising |= ADVERTISED_Pause |
10524 ADVERTISED_Asym_Pause;
10526 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10527 cmd->advertising |= ADVERTISED_Asym_Pause;
10530 if (netif_running(dev) && netif_carrier_ok(dev)) {
10531 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10532 cmd->duplex = tp->link_config.active_duplex;
10533 cmd->lp_advertising = tp->link_config.rmt_adv;
10534 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10535 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10536 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10538 cmd->eth_tp_mdix = ETH_TP_MDI;
10541 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10542 cmd->duplex = DUPLEX_UNKNOWN;
10543 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10545 cmd->phy_address = tp->phy_addr;
10546 cmd->transceiver = XCVR_INTERNAL;
10547 cmd->autoneg = tp->link_config.autoneg;
10553 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10555 struct tg3 *tp = netdev_priv(dev);
10556 u32 speed = ethtool_cmd_speed(cmd);
10558 if (tg3_flag(tp, USE_PHYLIB)) {
10559 struct phy_device *phydev;
10560 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10562 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10563 return phy_ethtool_sset(phydev, cmd);
10566 if (cmd->autoneg != AUTONEG_ENABLE &&
10567 cmd->autoneg != AUTONEG_DISABLE)
10570 if (cmd->autoneg == AUTONEG_DISABLE &&
10571 cmd->duplex != DUPLEX_FULL &&
10572 cmd->duplex != DUPLEX_HALF)
10575 if (cmd->autoneg == AUTONEG_ENABLE) {
10576 u32 mask = ADVERTISED_Autoneg |
10578 ADVERTISED_Asym_Pause;
10580 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10581 mask |= ADVERTISED_1000baseT_Half |
10582 ADVERTISED_1000baseT_Full;
10584 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10585 mask |= ADVERTISED_100baseT_Half |
10586 ADVERTISED_100baseT_Full |
10587 ADVERTISED_10baseT_Half |
10588 ADVERTISED_10baseT_Full |
10591 mask |= ADVERTISED_FIBRE;
10593 if (cmd->advertising & ~mask)
10596 mask &= (ADVERTISED_1000baseT_Half |
10597 ADVERTISED_1000baseT_Full |
10598 ADVERTISED_100baseT_Half |
10599 ADVERTISED_100baseT_Full |
10600 ADVERTISED_10baseT_Half |
10601 ADVERTISED_10baseT_Full);
10603 cmd->advertising &= mask;
10605 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10606 if (speed != SPEED_1000)
10609 if (cmd->duplex != DUPLEX_FULL)
10612 if (speed != SPEED_100 &&
10618 tg3_full_lock(tp, 0);
10620 tp->link_config.autoneg = cmd->autoneg;
10621 if (cmd->autoneg == AUTONEG_ENABLE) {
10622 tp->link_config.advertising = (cmd->advertising |
10623 ADVERTISED_Autoneg);
10624 tp->link_config.speed = SPEED_UNKNOWN;
10625 tp->link_config.duplex = DUPLEX_UNKNOWN;
10627 tp->link_config.advertising = 0;
10628 tp->link_config.speed = speed;
10629 tp->link_config.duplex = cmd->duplex;
10632 if (netif_running(dev))
10633 tg3_setup_phy(tp, 1);
10635 tg3_full_unlock(tp);
10640 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10642 struct tg3 *tp = netdev_priv(dev);
10644 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10645 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10646 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10647 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10650 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10652 struct tg3 *tp = netdev_priv(dev);
10654 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10655 wol->supported = WAKE_MAGIC;
10657 wol->supported = 0;
10659 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10660 wol->wolopts = WAKE_MAGIC;
10661 memset(&wol->sopass, 0, sizeof(wol->sopass));
10664 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10666 struct tg3 *tp = netdev_priv(dev);
10667 struct device *dp = &tp->pdev->dev;
10669 if (wol->wolopts & ~WAKE_MAGIC)
10671 if ((wol->wolopts & WAKE_MAGIC) &&
10672 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10675 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10677 spin_lock_bh(&tp->lock);
10678 if (device_may_wakeup(dp))
10679 tg3_flag_set(tp, WOL_ENABLE);
10681 tg3_flag_clear(tp, WOL_ENABLE);
10682 spin_unlock_bh(&tp->lock);
10687 static u32 tg3_get_msglevel(struct net_device *dev)
10689 struct tg3 *tp = netdev_priv(dev);
10690 return tp->msg_enable;
10693 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10695 struct tg3 *tp = netdev_priv(dev);
10696 tp->msg_enable = value;
10699 static int tg3_nway_reset(struct net_device *dev)
10701 struct tg3 *tp = netdev_priv(dev);
10704 if (!netif_running(dev))
10707 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10710 if (tg3_flag(tp, USE_PHYLIB)) {
10711 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10713 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10717 spin_lock_bh(&tp->lock);
10719 tg3_readphy(tp, MII_BMCR, &bmcr);
10720 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10721 ((bmcr & BMCR_ANENABLE) ||
10722 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10723 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10727 spin_unlock_bh(&tp->lock);
10733 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10735 struct tg3 *tp = netdev_priv(dev);
10737 ering->rx_max_pending = tp->rx_std_ring_mask;
10738 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10739 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10741 ering->rx_jumbo_max_pending = 0;
10743 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10745 ering->rx_pending = tp->rx_pending;
10746 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10747 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10749 ering->rx_jumbo_pending = 0;
10751 ering->tx_pending = tp->napi[0].tx_pending;
10754 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10756 struct tg3 *tp = netdev_priv(dev);
10757 int i, irq_sync = 0, err = 0;
10759 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10760 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10761 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10762 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10763 (tg3_flag(tp, TSO_BUG) &&
10764 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10767 if (netif_running(dev)) {
10769 tg3_netif_stop(tp);
10773 tg3_full_lock(tp, irq_sync);
10775 tp->rx_pending = ering->rx_pending;
10777 if (tg3_flag(tp, MAX_RXPEND_64) &&
10778 tp->rx_pending > 63)
10779 tp->rx_pending = 63;
10780 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10782 for (i = 0; i < tp->irq_max; i++)
10783 tp->napi[i].tx_pending = ering->tx_pending;
10785 if (netif_running(dev)) {
10786 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10787 err = tg3_restart_hw(tp, 1);
10789 tg3_netif_start(tp);
10792 tg3_full_unlock(tp);
10794 if (irq_sync && !err)
10800 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10802 struct tg3 *tp = netdev_priv(dev);
10804 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10806 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10807 epause->rx_pause = 1;
10809 epause->rx_pause = 0;
10811 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10812 epause->tx_pause = 1;
10814 epause->tx_pause = 0;
10817 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10819 struct tg3 *tp = netdev_priv(dev);
10822 if (tg3_flag(tp, USE_PHYLIB)) {
10824 struct phy_device *phydev;
10826 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10828 if (!(phydev->supported & SUPPORTED_Pause) ||
10829 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10830 (epause->rx_pause != epause->tx_pause)))
10833 tp->link_config.flowctrl = 0;
10834 if (epause->rx_pause) {
10835 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10837 if (epause->tx_pause) {
10838 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10839 newadv = ADVERTISED_Pause;
10841 newadv = ADVERTISED_Pause |
10842 ADVERTISED_Asym_Pause;
10843 } else if (epause->tx_pause) {
10844 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10845 newadv = ADVERTISED_Asym_Pause;
10849 if (epause->autoneg)
10850 tg3_flag_set(tp, PAUSE_AUTONEG);
10852 tg3_flag_clear(tp, PAUSE_AUTONEG);
10854 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10855 u32 oldadv = phydev->advertising &
10856 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10857 if (oldadv != newadv) {
10858 phydev->advertising &=
10859 ~(ADVERTISED_Pause |
10860 ADVERTISED_Asym_Pause);
10861 phydev->advertising |= newadv;
10862 if (phydev->autoneg) {
10864 * Always renegotiate the link to
10865 * inform our link partner of our
10866 * flow control settings, even if the
10867 * flow control is forced. Let
10868 * tg3_adjust_link() do the final
10869 * flow control setup.
10871 return phy_start_aneg(phydev);
10875 if (!epause->autoneg)
10876 tg3_setup_flow_control(tp, 0, 0);
10878 tp->link_config.advertising &=
10879 ~(ADVERTISED_Pause |
10880 ADVERTISED_Asym_Pause);
10881 tp->link_config.advertising |= newadv;
10886 if (netif_running(dev)) {
10887 tg3_netif_stop(tp);
10891 tg3_full_lock(tp, irq_sync);
10893 if (epause->autoneg)
10894 tg3_flag_set(tp, PAUSE_AUTONEG);
10896 tg3_flag_clear(tp, PAUSE_AUTONEG);
10897 if (epause->rx_pause)
10898 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10900 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10901 if (epause->tx_pause)
10902 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10904 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10906 if (netif_running(dev)) {
10907 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10908 err = tg3_restart_hw(tp, 1);
10910 tg3_netif_start(tp);
10913 tg3_full_unlock(tp);
10919 static int tg3_get_sset_count(struct net_device *dev, int sset)
10923 return TG3_NUM_TEST;
10925 return TG3_NUM_STATS;
10927 return -EOPNOTSUPP;
10931 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10932 u32 *rules __always_unused)
10934 struct tg3 *tp = netdev_priv(dev);
10936 if (!tg3_flag(tp, SUPPORT_MSIX))
10937 return -EOPNOTSUPP;
10939 switch (info->cmd) {
10940 case ETHTOOL_GRXRINGS:
10941 if (netif_running(tp->dev))
10942 info->data = tp->irq_cnt;
10944 info->data = num_online_cpus();
10945 if (info->data > TG3_IRQ_MAX_VECS_RSS)
10946 info->data = TG3_IRQ_MAX_VECS_RSS;
10949 /* The first interrupt vector only
10950 * handles link interrupts.
10956 return -EOPNOTSUPP;
10960 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10963 struct tg3 *tp = netdev_priv(dev);
10965 if (tg3_flag(tp, SUPPORT_MSIX))
10966 size = TG3_RSS_INDIR_TBL_SIZE;
10971 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10973 struct tg3 *tp = netdev_priv(dev);
10976 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10977 indir[i] = tp->rss_ind_tbl[i];
10982 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10984 struct tg3 *tp = netdev_priv(dev);
10987 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10988 tp->rss_ind_tbl[i] = indir[i];
10990 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10993 /* It is legal to write the indirection
10994 * table while the device is running.
10996 tg3_full_lock(tp, 0);
10997 tg3_rss_write_indir_tbl(tp);
10998 tg3_full_unlock(tp);
11003 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11005 switch (stringset) {
11007 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
11010 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
11013 WARN_ON(1); /* we need a WARN() */
11018 static int tg3_set_phys_id(struct net_device *dev,
11019 enum ethtool_phys_id_state state)
11021 struct tg3 *tp = netdev_priv(dev);
11023 if (!netif_running(tp->dev))
11027 case ETHTOOL_ID_ACTIVE:
11028 return 1; /* cycle on/off once per second */
11030 case ETHTOOL_ID_ON:
11031 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11032 LED_CTRL_1000MBPS_ON |
11033 LED_CTRL_100MBPS_ON |
11034 LED_CTRL_10MBPS_ON |
11035 LED_CTRL_TRAFFIC_OVERRIDE |
11036 LED_CTRL_TRAFFIC_BLINK |
11037 LED_CTRL_TRAFFIC_LED);
11040 case ETHTOOL_ID_OFF:
11041 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11042 LED_CTRL_TRAFFIC_OVERRIDE);
11045 case ETHTOOL_ID_INACTIVE:
11046 tw32(MAC_LED_CTRL, tp->led_ctrl);
11053 static void tg3_get_ethtool_stats(struct net_device *dev,
11054 struct ethtool_stats *estats, u64 *tmp_stats)
11056 struct tg3 *tp = netdev_priv(dev);
11059 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11061 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11064 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11068 u32 offset = 0, len = 0;
11071 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11074 if (magic == TG3_EEPROM_MAGIC) {
11075 for (offset = TG3_NVM_DIR_START;
11076 offset < TG3_NVM_DIR_END;
11077 offset += TG3_NVM_DIRENT_SIZE) {
11078 if (tg3_nvram_read(tp, offset, &val))
11081 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11082 TG3_NVM_DIRTYPE_EXTVPD)
11086 if (offset != TG3_NVM_DIR_END) {
11087 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11088 if (tg3_nvram_read(tp, offset + 4, &offset))
11091 offset = tg3_nvram_logical_addr(tp, offset);
11095 if (!offset || !len) {
11096 offset = TG3_NVM_VPD_OFF;
11097 len = TG3_NVM_VPD_LEN;
11100 buf = kmalloc(len, GFP_KERNEL);
11104 if (magic == TG3_EEPROM_MAGIC) {
11105 for (i = 0; i < len; i += 4) {
11106 /* The data is in little-endian format in NVRAM.
11107 * Use the big-endian read routines to preserve
11108 * the byte order as it exists in NVRAM.
11110 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11116 unsigned int pos = 0;
11118 ptr = (u8 *)&buf[0];
11119 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11120 cnt = pci_read_vpd(tp->pdev, pos,
11122 if (cnt == -ETIMEDOUT || cnt == -EINTR)
11140 #define NVRAM_TEST_SIZE 0x100
11141 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11142 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11143 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
11144 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11145 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
11146 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
11147 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11148 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11150 static int tg3_test_nvram(struct tg3 *tp)
11152 u32 csum, magic, len;
11154 int i, j, k, err = 0, size;
11156 if (tg3_flag(tp, NO_NVRAM))
11159 if (tg3_nvram_read(tp, 0, &magic) != 0)
11162 if (magic == TG3_EEPROM_MAGIC)
11163 size = NVRAM_TEST_SIZE;
11164 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11165 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11166 TG3_EEPROM_SB_FORMAT_1) {
11167 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11168 case TG3_EEPROM_SB_REVISION_0:
11169 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11171 case TG3_EEPROM_SB_REVISION_2:
11172 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11174 case TG3_EEPROM_SB_REVISION_3:
11175 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11177 case TG3_EEPROM_SB_REVISION_4:
11178 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11180 case TG3_EEPROM_SB_REVISION_5:
11181 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11183 case TG3_EEPROM_SB_REVISION_6:
11184 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11191 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11192 size = NVRAM_SELFBOOT_HW_SIZE;
11196 buf = kmalloc(size, GFP_KERNEL);
11201 for (i = 0, j = 0; i < size; i += 4, j++) {
11202 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11209 /* Selfboot format */
11210 magic = be32_to_cpu(buf[0]);
11211 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11212 TG3_EEPROM_MAGIC_FW) {
11213 u8 *buf8 = (u8 *) buf, csum8 = 0;
11215 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11216 TG3_EEPROM_SB_REVISION_2) {
11217 /* For rev 2, the csum doesn't include the MBA. */
11218 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11220 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11223 for (i = 0; i < size; i++)
11236 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11237 TG3_EEPROM_MAGIC_HW) {
11238 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11239 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11240 u8 *buf8 = (u8 *) buf;
11242 /* Separate the parity bits and the data bytes. */
11243 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11244 if ((i == 0) || (i == 8)) {
11248 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11249 parity[k++] = buf8[i] & msk;
11251 } else if (i == 16) {
11255 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11256 parity[k++] = buf8[i] & msk;
11259 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11260 parity[k++] = buf8[i] & msk;
11263 data[j++] = buf8[i];
11267 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11268 u8 hw8 = hweight8(data[i]);
11270 if ((hw8 & 0x1) && parity[i])
11272 else if (!(hw8 & 0x1) && !parity[i])
11281 /* Bootstrap checksum at offset 0x10 */
11282 csum = calc_crc((unsigned char *) buf, 0x10);
11283 if (csum != le32_to_cpu(buf[0x10/4]))
11286 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11287 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11288 if (csum != le32_to_cpu(buf[0xfc/4]))
11293 buf = tg3_vpd_readblock(tp, &len);
11297 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11299 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11303 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11306 i += PCI_VPD_LRDT_TAG_SIZE;
11307 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11308 PCI_VPD_RO_KEYWORD_CHKSUM);
11312 j += PCI_VPD_INFO_FLD_HDR_SIZE;
11314 for (i = 0; i <= j; i++)
11315 csum8 += ((u8 *)buf)[i];
11329 #define TG3_SERDES_TIMEOUT_SEC 2
11330 #define TG3_COPPER_TIMEOUT_SEC 6
11332 static int tg3_test_link(struct tg3 *tp)
11336 if (!netif_running(tp->dev))
11339 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11340 max = TG3_SERDES_TIMEOUT_SEC;
11342 max = TG3_COPPER_TIMEOUT_SEC;
11344 for (i = 0; i < max; i++) {
11345 if (netif_carrier_ok(tp->dev))
11348 if (msleep_interruptible(1000))
11355 /* Only test the commonly used registers */
11356 static int tg3_test_registers(struct tg3 *tp)
11358 int i, is_5705, is_5750;
11359 u32 offset, read_mask, write_mask, val, save_val, read_val;
11363 #define TG3_FL_5705 0x1
11364 #define TG3_FL_NOT_5705 0x2
11365 #define TG3_FL_NOT_5788 0x4
11366 #define TG3_FL_NOT_5750 0x8
11370 /* MAC Control Registers */
11371 { MAC_MODE, TG3_FL_NOT_5705,
11372 0x00000000, 0x00ef6f8c },
11373 { MAC_MODE, TG3_FL_5705,
11374 0x00000000, 0x01ef6b8c },
11375 { MAC_STATUS, TG3_FL_NOT_5705,
11376 0x03800107, 0x00000000 },
11377 { MAC_STATUS, TG3_FL_5705,
11378 0x03800100, 0x00000000 },
11379 { MAC_ADDR_0_HIGH, 0x0000,
11380 0x00000000, 0x0000ffff },
11381 { MAC_ADDR_0_LOW, 0x0000,
11382 0x00000000, 0xffffffff },
11383 { MAC_RX_MTU_SIZE, 0x0000,
11384 0x00000000, 0x0000ffff },
11385 { MAC_TX_MODE, 0x0000,
11386 0x00000000, 0x00000070 },
11387 { MAC_TX_LENGTHS, 0x0000,
11388 0x00000000, 0x00003fff },
11389 { MAC_RX_MODE, TG3_FL_NOT_5705,
11390 0x00000000, 0x000007fc },
11391 { MAC_RX_MODE, TG3_FL_5705,
11392 0x00000000, 0x000007dc },
11393 { MAC_HASH_REG_0, 0x0000,
11394 0x00000000, 0xffffffff },
11395 { MAC_HASH_REG_1, 0x0000,
11396 0x00000000, 0xffffffff },
11397 { MAC_HASH_REG_2, 0x0000,
11398 0x00000000, 0xffffffff },
11399 { MAC_HASH_REG_3, 0x0000,
11400 0x00000000, 0xffffffff },
11402 /* Receive Data and Receive BD Initiator Control Registers. */
11403 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11404 0x00000000, 0xffffffff },
11405 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11406 0x00000000, 0xffffffff },
11407 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11408 0x00000000, 0x00000003 },
11409 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11410 0x00000000, 0xffffffff },
11411 { RCVDBDI_STD_BD+0, 0x0000,
11412 0x00000000, 0xffffffff },
11413 { RCVDBDI_STD_BD+4, 0x0000,
11414 0x00000000, 0xffffffff },
11415 { RCVDBDI_STD_BD+8, 0x0000,
11416 0x00000000, 0xffff0002 },
11417 { RCVDBDI_STD_BD+0xc, 0x0000,
11418 0x00000000, 0xffffffff },
11420 /* Receive BD Initiator Control Registers. */
11421 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11422 0x00000000, 0xffffffff },
11423 { RCVBDI_STD_THRESH, TG3_FL_5705,
11424 0x00000000, 0x000003ff },
11425 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11426 0x00000000, 0xffffffff },
11428 /* Host Coalescing Control Registers. */
11429 { HOSTCC_MODE, TG3_FL_NOT_5705,
11430 0x00000000, 0x00000004 },
11431 { HOSTCC_MODE, TG3_FL_5705,
11432 0x00000000, 0x000000f6 },
11433 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11434 0x00000000, 0xffffffff },
11435 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11436 0x00000000, 0x000003ff },
11437 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11438 0x00000000, 0xffffffff },
11439 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11440 0x00000000, 0x000003ff },
11441 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11442 0x00000000, 0xffffffff },
11443 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11444 0x00000000, 0x000000ff },
11445 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11446 0x00000000, 0xffffffff },
11447 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11448 0x00000000, 0x000000ff },
11449 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11450 0x00000000, 0xffffffff },
11451 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11452 0x00000000, 0xffffffff },
11453 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11454 0x00000000, 0xffffffff },
11455 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11456 0x00000000, 0x000000ff },
11457 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11458 0x00000000, 0xffffffff },
11459 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11460 0x00000000, 0x000000ff },
11461 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11462 0x00000000, 0xffffffff },
11463 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11464 0x00000000, 0xffffffff },
11465 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11466 0x00000000, 0xffffffff },
11467 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11468 0x00000000, 0xffffffff },
11469 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11470 0x00000000, 0xffffffff },
11471 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11472 0xffffffff, 0x00000000 },
11473 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11474 0xffffffff, 0x00000000 },
11476 /* Buffer Manager Control Registers. */
11477 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11478 0x00000000, 0x007fff80 },
11479 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11480 0x00000000, 0x007fffff },
11481 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11482 0x00000000, 0x0000003f },
11483 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11484 0x00000000, 0x000001ff },
11485 { BUFMGR_MB_HIGH_WATER, 0x0000,
11486 0x00000000, 0x000001ff },
11487 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11488 0xffffffff, 0x00000000 },
11489 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11490 0xffffffff, 0x00000000 },
11492 /* Mailbox Registers */
11493 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11494 0x00000000, 0x000001ff },
11495 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11496 0x00000000, 0x000001ff },
11497 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11498 0x00000000, 0x000007ff },
11499 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11500 0x00000000, 0x000001ff },
11502 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11505 is_5705 = is_5750 = 0;
11506 if (tg3_flag(tp, 5705_PLUS)) {
11508 if (tg3_flag(tp, 5750_PLUS))
11512 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11513 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11516 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11519 if (tg3_flag(tp, IS_5788) &&
11520 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11523 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11526 offset = (u32) reg_tbl[i].offset;
11527 read_mask = reg_tbl[i].read_mask;
11528 write_mask = reg_tbl[i].write_mask;
11530 /* Save the original register content */
11531 save_val = tr32(offset);
11533 /* Determine the read-only value. */
11534 read_val = save_val & read_mask;
11536 /* Write zero to the register, then make sure the read-only bits
11537 * are not changed and the read/write bits are all zeros.
11541 val = tr32(offset);
11543 /* Test the read-only and read/write bits. */
11544 if (((val & read_mask) != read_val) || (val & write_mask))
11547 /* Write ones to all the bits defined by RdMask and WrMask, then
11548 * make sure the read-only bits are not changed and the
11549 * read/write bits are all ones.
11551 tw32(offset, read_mask | write_mask);
11553 val = tr32(offset);
11555 /* Test the read-only bits. */
11556 if ((val & read_mask) != read_val)
11559 /* Test the read/write bits. */
11560 if ((val & write_mask) != write_mask)
11563 tw32(offset, save_val);
11569 if (netif_msg_hw(tp))
11570 netdev_err(tp->dev,
11571 "Register test failed at offset %x\n", offset);
11572 tw32(offset, save_val);
11576 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11578 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11582 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11583 for (j = 0; j < len; j += 4) {
11586 tg3_write_mem(tp, offset + j, test_pattern[i]);
11587 tg3_read_mem(tp, offset + j, &val);
11588 if (val != test_pattern[i])
11595 static int tg3_test_memory(struct tg3 *tp)
11597 static struct mem_entry {
11600 } mem_tbl_570x[] = {
11601 { 0x00000000, 0x00b50},
11602 { 0x00002000, 0x1c000},
11603 { 0xffffffff, 0x00000}
11604 }, mem_tbl_5705[] = {
11605 { 0x00000100, 0x0000c},
11606 { 0x00000200, 0x00008},
11607 { 0x00004000, 0x00800},
11608 { 0x00006000, 0x01000},
11609 { 0x00008000, 0x02000},
11610 { 0x00010000, 0x0e000},
11611 { 0xffffffff, 0x00000}
11612 }, mem_tbl_5755[] = {
11613 { 0x00000200, 0x00008},
11614 { 0x00004000, 0x00800},
11615 { 0x00006000, 0x00800},
11616 { 0x00008000, 0x02000},
11617 { 0x00010000, 0x0c000},
11618 { 0xffffffff, 0x00000}
11619 }, mem_tbl_5906[] = {
11620 { 0x00000200, 0x00008},
11621 { 0x00004000, 0x00400},
11622 { 0x00006000, 0x00400},
11623 { 0x00008000, 0x01000},
11624 { 0x00010000, 0x01000},
11625 { 0xffffffff, 0x00000}
11626 }, mem_tbl_5717[] = {
11627 { 0x00000200, 0x00008},
11628 { 0x00010000, 0x0a000},
11629 { 0x00020000, 0x13c00},
11630 { 0xffffffff, 0x00000}
11631 }, mem_tbl_57765[] = {
11632 { 0x00000200, 0x00008},
11633 { 0x00004000, 0x00800},
11634 { 0x00006000, 0x09800},
11635 { 0x00010000, 0x0a000},
11636 { 0xffffffff, 0x00000}
11638 struct mem_entry *mem_tbl;
11642 if (tg3_flag(tp, 5717_PLUS))
11643 mem_tbl = mem_tbl_5717;
11644 else if (tg3_flag(tp, 57765_CLASS))
11645 mem_tbl = mem_tbl_57765;
11646 else if (tg3_flag(tp, 5755_PLUS))
11647 mem_tbl = mem_tbl_5755;
11648 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11649 mem_tbl = mem_tbl_5906;
11650 else if (tg3_flag(tp, 5705_PLUS))
11651 mem_tbl = mem_tbl_5705;
11653 mem_tbl = mem_tbl_570x;
11655 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11656 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11664 #define TG3_TSO_MSS 500
11666 #define TG3_TSO_IP_HDR_LEN 20
11667 #define TG3_TSO_TCP_HDR_LEN 20
11668 #define TG3_TSO_TCP_OPT_LEN 12
11670 static const u8 tg3_tso_header[] = {
11672 0x45, 0x00, 0x00, 0x00,
11673 0x00, 0x00, 0x40, 0x00,
11674 0x40, 0x06, 0x00, 0x00,
11675 0x0a, 0x00, 0x00, 0x01,
11676 0x0a, 0x00, 0x00, 0x02,
11677 0x0d, 0x00, 0xe0, 0x00,
11678 0x00, 0x00, 0x01, 0x00,
11679 0x00, 0x00, 0x02, 0x00,
11680 0x80, 0x10, 0x10, 0x00,
11681 0x14, 0x09, 0x00, 0x00,
11682 0x01, 0x01, 0x08, 0x0a,
11683 0x11, 0x11, 0x11, 0x11,
11684 0x11, 0x11, 0x11, 0x11,
11687 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11689 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11690 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11692 struct sk_buff *skb;
11693 u8 *tx_data, *rx_data;
11695 int num_pkts, tx_len, rx_len, i, err;
11696 struct tg3_rx_buffer_desc *desc;
11697 struct tg3_napi *tnapi, *rnapi;
11698 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11700 tnapi = &tp->napi[0];
11701 rnapi = &tp->napi[0];
11702 if (tp->irq_cnt > 1) {
11703 if (tg3_flag(tp, ENABLE_RSS))
11704 rnapi = &tp->napi[1];
11705 if (tg3_flag(tp, ENABLE_TSS))
11706 tnapi = &tp->napi[1];
11708 coal_now = tnapi->coal_now | rnapi->coal_now;
11713 skb = netdev_alloc_skb(tp->dev, tx_len);
11717 tx_data = skb_put(skb, tx_len);
11718 memcpy(tx_data, tp->dev->dev_addr, 6);
11719 memset(tx_data + 6, 0x0, 8);
11721 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11723 if (tso_loopback) {
11724 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11726 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11727 TG3_TSO_TCP_OPT_LEN;
11729 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11730 sizeof(tg3_tso_header));
11733 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11734 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11736 /* Set the total length field in the IP header */
11737 iph->tot_len = htons((u16)(mss + hdr_len));
11739 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11740 TXD_FLAG_CPU_POST_DMA);
11742 if (tg3_flag(tp, HW_TSO_1) ||
11743 tg3_flag(tp, HW_TSO_2) ||
11744 tg3_flag(tp, HW_TSO_3)) {
11746 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11747 th = (struct tcphdr *)&tx_data[val];
11750 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11752 if (tg3_flag(tp, HW_TSO_3)) {
11753 mss |= (hdr_len & 0xc) << 12;
11754 if (hdr_len & 0x10)
11755 base_flags |= 0x00000010;
11756 base_flags |= (hdr_len & 0x3e0) << 5;
11757 } else if (tg3_flag(tp, HW_TSO_2))
11758 mss |= hdr_len << 9;
11759 else if (tg3_flag(tp, HW_TSO_1) ||
11760 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11761 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11763 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11766 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11769 data_off = ETH_HLEN;
11771 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
11772 tx_len > VLAN_ETH_FRAME_LEN)
11773 base_flags |= TXD_FLAG_JMB_PKT;
11776 for (i = data_off; i < tx_len; i++)
11777 tx_data[i] = (u8) (i & 0xff);
11779 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11780 if (pci_dma_mapping_error(tp->pdev, map)) {
11781 dev_kfree_skb(skb);
11785 val = tnapi->tx_prod;
11786 tnapi->tx_buffers[val].skb = skb;
11787 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11789 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11794 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11796 budget = tg3_tx_avail(tnapi);
11797 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11798 base_flags | TXD_FLAG_END, mss, 0)) {
11799 tnapi->tx_buffers[val].skb = NULL;
11800 dev_kfree_skb(skb);
11806 /* Sync BD data before updating mailbox */
11809 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11810 tr32_mailbox(tnapi->prodmbox);
11814 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11815 for (i = 0; i < 35; i++) {
11816 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11821 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11822 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11823 if ((tx_idx == tnapi->tx_prod) &&
11824 (rx_idx == (rx_start_idx + num_pkts)))
11828 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11829 dev_kfree_skb(skb);
11831 if (tx_idx != tnapi->tx_prod)
11834 if (rx_idx != rx_start_idx + num_pkts)
11838 while (rx_idx != rx_start_idx) {
11839 desc = &rnapi->rx_rcb[rx_start_idx++];
11840 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11841 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11843 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11844 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11847 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11850 if (!tso_loopback) {
11851 if (rx_len != tx_len)
11854 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11855 if (opaque_key != RXD_OPAQUE_RING_STD)
11858 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11861 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11862 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11863 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11867 if (opaque_key == RXD_OPAQUE_RING_STD) {
11868 rx_data = tpr->rx_std_buffers[desc_idx].data;
11869 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11871 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11872 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11873 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11878 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11879 PCI_DMA_FROMDEVICE);
11881 rx_data += TG3_RX_OFFSET(tp);
11882 for (i = data_off; i < rx_len; i++, val++) {
11883 if (*(rx_data + i) != (u8) (val & 0xff))
11890 /* tg3_free_rings will unmap and free the rx_data */
11895 #define TG3_STD_LOOPBACK_FAILED 1
11896 #define TG3_JMB_LOOPBACK_FAILED 2
11897 #define TG3_TSO_LOOPBACK_FAILED 4
11898 #define TG3_LOOPBACK_FAILED \
11899 (TG3_STD_LOOPBACK_FAILED | \
11900 TG3_JMB_LOOPBACK_FAILED | \
11901 TG3_TSO_LOOPBACK_FAILED)
11903 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11907 u32 jmb_pkt_sz = 9000;
11910 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
11912 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11913 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11915 if (!netif_running(tp->dev)) {
11916 data[0] = TG3_LOOPBACK_FAILED;
11917 data[1] = TG3_LOOPBACK_FAILED;
11919 data[2] = TG3_LOOPBACK_FAILED;
11923 err = tg3_reset_hw(tp, 1);
11925 data[0] = TG3_LOOPBACK_FAILED;
11926 data[1] = TG3_LOOPBACK_FAILED;
11928 data[2] = TG3_LOOPBACK_FAILED;
11932 if (tg3_flag(tp, ENABLE_RSS)) {
11935 /* Reroute all rx packets to the 1st queue */
11936 for (i = MAC_RSS_INDIR_TBL_0;
11937 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11941 /* HW errata - mac loopback fails in some cases on 5780.
11942 * Normal traffic and PHY loopback are not affected by
11943 * errata. Also, the MAC loopback test is deprecated for
11944 * all newer ASIC revisions.
11946 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11947 !tg3_flag(tp, CPMU_PRESENT)) {
11948 tg3_mac_loopback(tp, true);
11950 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11951 data[0] |= TG3_STD_LOOPBACK_FAILED;
11953 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11954 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11955 data[0] |= TG3_JMB_LOOPBACK_FAILED;
11957 tg3_mac_loopback(tp, false);
11960 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11961 !tg3_flag(tp, USE_PHYLIB)) {
11964 tg3_phy_lpbk_set(tp, 0, false);
11966 /* Wait for link */
11967 for (i = 0; i < 100; i++) {
11968 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11973 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11974 data[1] |= TG3_STD_LOOPBACK_FAILED;
11975 if (tg3_flag(tp, TSO_CAPABLE) &&
11976 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11977 data[1] |= TG3_TSO_LOOPBACK_FAILED;
11978 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11979 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11980 data[1] |= TG3_JMB_LOOPBACK_FAILED;
11983 tg3_phy_lpbk_set(tp, 0, true);
11985 /* All link indications report up, but the hardware
11986 * isn't really ready for about 20 msec. Double it
11991 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11992 data[2] |= TG3_STD_LOOPBACK_FAILED;
11993 if (tg3_flag(tp, TSO_CAPABLE) &&
11994 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11995 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11996 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11997 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11998 data[2] |= TG3_JMB_LOOPBACK_FAILED;
12001 /* Re-enable gphy autopowerdown. */
12002 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12003 tg3_phy_toggle_apd(tp, true);
12006 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12009 tp->phy_flags |= eee_cap;
12014 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12017 struct tg3 *tp = netdev_priv(dev);
12018 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12020 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12021 tg3_power_up(tp)) {
12022 etest->flags |= ETH_TEST_FL_FAILED;
12023 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12027 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12029 if (tg3_test_nvram(tp) != 0) {
12030 etest->flags |= ETH_TEST_FL_FAILED;
12033 if (!doextlpbk && tg3_test_link(tp)) {
12034 etest->flags |= ETH_TEST_FL_FAILED;
12037 if (etest->flags & ETH_TEST_FL_OFFLINE) {
12038 int err, err2 = 0, irq_sync = 0;
12040 if (netif_running(dev)) {
12042 tg3_netif_stop(tp);
12046 tg3_full_lock(tp, irq_sync);
12048 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12049 err = tg3_nvram_lock(tp);
12050 tg3_halt_cpu(tp, RX_CPU_BASE);
12051 if (!tg3_flag(tp, 5705_PLUS))
12052 tg3_halt_cpu(tp, TX_CPU_BASE);
12054 tg3_nvram_unlock(tp);
12056 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12059 if (tg3_test_registers(tp) != 0) {
12060 etest->flags |= ETH_TEST_FL_FAILED;
12064 if (tg3_test_memory(tp) != 0) {
12065 etest->flags |= ETH_TEST_FL_FAILED;
12070 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12072 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12073 etest->flags |= ETH_TEST_FL_FAILED;
12075 tg3_full_unlock(tp);
12077 if (tg3_test_interrupt(tp) != 0) {
12078 etest->flags |= ETH_TEST_FL_FAILED;
12082 tg3_full_lock(tp, 0);
12084 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12085 if (netif_running(dev)) {
12086 tg3_flag_set(tp, INIT_COMPLETE);
12087 err2 = tg3_restart_hw(tp, 1);
12089 tg3_netif_start(tp);
12092 tg3_full_unlock(tp);
12094 if (irq_sync && !err2)
12097 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12098 tg3_power_down(tp);
12102 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12104 struct mii_ioctl_data *data = if_mii(ifr);
12105 struct tg3 *tp = netdev_priv(dev);
12108 if (tg3_flag(tp, USE_PHYLIB)) {
12109 struct phy_device *phydev;
12110 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12112 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12113 return phy_mii_ioctl(phydev, ifr, cmd);
12118 data->phy_id = tp->phy_addr;
12121 case SIOCGMIIREG: {
12124 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12125 break; /* We have no PHY */
12127 if (!netif_running(dev))
12130 spin_lock_bh(&tp->lock);
12131 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12132 spin_unlock_bh(&tp->lock);
12134 data->val_out = mii_regval;
12140 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12141 break; /* We have no PHY */
12143 if (!netif_running(dev))
12146 spin_lock_bh(&tp->lock);
12147 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12148 spin_unlock_bh(&tp->lock);
12156 return -EOPNOTSUPP;
12159 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12161 struct tg3 *tp = netdev_priv(dev);
12163 memcpy(ec, &tp->coal, sizeof(*ec));
12167 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12169 struct tg3 *tp = netdev_priv(dev);
12170 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12171 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12173 if (!tg3_flag(tp, 5705_PLUS)) {
12174 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12175 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12176 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12177 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12180 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12181 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12182 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12183 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12184 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12185 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12186 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12187 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12188 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12189 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12192 /* No rx interrupts will be generated if both are zero */
12193 if ((ec->rx_coalesce_usecs == 0) &&
12194 (ec->rx_max_coalesced_frames == 0))
12197 /* No tx interrupts will be generated if both are zero */
12198 if ((ec->tx_coalesce_usecs == 0) &&
12199 (ec->tx_max_coalesced_frames == 0))
12202 /* Only copy relevant parameters, ignore all others. */
12203 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12204 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12205 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12206 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12207 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12208 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12209 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12210 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12211 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12213 if (netif_running(dev)) {
12214 tg3_full_lock(tp, 0);
12215 __tg3_set_coalesce(tp, &tp->coal);
12216 tg3_full_unlock(tp);
12221 static const struct ethtool_ops tg3_ethtool_ops = {
12222 .get_settings = tg3_get_settings,
12223 .set_settings = tg3_set_settings,
12224 .get_drvinfo = tg3_get_drvinfo,
12225 .get_regs_len = tg3_get_regs_len,
12226 .get_regs = tg3_get_regs,
12227 .get_wol = tg3_get_wol,
12228 .set_wol = tg3_set_wol,
12229 .get_msglevel = tg3_get_msglevel,
12230 .set_msglevel = tg3_set_msglevel,
12231 .nway_reset = tg3_nway_reset,
12232 .get_link = ethtool_op_get_link,
12233 .get_eeprom_len = tg3_get_eeprom_len,
12234 .get_eeprom = tg3_get_eeprom,
12235 .set_eeprom = tg3_set_eeprom,
12236 .get_ringparam = tg3_get_ringparam,
12237 .set_ringparam = tg3_set_ringparam,
12238 .get_pauseparam = tg3_get_pauseparam,
12239 .set_pauseparam = tg3_set_pauseparam,
12240 .self_test = tg3_self_test,
12241 .get_strings = tg3_get_strings,
12242 .set_phys_id = tg3_set_phys_id,
12243 .get_ethtool_stats = tg3_get_ethtool_stats,
12244 .get_coalesce = tg3_get_coalesce,
12245 .set_coalesce = tg3_set_coalesce,
12246 .get_sset_count = tg3_get_sset_count,
12247 .get_rxnfc = tg3_get_rxnfc,
12248 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12249 .get_rxfh_indir = tg3_get_rxfh_indir,
12250 .set_rxfh_indir = tg3_set_rxfh_indir,
12253 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12254 struct rtnl_link_stats64 *stats)
12256 struct tg3 *tp = netdev_priv(dev);
12259 return &tp->net_stats_prev;
12261 spin_lock_bh(&tp->lock);
12262 tg3_get_nstats(tp, stats);
12263 spin_unlock_bh(&tp->lock);
12268 static void tg3_set_rx_mode(struct net_device *dev)
12270 struct tg3 *tp = netdev_priv(dev);
12272 if (!netif_running(dev))
12275 tg3_full_lock(tp, 0);
12276 __tg3_set_rx_mode(dev);
12277 tg3_full_unlock(tp);
12280 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12283 dev->mtu = new_mtu;
12285 if (new_mtu > ETH_DATA_LEN) {
12286 if (tg3_flag(tp, 5780_CLASS)) {
12287 netdev_update_features(dev);
12288 tg3_flag_clear(tp, TSO_CAPABLE);
12290 tg3_flag_set(tp, JUMBO_RING_ENABLE);
12293 if (tg3_flag(tp, 5780_CLASS)) {
12294 tg3_flag_set(tp, TSO_CAPABLE);
12295 netdev_update_features(dev);
12297 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12301 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12303 struct tg3 *tp = netdev_priv(dev);
12304 int err, reset_phy = 0;
12306 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12309 if (!netif_running(dev)) {
12310 /* We'll just catch it later when the
12313 tg3_set_mtu(dev, tp, new_mtu);
12319 tg3_netif_stop(tp);
12321 tg3_full_lock(tp, 1);
12323 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12325 tg3_set_mtu(dev, tp, new_mtu);
12327 /* Reset PHY, otherwise the read DMA engine will be in a mode that
12328 * breaks all requests to 256 bytes.
12330 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12333 err = tg3_restart_hw(tp, reset_phy);
12336 tg3_netif_start(tp);
12338 tg3_full_unlock(tp);
12346 static const struct net_device_ops tg3_netdev_ops = {
12347 .ndo_open = tg3_open,
12348 .ndo_stop = tg3_close,
12349 .ndo_start_xmit = tg3_start_xmit,
12350 .ndo_get_stats64 = tg3_get_stats64,
12351 .ndo_validate_addr = eth_validate_addr,
12352 .ndo_set_rx_mode = tg3_set_rx_mode,
12353 .ndo_set_mac_address = tg3_set_mac_addr,
12354 .ndo_do_ioctl = tg3_ioctl,
12355 .ndo_tx_timeout = tg3_tx_timeout,
12356 .ndo_change_mtu = tg3_change_mtu,
12357 .ndo_fix_features = tg3_fix_features,
12358 .ndo_set_features = tg3_set_features,
12359 #ifdef CONFIG_NET_POLL_CONTROLLER
12360 .ndo_poll_controller = tg3_poll_controller,
12364 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12366 u32 cursize, val, magic;
12368 tp->nvram_size = EEPROM_CHIP_SIZE;
12370 if (tg3_nvram_read(tp, 0, &magic) != 0)
12373 if ((magic != TG3_EEPROM_MAGIC) &&
12374 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12375 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12379 * Size the chip by reading offsets at increasing powers of two.
12380 * When we encounter our validation signature, we know the addressing
12381 * has wrapped around, and thus have our chip size.
12385 while (cursize < tp->nvram_size) {
12386 if (tg3_nvram_read(tp, cursize, &val) != 0)
12395 tp->nvram_size = cursize;
12398 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12402 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12405 /* Selfboot format */
12406 if (val != TG3_EEPROM_MAGIC) {
12407 tg3_get_eeprom_size(tp);
12411 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12413 /* This is confusing. We want to operate on the
12414 * 16-bit value at offset 0xf2. The tg3_nvram_read()
12415 * call will read from NVRAM and byteswap the data
12416 * according to the byteswapping settings for all
12417 * other register accesses. This ensures the data we
12418 * want will always reside in the lower 16-bits.
12419 * However, the data in NVRAM is in LE format, which
12420 * means the data from the NVRAM read will always be
12421 * opposite the endianness of the CPU. The 16-bit
12422 * byteswap then brings the data to CPU endianness.
12424 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12428 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12431 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12435 nvcfg1 = tr32(NVRAM_CFG1);
12436 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12437 tg3_flag_set(tp, FLASH);
12439 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12440 tw32(NVRAM_CFG1, nvcfg1);
12443 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12444 tg3_flag(tp, 5780_CLASS)) {
12445 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12446 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12447 tp->nvram_jedecnum = JEDEC_ATMEL;
12448 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12449 tg3_flag_set(tp, NVRAM_BUFFERED);
12451 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12452 tp->nvram_jedecnum = JEDEC_ATMEL;
12453 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12455 case FLASH_VENDOR_ATMEL_EEPROM:
12456 tp->nvram_jedecnum = JEDEC_ATMEL;
12457 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12458 tg3_flag_set(tp, NVRAM_BUFFERED);
12460 case FLASH_VENDOR_ST:
12461 tp->nvram_jedecnum = JEDEC_ST;
12462 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12463 tg3_flag_set(tp, NVRAM_BUFFERED);
12465 case FLASH_VENDOR_SAIFUN:
12466 tp->nvram_jedecnum = JEDEC_SAIFUN;
12467 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12469 case FLASH_VENDOR_SST_SMALL:
12470 case FLASH_VENDOR_SST_LARGE:
12471 tp->nvram_jedecnum = JEDEC_SST;
12472 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12476 tp->nvram_jedecnum = JEDEC_ATMEL;
12477 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12478 tg3_flag_set(tp, NVRAM_BUFFERED);
12482 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12484 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12485 case FLASH_5752PAGE_SIZE_256:
12486 tp->nvram_pagesize = 256;
12488 case FLASH_5752PAGE_SIZE_512:
12489 tp->nvram_pagesize = 512;
12491 case FLASH_5752PAGE_SIZE_1K:
12492 tp->nvram_pagesize = 1024;
12494 case FLASH_5752PAGE_SIZE_2K:
12495 tp->nvram_pagesize = 2048;
12497 case FLASH_5752PAGE_SIZE_4K:
12498 tp->nvram_pagesize = 4096;
12500 case FLASH_5752PAGE_SIZE_264:
12501 tp->nvram_pagesize = 264;
12503 case FLASH_5752PAGE_SIZE_528:
12504 tp->nvram_pagesize = 528;
12509 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12513 nvcfg1 = tr32(NVRAM_CFG1);
12515 /* NVRAM protection for TPM */
12516 if (nvcfg1 & (1 << 27))
12517 tg3_flag_set(tp, PROTECTED_NVRAM);
12519 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12520 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12521 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12522 tp->nvram_jedecnum = JEDEC_ATMEL;
12523 tg3_flag_set(tp, NVRAM_BUFFERED);
12525 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12526 tp->nvram_jedecnum = JEDEC_ATMEL;
12527 tg3_flag_set(tp, NVRAM_BUFFERED);
12528 tg3_flag_set(tp, FLASH);
12530 case FLASH_5752VENDOR_ST_M45PE10:
12531 case FLASH_5752VENDOR_ST_M45PE20:
12532 case FLASH_5752VENDOR_ST_M45PE40:
12533 tp->nvram_jedecnum = JEDEC_ST;
12534 tg3_flag_set(tp, NVRAM_BUFFERED);
12535 tg3_flag_set(tp, FLASH);
12539 if (tg3_flag(tp, FLASH)) {
12540 tg3_nvram_get_pagesize(tp, nvcfg1);
12542 /* For eeprom, set pagesize to maximum eeprom size */
12543 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12545 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12546 tw32(NVRAM_CFG1, nvcfg1);
12550 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12552 u32 nvcfg1, protect = 0;
12554 nvcfg1 = tr32(NVRAM_CFG1);
12556 /* NVRAM protection for TPM */
12557 if (nvcfg1 & (1 << 27)) {
12558 tg3_flag_set(tp, PROTECTED_NVRAM);
12562 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12564 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12565 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12566 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12567 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12568 tp->nvram_jedecnum = JEDEC_ATMEL;
12569 tg3_flag_set(tp, NVRAM_BUFFERED);
12570 tg3_flag_set(tp, FLASH);
12571 tp->nvram_pagesize = 264;
12572 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12573 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12574 tp->nvram_size = (protect ? 0x3e200 :
12575 TG3_NVRAM_SIZE_512KB);
12576 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12577 tp->nvram_size = (protect ? 0x1f200 :
12578 TG3_NVRAM_SIZE_256KB);
12580 tp->nvram_size = (protect ? 0x1f200 :
12581 TG3_NVRAM_SIZE_128KB);
12583 case FLASH_5752VENDOR_ST_M45PE10:
12584 case FLASH_5752VENDOR_ST_M45PE20:
12585 case FLASH_5752VENDOR_ST_M45PE40:
12586 tp->nvram_jedecnum = JEDEC_ST;
12587 tg3_flag_set(tp, NVRAM_BUFFERED);
12588 tg3_flag_set(tp, FLASH);
12589 tp->nvram_pagesize = 256;
12590 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12591 tp->nvram_size = (protect ?
12592 TG3_NVRAM_SIZE_64KB :
12593 TG3_NVRAM_SIZE_128KB);
12594 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12595 tp->nvram_size = (protect ?
12596 TG3_NVRAM_SIZE_64KB :
12597 TG3_NVRAM_SIZE_256KB);
12599 tp->nvram_size = (protect ?
12600 TG3_NVRAM_SIZE_128KB :
12601 TG3_NVRAM_SIZE_512KB);
12606 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12610 nvcfg1 = tr32(NVRAM_CFG1);
12612 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12613 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12614 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12615 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12616 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12617 tp->nvram_jedecnum = JEDEC_ATMEL;
12618 tg3_flag_set(tp, NVRAM_BUFFERED);
12619 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12621 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12622 tw32(NVRAM_CFG1, nvcfg1);
12624 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12625 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12626 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12627 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12628 tp->nvram_jedecnum = JEDEC_ATMEL;
12629 tg3_flag_set(tp, NVRAM_BUFFERED);
12630 tg3_flag_set(tp, FLASH);
12631 tp->nvram_pagesize = 264;
12633 case FLASH_5752VENDOR_ST_M45PE10:
12634 case FLASH_5752VENDOR_ST_M45PE20:
12635 case FLASH_5752VENDOR_ST_M45PE40:
12636 tp->nvram_jedecnum = JEDEC_ST;
12637 tg3_flag_set(tp, NVRAM_BUFFERED);
12638 tg3_flag_set(tp, FLASH);
12639 tp->nvram_pagesize = 256;
12644 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12646 u32 nvcfg1, protect = 0;
12648 nvcfg1 = tr32(NVRAM_CFG1);
12650 /* NVRAM protection for TPM */
12651 if (nvcfg1 & (1 << 27)) {
12652 tg3_flag_set(tp, PROTECTED_NVRAM);
12656 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12658 case FLASH_5761VENDOR_ATMEL_ADB021D:
12659 case FLASH_5761VENDOR_ATMEL_ADB041D:
12660 case FLASH_5761VENDOR_ATMEL_ADB081D:
12661 case FLASH_5761VENDOR_ATMEL_ADB161D:
12662 case FLASH_5761VENDOR_ATMEL_MDB021D:
12663 case FLASH_5761VENDOR_ATMEL_MDB041D:
12664 case FLASH_5761VENDOR_ATMEL_MDB081D:
12665 case FLASH_5761VENDOR_ATMEL_MDB161D:
12666 tp->nvram_jedecnum = JEDEC_ATMEL;
12667 tg3_flag_set(tp, NVRAM_BUFFERED);
12668 tg3_flag_set(tp, FLASH);
12669 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12670 tp->nvram_pagesize = 256;
12672 case FLASH_5761VENDOR_ST_A_M45PE20:
12673 case FLASH_5761VENDOR_ST_A_M45PE40:
12674 case FLASH_5761VENDOR_ST_A_M45PE80:
12675 case FLASH_5761VENDOR_ST_A_M45PE16:
12676 case FLASH_5761VENDOR_ST_M_M45PE20:
12677 case FLASH_5761VENDOR_ST_M_M45PE40:
12678 case FLASH_5761VENDOR_ST_M_M45PE80:
12679 case FLASH_5761VENDOR_ST_M_M45PE16:
12680 tp->nvram_jedecnum = JEDEC_ST;
12681 tg3_flag_set(tp, NVRAM_BUFFERED);
12682 tg3_flag_set(tp, FLASH);
12683 tp->nvram_pagesize = 256;
12688 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12691 case FLASH_5761VENDOR_ATMEL_ADB161D:
12692 case FLASH_5761VENDOR_ATMEL_MDB161D:
12693 case FLASH_5761VENDOR_ST_A_M45PE16:
12694 case FLASH_5761VENDOR_ST_M_M45PE16:
12695 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12697 case FLASH_5761VENDOR_ATMEL_ADB081D:
12698 case FLASH_5761VENDOR_ATMEL_MDB081D:
12699 case FLASH_5761VENDOR_ST_A_M45PE80:
12700 case FLASH_5761VENDOR_ST_M_M45PE80:
12701 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12703 case FLASH_5761VENDOR_ATMEL_ADB041D:
12704 case FLASH_5761VENDOR_ATMEL_MDB041D:
12705 case FLASH_5761VENDOR_ST_A_M45PE40:
12706 case FLASH_5761VENDOR_ST_M_M45PE40:
12707 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12709 case FLASH_5761VENDOR_ATMEL_ADB021D:
12710 case FLASH_5761VENDOR_ATMEL_MDB021D:
12711 case FLASH_5761VENDOR_ST_A_M45PE20:
12712 case FLASH_5761VENDOR_ST_M_M45PE20:
12713 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12719 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12721 tp->nvram_jedecnum = JEDEC_ATMEL;
12722 tg3_flag_set(tp, NVRAM_BUFFERED);
12723 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12726 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12730 nvcfg1 = tr32(NVRAM_CFG1);
12732 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12733 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12734 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12735 tp->nvram_jedecnum = JEDEC_ATMEL;
12736 tg3_flag_set(tp, NVRAM_BUFFERED);
12737 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12739 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12740 tw32(NVRAM_CFG1, nvcfg1);
12742 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12743 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12744 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12745 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12746 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12747 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12748 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12749 tp->nvram_jedecnum = JEDEC_ATMEL;
12750 tg3_flag_set(tp, NVRAM_BUFFERED);
12751 tg3_flag_set(tp, FLASH);
12753 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12754 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12755 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12756 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12757 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12759 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12760 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12761 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12763 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12764 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12765 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12769 case FLASH_5752VENDOR_ST_M45PE10:
12770 case FLASH_5752VENDOR_ST_M45PE20:
12771 case FLASH_5752VENDOR_ST_M45PE40:
12772 tp->nvram_jedecnum = JEDEC_ST;
12773 tg3_flag_set(tp, NVRAM_BUFFERED);
12774 tg3_flag_set(tp, FLASH);
12776 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12777 case FLASH_5752VENDOR_ST_M45PE10:
12778 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12780 case FLASH_5752VENDOR_ST_M45PE20:
12781 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12783 case FLASH_5752VENDOR_ST_M45PE40:
12784 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12789 tg3_flag_set(tp, NO_NVRAM);
12793 tg3_nvram_get_pagesize(tp, nvcfg1);
12794 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12795 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12799 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12803 nvcfg1 = tr32(NVRAM_CFG1);
12805 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12806 case FLASH_5717VENDOR_ATMEL_EEPROM:
12807 case FLASH_5717VENDOR_MICRO_EEPROM:
12808 tp->nvram_jedecnum = JEDEC_ATMEL;
12809 tg3_flag_set(tp, NVRAM_BUFFERED);
12810 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12812 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12813 tw32(NVRAM_CFG1, nvcfg1);
12815 case FLASH_5717VENDOR_ATMEL_MDB011D:
12816 case FLASH_5717VENDOR_ATMEL_ADB011B:
12817 case FLASH_5717VENDOR_ATMEL_ADB011D:
12818 case FLASH_5717VENDOR_ATMEL_MDB021D:
12819 case FLASH_5717VENDOR_ATMEL_ADB021B:
12820 case FLASH_5717VENDOR_ATMEL_ADB021D:
12821 case FLASH_5717VENDOR_ATMEL_45USPT:
12822 tp->nvram_jedecnum = JEDEC_ATMEL;
12823 tg3_flag_set(tp, NVRAM_BUFFERED);
12824 tg3_flag_set(tp, FLASH);
12826 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12827 case FLASH_5717VENDOR_ATMEL_MDB021D:
12828 /* Detect size with tg3_nvram_get_size() */
12830 case FLASH_5717VENDOR_ATMEL_ADB021B:
12831 case FLASH_5717VENDOR_ATMEL_ADB021D:
12832 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12835 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12839 case FLASH_5717VENDOR_ST_M_M25PE10:
12840 case FLASH_5717VENDOR_ST_A_M25PE10:
12841 case FLASH_5717VENDOR_ST_M_M45PE10:
12842 case FLASH_5717VENDOR_ST_A_M45PE10:
12843 case FLASH_5717VENDOR_ST_M_M25PE20:
12844 case FLASH_5717VENDOR_ST_A_M25PE20:
12845 case FLASH_5717VENDOR_ST_M_M45PE20:
12846 case FLASH_5717VENDOR_ST_A_M45PE20:
12847 case FLASH_5717VENDOR_ST_25USPT:
12848 case FLASH_5717VENDOR_ST_45USPT:
12849 tp->nvram_jedecnum = JEDEC_ST;
12850 tg3_flag_set(tp, NVRAM_BUFFERED);
12851 tg3_flag_set(tp, FLASH);
12853 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12854 case FLASH_5717VENDOR_ST_M_M25PE20:
12855 case FLASH_5717VENDOR_ST_M_M45PE20:
12856 /* Detect size with tg3_nvram_get_size() */
12858 case FLASH_5717VENDOR_ST_A_M25PE20:
12859 case FLASH_5717VENDOR_ST_A_M45PE20:
12860 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12863 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12868 tg3_flag_set(tp, NO_NVRAM);
12872 tg3_nvram_get_pagesize(tp, nvcfg1);
12873 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12874 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12877 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12879 u32 nvcfg1, nvmpinstrp;
12881 nvcfg1 = tr32(NVRAM_CFG1);
12882 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12884 switch (nvmpinstrp) {
12885 case FLASH_5720_EEPROM_HD:
12886 case FLASH_5720_EEPROM_LD:
12887 tp->nvram_jedecnum = JEDEC_ATMEL;
12888 tg3_flag_set(tp, NVRAM_BUFFERED);
12890 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12891 tw32(NVRAM_CFG1, nvcfg1);
12892 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12893 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12895 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12897 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12898 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12899 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12900 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12901 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12902 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12903 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12904 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12905 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12906 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12907 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12908 case FLASH_5720VENDOR_ATMEL_45USPT:
12909 tp->nvram_jedecnum = JEDEC_ATMEL;
12910 tg3_flag_set(tp, NVRAM_BUFFERED);
12911 tg3_flag_set(tp, FLASH);
12913 switch (nvmpinstrp) {
12914 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12915 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12916 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12917 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12919 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12920 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12921 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12922 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12924 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12925 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12926 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12929 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12933 case FLASH_5720VENDOR_M_ST_M25PE10:
12934 case FLASH_5720VENDOR_M_ST_M45PE10:
12935 case FLASH_5720VENDOR_A_ST_M25PE10:
12936 case FLASH_5720VENDOR_A_ST_M45PE10:
12937 case FLASH_5720VENDOR_M_ST_M25PE20:
12938 case FLASH_5720VENDOR_M_ST_M45PE20:
12939 case FLASH_5720VENDOR_A_ST_M25PE20:
12940 case FLASH_5720VENDOR_A_ST_M45PE20:
12941 case FLASH_5720VENDOR_M_ST_M25PE40:
12942 case FLASH_5720VENDOR_M_ST_M45PE40:
12943 case FLASH_5720VENDOR_A_ST_M25PE40:
12944 case FLASH_5720VENDOR_A_ST_M45PE40:
12945 case FLASH_5720VENDOR_M_ST_M25PE80:
12946 case FLASH_5720VENDOR_M_ST_M45PE80:
12947 case FLASH_5720VENDOR_A_ST_M25PE80:
12948 case FLASH_5720VENDOR_A_ST_M45PE80:
12949 case FLASH_5720VENDOR_ST_25USPT:
12950 case FLASH_5720VENDOR_ST_45USPT:
12951 tp->nvram_jedecnum = JEDEC_ST;
12952 tg3_flag_set(tp, NVRAM_BUFFERED);
12953 tg3_flag_set(tp, FLASH);
12955 switch (nvmpinstrp) {
12956 case FLASH_5720VENDOR_M_ST_M25PE20:
12957 case FLASH_5720VENDOR_M_ST_M45PE20:
12958 case FLASH_5720VENDOR_A_ST_M25PE20:
12959 case FLASH_5720VENDOR_A_ST_M45PE20:
12960 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12962 case FLASH_5720VENDOR_M_ST_M25PE40:
12963 case FLASH_5720VENDOR_M_ST_M45PE40:
12964 case FLASH_5720VENDOR_A_ST_M25PE40:
12965 case FLASH_5720VENDOR_A_ST_M45PE40:
12966 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12968 case FLASH_5720VENDOR_M_ST_M25PE80:
12969 case FLASH_5720VENDOR_M_ST_M45PE80:
12970 case FLASH_5720VENDOR_A_ST_M25PE80:
12971 case FLASH_5720VENDOR_A_ST_M45PE80:
12972 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12975 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12980 tg3_flag_set(tp, NO_NVRAM);
12984 tg3_nvram_get_pagesize(tp, nvcfg1);
12985 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12986 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12989 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12990 static void __devinit tg3_nvram_init(struct tg3 *tp)
12992 tw32_f(GRC_EEPROM_ADDR,
12993 (EEPROM_ADDR_FSM_RESET |
12994 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12995 EEPROM_ADDR_CLKPERD_SHIFT)));
12999 /* Enable seeprom accesses. */
13000 tw32_f(GRC_LOCAL_CTRL,
13001 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13004 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13005 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13006 tg3_flag_set(tp, NVRAM);
13008 if (tg3_nvram_lock(tp)) {
13009 netdev_warn(tp->dev,
13010 "Cannot get nvram lock, %s failed\n",
13014 tg3_enable_nvram_access(tp);
13016 tp->nvram_size = 0;
13018 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13019 tg3_get_5752_nvram_info(tp);
13020 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13021 tg3_get_5755_nvram_info(tp);
13022 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13023 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13024 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13025 tg3_get_5787_nvram_info(tp);
13026 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13027 tg3_get_5761_nvram_info(tp);
13028 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13029 tg3_get_5906_nvram_info(tp);
13030 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13031 tg3_flag(tp, 57765_CLASS))
13032 tg3_get_57780_nvram_info(tp);
13033 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13034 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13035 tg3_get_5717_nvram_info(tp);
13036 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13037 tg3_get_5720_nvram_info(tp);
13039 tg3_get_nvram_info(tp);
13041 if (tp->nvram_size == 0)
13042 tg3_get_nvram_size(tp);
13044 tg3_disable_nvram_access(tp);
13045 tg3_nvram_unlock(tp);
13048 tg3_flag_clear(tp, NVRAM);
13049 tg3_flag_clear(tp, NVRAM_BUFFERED);
13051 tg3_get_eeprom_size(tp);
13055 struct subsys_tbl_ent {
13056 u16 subsys_vendor, subsys_devid;
13060 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13061 /* Broadcom boards. */
13062 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13063 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13064 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13065 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13066 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13067 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13068 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13069 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13070 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13071 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13072 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13073 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13074 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13075 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13076 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13077 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13078 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13079 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13080 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13081 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13082 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13083 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13086 { TG3PCI_SUBVENDOR_ID_3COM,
13087 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13088 { TG3PCI_SUBVENDOR_ID_3COM,
13089 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13090 { TG3PCI_SUBVENDOR_ID_3COM,
13091 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13092 { TG3PCI_SUBVENDOR_ID_3COM,
13093 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13094 { TG3PCI_SUBVENDOR_ID_3COM,
13095 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13098 { TG3PCI_SUBVENDOR_ID_DELL,
13099 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13100 { TG3PCI_SUBVENDOR_ID_DELL,
13101 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13102 { TG3PCI_SUBVENDOR_ID_DELL,
13103 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13104 { TG3PCI_SUBVENDOR_ID_DELL,
13105 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13107 /* Compaq boards. */
13108 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13109 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13110 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13111 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13112 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13113 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13114 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13115 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13116 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13117 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13120 { TG3PCI_SUBVENDOR_ID_IBM,
13121 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13124 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13128 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13129 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13130 tp->pdev->subsystem_vendor) &&
13131 (subsys_id_to_phy_id[i].subsys_devid ==
13132 tp->pdev->subsystem_device))
13133 return &subsys_id_to_phy_id[i];
13138 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13142 tp->phy_id = TG3_PHY_ID_INVALID;
13143 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13145 /* Assume an onboard device and WOL capable by default. */
13146 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13147 tg3_flag_set(tp, WOL_CAP);
13149 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13150 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13151 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13152 tg3_flag_set(tp, IS_NIC);
13154 val = tr32(VCPU_CFGSHDW);
13155 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13156 tg3_flag_set(tp, ASPM_WORKAROUND);
13157 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13158 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13159 tg3_flag_set(tp, WOL_ENABLE);
13160 device_set_wakeup_enable(&tp->pdev->dev, true);
13165 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13166 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13167 u32 nic_cfg, led_cfg;
13168 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13169 int eeprom_phy_serdes = 0;
13171 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13172 tp->nic_sram_data_cfg = nic_cfg;
13174 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13175 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13176 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13177 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13178 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13179 (ver > 0) && (ver < 0x100))
13180 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13182 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13183 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13185 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13186 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13187 eeprom_phy_serdes = 1;
13189 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13190 if (nic_phy_id != 0) {
13191 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13192 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13194 eeprom_phy_id = (id1 >> 16) << 10;
13195 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13196 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13200 tp->phy_id = eeprom_phy_id;
13201 if (eeprom_phy_serdes) {
13202 if (!tg3_flag(tp, 5705_PLUS))
13203 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13205 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13208 if (tg3_flag(tp, 5750_PLUS))
13209 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13210 SHASTA_EXT_LED_MODE_MASK);
13212 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13216 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13217 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13220 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13221 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13224 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13225 tp->led_ctrl = LED_CTRL_MODE_MAC;
13227 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13228 * read on some older 5700/5701 bootcode.
13230 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13232 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13234 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13238 case SHASTA_EXT_LED_SHARED:
13239 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13240 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13241 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13242 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13243 LED_CTRL_MODE_PHY_2);
13246 case SHASTA_EXT_LED_MAC:
13247 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13250 case SHASTA_EXT_LED_COMBO:
13251 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13252 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13253 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13254 LED_CTRL_MODE_PHY_2);
13259 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13260 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13261 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13262 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13264 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13265 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13267 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13268 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13269 if ((tp->pdev->subsystem_vendor ==
13270 PCI_VENDOR_ID_ARIMA) &&
13271 (tp->pdev->subsystem_device == 0x205a ||
13272 tp->pdev->subsystem_device == 0x2063))
13273 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13275 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13276 tg3_flag_set(tp, IS_NIC);
13279 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13280 tg3_flag_set(tp, ENABLE_ASF);
13281 if (tg3_flag(tp, 5750_PLUS))
13282 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13285 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13286 tg3_flag(tp, 5750_PLUS))
13287 tg3_flag_set(tp, ENABLE_APE);
13289 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13290 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13291 tg3_flag_clear(tp, WOL_CAP);
13293 if (tg3_flag(tp, WOL_CAP) &&
13294 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13295 tg3_flag_set(tp, WOL_ENABLE);
13296 device_set_wakeup_enable(&tp->pdev->dev, true);
13299 if (cfg2 & (1 << 17))
13300 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13302 /* serdes signal pre-emphasis in register 0x590 set by */
13303 /* bootcode if bit 18 is set */
13304 if (cfg2 & (1 << 18))
13305 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13307 if ((tg3_flag(tp, 57765_PLUS) ||
13308 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13309 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13310 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13311 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13313 if (tg3_flag(tp, PCI_EXPRESS) &&
13314 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13315 !tg3_flag(tp, 57765_PLUS)) {
13318 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13319 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13320 tg3_flag_set(tp, ASPM_WORKAROUND);
13323 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13324 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13325 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13326 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13327 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13328 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13331 if (tg3_flag(tp, WOL_CAP))
13332 device_set_wakeup_enable(&tp->pdev->dev,
13333 tg3_flag(tp, WOL_ENABLE));
13335 device_set_wakeup_capable(&tp->pdev->dev, false);
13338 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13343 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13344 tw32(OTP_CTRL, cmd);
13346 /* Wait for up to 1 ms for command to execute. */
13347 for (i = 0; i < 100; i++) {
13348 val = tr32(OTP_STATUS);
13349 if (val & OTP_STATUS_CMD_DONE)
13354 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13357 /* Read the gphy configuration from the OTP region of the chip. The gphy
13358 * configuration is a 32-bit value that straddles the alignment boundary.
13359 * We do two 32-bit reads and then shift and merge the results.
13361 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13363 u32 bhalf_otp, thalf_otp;
13365 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13367 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13370 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13372 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13375 thalf_otp = tr32(OTP_READ_DATA);
13377 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13379 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13382 bhalf_otp = tr32(OTP_READ_DATA);
13384 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13387 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13389 u32 adv = ADVERTISED_Autoneg;
13391 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13392 adv |= ADVERTISED_1000baseT_Half |
13393 ADVERTISED_1000baseT_Full;
13395 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13396 adv |= ADVERTISED_100baseT_Half |
13397 ADVERTISED_100baseT_Full |
13398 ADVERTISED_10baseT_Half |
13399 ADVERTISED_10baseT_Full |
13402 adv |= ADVERTISED_FIBRE;
13404 tp->link_config.advertising = adv;
13405 tp->link_config.speed = SPEED_UNKNOWN;
13406 tp->link_config.duplex = DUPLEX_UNKNOWN;
13407 tp->link_config.autoneg = AUTONEG_ENABLE;
13408 tp->link_config.active_speed = SPEED_UNKNOWN;
13409 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13414 static int __devinit tg3_phy_probe(struct tg3 *tp)
13416 u32 hw_phy_id_1, hw_phy_id_2;
13417 u32 hw_phy_id, hw_phy_id_masked;
13420 /* flow control autonegotiation is default behavior */
13421 tg3_flag_set(tp, PAUSE_AUTONEG);
13422 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13424 if (tg3_flag(tp, USE_PHYLIB))
13425 return tg3_phy_init(tp);
13427 /* Reading the PHY ID register can conflict with ASF
13428 * firmware access to the PHY hardware.
13431 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13432 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13434 /* Now read the physical PHY_ID from the chip and verify
13435 * that it is sane. If it doesn't look good, we fall back
13436 * to either the hard-coded table based PHY_ID and failing
13437 * that the value found in the eeprom area.
13439 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13440 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13442 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13443 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13444 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13446 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13449 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13450 tp->phy_id = hw_phy_id;
13451 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13452 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13454 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13456 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13457 /* Do nothing, phy ID already set up in
13458 * tg3_get_eeprom_hw_cfg().
13461 struct subsys_tbl_ent *p;
13463 /* No eeprom signature? Try the hardcoded
13464 * subsys device table.
13466 p = tg3_lookup_by_subsys(tp);
13470 tp->phy_id = p->phy_id;
13472 tp->phy_id == TG3_PHY_ID_BCM8002)
13473 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13477 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13478 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13479 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13480 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13481 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13482 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13483 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13484 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13486 tg3_phy_init_link_config(tp);
13488 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13489 !tg3_flag(tp, ENABLE_APE) &&
13490 !tg3_flag(tp, ENABLE_ASF)) {
13493 tg3_readphy(tp, MII_BMSR, &bmsr);
13494 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13495 (bmsr & BMSR_LSTATUS))
13496 goto skip_phy_reset;
13498 err = tg3_phy_reset(tp);
13502 tg3_phy_set_wirespeed(tp);
13504 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13505 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13506 tp->link_config.flowctrl);
13508 tg3_writephy(tp, MII_BMCR,
13509 BMCR_ANENABLE | BMCR_ANRESTART);
13514 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13515 err = tg3_init_5401phy_dsp(tp);
13519 err = tg3_init_5401phy_dsp(tp);
13525 static void __devinit tg3_read_vpd(struct tg3 *tp)
13528 unsigned int block_end, rosize, len;
13532 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13536 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13538 goto out_not_found;
13540 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13541 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13542 i += PCI_VPD_LRDT_TAG_SIZE;
13544 if (block_end > vpdlen)
13545 goto out_not_found;
13547 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13548 PCI_VPD_RO_KEYWORD_MFR_ID);
13550 len = pci_vpd_info_field_size(&vpd_data[j]);
13552 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13553 if (j + len > block_end || len != 4 ||
13554 memcmp(&vpd_data[j], "1028", 4))
13557 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13558 PCI_VPD_RO_KEYWORD_VENDOR0);
13562 len = pci_vpd_info_field_size(&vpd_data[j]);
13564 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13565 if (j + len > block_end)
13568 memcpy(tp->fw_ver, &vpd_data[j], len);
13569 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13573 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13574 PCI_VPD_RO_KEYWORD_PARTNO);
13576 goto out_not_found;
13578 len = pci_vpd_info_field_size(&vpd_data[i]);
13580 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13581 if (len > TG3_BPN_SIZE ||
13582 (len + i) > vpdlen)
13583 goto out_not_found;
13585 memcpy(tp->board_part_number, &vpd_data[i], len);
13589 if (tp->board_part_number[0])
13593 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13594 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13595 strcpy(tp->board_part_number, "BCM5717");
13596 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13597 strcpy(tp->board_part_number, "BCM5718");
13600 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13601 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13602 strcpy(tp->board_part_number, "BCM57780");
13603 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13604 strcpy(tp->board_part_number, "BCM57760");
13605 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13606 strcpy(tp->board_part_number, "BCM57790");
13607 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13608 strcpy(tp->board_part_number, "BCM57788");
13611 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13612 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13613 strcpy(tp->board_part_number, "BCM57761");
13614 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13615 strcpy(tp->board_part_number, "BCM57765");
13616 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13617 strcpy(tp->board_part_number, "BCM57781");
13618 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13619 strcpy(tp->board_part_number, "BCM57785");
13620 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13621 strcpy(tp->board_part_number, "BCM57791");
13622 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13623 strcpy(tp->board_part_number, "BCM57795");
13626 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13627 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13628 strcpy(tp->board_part_number, "BCM57762");
13629 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13630 strcpy(tp->board_part_number, "BCM57766");
13631 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13632 strcpy(tp->board_part_number, "BCM57782");
13633 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13634 strcpy(tp->board_part_number, "BCM57786");
13637 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13638 strcpy(tp->board_part_number, "BCM95906");
13641 strcpy(tp->board_part_number, "none");
13645 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13649 if (tg3_nvram_read(tp, offset, &val) ||
13650 (val & 0xfc000000) != 0x0c000000 ||
13651 tg3_nvram_read(tp, offset + 4, &val) ||
13658 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13660 u32 val, offset, start, ver_offset;
13662 bool newver = false;
13664 if (tg3_nvram_read(tp, 0xc, &offset) ||
13665 tg3_nvram_read(tp, 0x4, &start))
13668 offset = tg3_nvram_logical_addr(tp, offset);
13670 if (tg3_nvram_read(tp, offset, &val))
13673 if ((val & 0xfc000000) == 0x0c000000) {
13674 if (tg3_nvram_read(tp, offset + 4, &val))
13681 dst_off = strlen(tp->fw_ver);
13684 if (TG3_VER_SIZE - dst_off < 16 ||
13685 tg3_nvram_read(tp, offset + 8, &ver_offset))
13688 offset = offset + ver_offset - start;
13689 for (i = 0; i < 16; i += 4) {
13691 if (tg3_nvram_read_be32(tp, offset + i, &v))
13694 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13699 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13702 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13703 TG3_NVM_BCVER_MAJSFT;
13704 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13705 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13706 "v%d.%02d", major, minor);
13710 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13712 u32 val, major, minor;
13714 /* Use native endian representation */
13715 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13718 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13719 TG3_NVM_HWSB_CFG1_MAJSFT;
13720 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13721 TG3_NVM_HWSB_CFG1_MINSFT;
13723 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13726 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13728 u32 offset, major, minor, build;
13730 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13732 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13735 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13736 case TG3_EEPROM_SB_REVISION_0:
13737 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13739 case TG3_EEPROM_SB_REVISION_2:
13740 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13742 case TG3_EEPROM_SB_REVISION_3:
13743 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13745 case TG3_EEPROM_SB_REVISION_4:
13746 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13748 case TG3_EEPROM_SB_REVISION_5:
13749 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13751 case TG3_EEPROM_SB_REVISION_6:
13752 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13758 if (tg3_nvram_read(tp, offset, &val))
13761 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13762 TG3_EEPROM_SB_EDH_BLD_SHFT;
13763 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13764 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13765 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13767 if (minor > 99 || build > 26)
13770 offset = strlen(tp->fw_ver);
13771 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13772 " v%d.%02d", major, minor);
13775 offset = strlen(tp->fw_ver);
13776 if (offset < TG3_VER_SIZE - 1)
13777 tp->fw_ver[offset] = 'a' + build - 1;
13781 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13783 u32 val, offset, start;
13786 for (offset = TG3_NVM_DIR_START;
13787 offset < TG3_NVM_DIR_END;
13788 offset += TG3_NVM_DIRENT_SIZE) {
13789 if (tg3_nvram_read(tp, offset, &val))
13792 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13796 if (offset == TG3_NVM_DIR_END)
13799 if (!tg3_flag(tp, 5705_PLUS))
13800 start = 0x08000000;
13801 else if (tg3_nvram_read(tp, offset - 4, &start))
13804 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13805 !tg3_fw_img_is_valid(tp, offset) ||
13806 tg3_nvram_read(tp, offset + 8, &val))
13809 offset += val - start;
13811 vlen = strlen(tp->fw_ver);
13813 tp->fw_ver[vlen++] = ',';
13814 tp->fw_ver[vlen++] = ' ';
13816 for (i = 0; i < 4; i++) {
13818 if (tg3_nvram_read_be32(tp, offset, &v))
13821 offset += sizeof(v);
13823 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13824 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13828 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13833 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13839 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13842 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13843 if (apedata != APE_SEG_SIG_MAGIC)
13846 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13847 if (!(apedata & APE_FW_STATUS_READY))
13850 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13852 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13853 tg3_flag_set(tp, APE_HAS_NCSI);
13859 vlen = strlen(tp->fw_ver);
13861 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13863 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13864 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13865 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13866 (apedata & APE_FW_VERSION_BLDMSK));
13869 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13872 bool vpd_vers = false;
13874 if (tp->fw_ver[0] != 0)
13877 if (tg3_flag(tp, NO_NVRAM)) {
13878 strcat(tp->fw_ver, "sb");
13882 if (tg3_nvram_read(tp, 0, &val))
13885 if (val == TG3_EEPROM_MAGIC)
13886 tg3_read_bc_ver(tp);
13887 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13888 tg3_read_sb_ver(tp, val);
13889 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13890 tg3_read_hwsb_ver(tp);
13897 if (tg3_flag(tp, ENABLE_APE)) {
13898 if (tg3_flag(tp, ENABLE_ASF))
13899 tg3_read_dash_ver(tp);
13900 } else if (tg3_flag(tp, ENABLE_ASF)) {
13901 tg3_read_mgmtfw_ver(tp);
13905 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13908 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13910 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13911 return TG3_RX_RET_MAX_SIZE_5717;
13912 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13913 return TG3_RX_RET_MAX_SIZE_5700;
13915 return TG3_RX_RET_MAX_SIZE_5705;
13918 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13919 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13920 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13921 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13925 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13927 struct pci_dev *peer;
13928 unsigned int func, devnr = tp->pdev->devfn & ~7;
13930 for (func = 0; func < 8; func++) {
13931 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13932 if (peer && peer != tp->pdev)
13936 /* 5704 can be configured in single-port mode, set peer to
13937 * tp->pdev in that case.
13945 * We don't need to keep the refcount elevated; there's no way
13946 * to remove one half of this device without removing the other
13953 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
13955 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
13956 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13959 /* All devices that use the alternate
13960 * ASIC REV location have a CPMU.
13962 tg3_flag_set(tp, CPMU_PRESENT);
13964 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13965 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13966 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13967 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13968 reg = TG3PCI_GEN2_PRODID_ASICREV;
13969 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13970 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13971 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13972 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13973 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13974 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13975 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13976 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13977 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13978 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13979 reg = TG3PCI_GEN15_PRODID_ASICREV;
13981 reg = TG3PCI_PRODID_ASICREV;
13983 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
13986 /* Wrong chip ID in 5752 A0. This code can be removed later
13987 * as A0 is not in production.
13989 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13990 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13992 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13993 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13994 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13995 tg3_flag_set(tp, 5717_PLUS);
13997 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13998 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
13999 tg3_flag_set(tp, 57765_CLASS);
14001 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14002 tg3_flag_set(tp, 57765_PLUS);
14004 /* Intentionally exclude ASIC_REV_5906 */
14005 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14006 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14007 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14008 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14009 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14010 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14011 tg3_flag(tp, 57765_PLUS))
14012 tg3_flag_set(tp, 5755_PLUS);
14014 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14015 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14016 tg3_flag_set(tp, 5780_CLASS);
14018 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14019 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14020 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14021 tg3_flag(tp, 5755_PLUS) ||
14022 tg3_flag(tp, 5780_CLASS))
14023 tg3_flag_set(tp, 5750_PLUS);
14025 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14026 tg3_flag(tp, 5750_PLUS))
14027 tg3_flag_set(tp, 5705_PLUS);
14030 static int __devinit tg3_get_invariants(struct tg3 *tp)
14033 u32 pci_state_reg, grc_misc_cfg;
14038 /* Force memory write invalidate off. If we leave it on,
14039 * then on 5700_BX chips we have to enable a workaround.
14040 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14041 * to match the cacheline size. The Broadcom driver have this
14042 * workaround but turns MWI off all the times so never uses
14043 * it. This seems to suggest that the workaround is insufficient.
14045 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14046 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14047 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14049 /* Important! -- Make sure register accesses are byteswapped
14050 * correctly. Also, for those chips that require it, make
14051 * sure that indirect register accesses are enabled before
14052 * the first operation.
14054 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14056 tp->misc_host_ctrl |= (misc_ctrl_reg &
14057 MISC_HOST_CTRL_CHIPREV);
14058 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14059 tp->misc_host_ctrl);
14061 tg3_detect_asic_rev(tp, misc_ctrl_reg);
14063 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14064 * we need to disable memory and use config. cycles
14065 * only to access all registers. The 5702/03 chips
14066 * can mistakenly decode the special cycles from the
14067 * ICH chipsets as memory write cycles, causing corruption
14068 * of register and memory space. Only certain ICH bridges
14069 * will drive special cycles with non-zero data during the
14070 * address phase which can fall within the 5703's address
14071 * range. This is not an ICH bug as the PCI spec allows
14072 * non-zero address during special cycles. However, only
14073 * these ICH bridges are known to drive non-zero addresses
14074 * during special cycles.
14076 * Since special cycles do not cross PCI bridges, we only
14077 * enable this workaround if the 5703 is on the secondary
14078 * bus of these ICH bridges.
14080 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14081 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14082 static struct tg3_dev_id {
14086 } ich_chipsets[] = {
14087 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14089 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14091 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14093 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14097 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14098 struct pci_dev *bridge = NULL;
14100 while (pci_id->vendor != 0) {
14101 bridge = pci_get_device(pci_id->vendor, pci_id->device,
14107 if (pci_id->rev != PCI_ANY_ID) {
14108 if (bridge->revision > pci_id->rev)
14111 if (bridge->subordinate &&
14112 (bridge->subordinate->number ==
14113 tp->pdev->bus->number)) {
14114 tg3_flag_set(tp, ICH_WORKAROUND);
14115 pci_dev_put(bridge);
14121 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14122 static struct tg3_dev_id {
14125 } bridge_chipsets[] = {
14126 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14127 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14130 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14131 struct pci_dev *bridge = NULL;
14133 while (pci_id->vendor != 0) {
14134 bridge = pci_get_device(pci_id->vendor,
14141 if (bridge->subordinate &&
14142 (bridge->subordinate->number <=
14143 tp->pdev->bus->number) &&
14144 (bridge->subordinate->subordinate >=
14145 tp->pdev->bus->number)) {
14146 tg3_flag_set(tp, 5701_DMA_BUG);
14147 pci_dev_put(bridge);
14153 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14154 * DMA addresses > 40-bit. This bridge may have other additional
14155 * 57xx devices behind it in some 4-port NIC designs for example.
14156 * Any tg3 device found behind the bridge will also need the 40-bit
14159 if (tg3_flag(tp, 5780_CLASS)) {
14160 tg3_flag_set(tp, 40BIT_DMA_BUG);
14161 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14163 struct pci_dev *bridge = NULL;
14166 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14167 PCI_DEVICE_ID_SERVERWORKS_EPB,
14169 if (bridge && bridge->subordinate &&
14170 (bridge->subordinate->number <=
14171 tp->pdev->bus->number) &&
14172 (bridge->subordinate->subordinate >=
14173 tp->pdev->bus->number)) {
14174 tg3_flag_set(tp, 40BIT_DMA_BUG);
14175 pci_dev_put(bridge);
14181 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14182 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14183 tp->pdev_peer = tg3_find_peer(tp);
14185 /* Determine TSO capabilities */
14186 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14187 ; /* Do nothing. HW bug. */
14188 else if (tg3_flag(tp, 57765_PLUS))
14189 tg3_flag_set(tp, HW_TSO_3);
14190 else if (tg3_flag(tp, 5755_PLUS) ||
14191 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14192 tg3_flag_set(tp, HW_TSO_2);
14193 else if (tg3_flag(tp, 5750_PLUS)) {
14194 tg3_flag_set(tp, HW_TSO_1);
14195 tg3_flag_set(tp, TSO_BUG);
14196 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14197 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14198 tg3_flag_clear(tp, TSO_BUG);
14199 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14200 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14201 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14202 tg3_flag_set(tp, TSO_BUG);
14203 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14204 tp->fw_needed = FIRMWARE_TG3TSO5;
14206 tp->fw_needed = FIRMWARE_TG3TSO;
14209 /* Selectively allow TSO based on operating conditions */
14210 if (tg3_flag(tp, HW_TSO_1) ||
14211 tg3_flag(tp, HW_TSO_2) ||
14212 tg3_flag(tp, HW_TSO_3) ||
14214 /* For firmware TSO, assume ASF is disabled.
14215 * We'll disable TSO later if we discover ASF
14216 * is enabled in tg3_get_eeprom_hw_cfg().
14218 tg3_flag_set(tp, TSO_CAPABLE);
14220 tg3_flag_clear(tp, TSO_CAPABLE);
14221 tg3_flag_clear(tp, TSO_BUG);
14222 tp->fw_needed = NULL;
14225 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14226 tp->fw_needed = FIRMWARE_TG3;
14230 if (tg3_flag(tp, 5750_PLUS)) {
14231 tg3_flag_set(tp, SUPPORT_MSI);
14232 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14233 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14234 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14235 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14236 tp->pdev_peer == tp->pdev))
14237 tg3_flag_clear(tp, SUPPORT_MSI);
14239 if (tg3_flag(tp, 5755_PLUS) ||
14240 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14241 tg3_flag_set(tp, 1SHOT_MSI);
14244 if (tg3_flag(tp, 57765_PLUS)) {
14245 tg3_flag_set(tp, SUPPORT_MSIX);
14246 tp->irq_max = TG3_IRQ_MAX_VECS;
14247 tg3_rss_init_dflt_indir_tbl(tp);
14251 if (tg3_flag(tp, 5755_PLUS) ||
14252 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14253 tg3_flag_set(tp, SHORT_DMA_BUG);
14255 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14256 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14258 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14259 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14260 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14261 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14263 if (tg3_flag(tp, 57765_PLUS) &&
14264 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14265 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14267 if (!tg3_flag(tp, 5705_PLUS) ||
14268 tg3_flag(tp, 5780_CLASS) ||
14269 tg3_flag(tp, USE_JUMBO_BDFLAG))
14270 tg3_flag_set(tp, JUMBO_CAPABLE);
14272 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14275 if (pci_is_pcie(tp->pdev)) {
14278 tg3_flag_set(tp, PCI_EXPRESS);
14280 pci_read_config_word(tp->pdev,
14281 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14283 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14284 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14286 tg3_flag_clear(tp, HW_TSO_2);
14287 tg3_flag_clear(tp, TSO_CAPABLE);
14289 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14290 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14291 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14292 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14293 tg3_flag_set(tp, CLKREQ_BUG);
14294 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14295 tg3_flag_set(tp, L1PLLPD_EN);
14297 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14298 /* BCM5785 devices are effectively PCIe devices, and should
14299 * follow PCIe codepaths, but do not have a PCIe capabilities
14302 tg3_flag_set(tp, PCI_EXPRESS);
14303 } else if (!tg3_flag(tp, 5705_PLUS) ||
14304 tg3_flag(tp, 5780_CLASS)) {
14305 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14306 if (!tp->pcix_cap) {
14307 dev_err(&tp->pdev->dev,
14308 "Cannot find PCI-X capability, aborting\n");
14312 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14313 tg3_flag_set(tp, PCIX_MODE);
14316 /* If we have an AMD 762 or VIA K8T800 chipset, write
14317 * reordering to the mailbox registers done by the host
14318 * controller can cause major troubles. We read back from
14319 * every mailbox register write to force the writes to be
14320 * posted to the chip in order.
14322 if (pci_dev_present(tg3_write_reorder_chipsets) &&
14323 !tg3_flag(tp, PCI_EXPRESS))
14324 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14326 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14327 &tp->pci_cacheline_sz);
14328 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14329 &tp->pci_lat_timer);
14330 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14331 tp->pci_lat_timer < 64) {
14332 tp->pci_lat_timer = 64;
14333 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14334 tp->pci_lat_timer);
14337 /* Important! -- It is critical that the PCI-X hw workaround
14338 * situation is decided before the first MMIO register access.
14340 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14341 /* 5700 BX chips need to have their TX producer index
14342 * mailboxes written twice to workaround a bug.
14344 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14346 /* If we are in PCI-X mode, enable register write workaround.
14348 * The workaround is to use indirect register accesses
14349 * for all chip writes not to mailbox registers.
14351 if (tg3_flag(tp, PCIX_MODE)) {
14354 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14356 /* The chip can have it's power management PCI config
14357 * space registers clobbered due to this bug.
14358 * So explicitly force the chip into D0 here.
14360 pci_read_config_dword(tp->pdev,
14361 tp->pm_cap + PCI_PM_CTRL,
14363 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14364 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14365 pci_write_config_dword(tp->pdev,
14366 tp->pm_cap + PCI_PM_CTRL,
14369 /* Also, force SERR#/PERR# in PCI command. */
14370 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14371 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14372 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14376 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14377 tg3_flag_set(tp, PCI_HIGH_SPEED);
14378 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14379 tg3_flag_set(tp, PCI_32BIT);
14381 /* Chip-specific fixup from Broadcom driver */
14382 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14383 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14384 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14385 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14388 /* Default fast path register access methods */
14389 tp->read32 = tg3_read32;
14390 tp->write32 = tg3_write32;
14391 tp->read32_mbox = tg3_read32;
14392 tp->write32_mbox = tg3_write32;
14393 tp->write32_tx_mbox = tg3_write32;
14394 tp->write32_rx_mbox = tg3_write32;
14396 /* Various workaround register access methods */
14397 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14398 tp->write32 = tg3_write_indirect_reg32;
14399 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14400 (tg3_flag(tp, PCI_EXPRESS) &&
14401 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14403 * Back to back register writes can cause problems on these
14404 * chips, the workaround is to read back all reg writes
14405 * except those to mailbox regs.
14407 * See tg3_write_indirect_reg32().
14409 tp->write32 = tg3_write_flush_reg32;
14412 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14413 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14414 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14415 tp->write32_rx_mbox = tg3_write_flush_reg32;
14418 if (tg3_flag(tp, ICH_WORKAROUND)) {
14419 tp->read32 = tg3_read_indirect_reg32;
14420 tp->write32 = tg3_write_indirect_reg32;
14421 tp->read32_mbox = tg3_read_indirect_mbox;
14422 tp->write32_mbox = tg3_write_indirect_mbox;
14423 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14424 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14429 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14430 pci_cmd &= ~PCI_COMMAND_MEMORY;
14431 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14433 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14434 tp->read32_mbox = tg3_read32_mbox_5906;
14435 tp->write32_mbox = tg3_write32_mbox_5906;
14436 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14437 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14440 if (tp->write32 == tg3_write_indirect_reg32 ||
14441 (tg3_flag(tp, PCIX_MODE) &&
14442 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14443 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14444 tg3_flag_set(tp, SRAM_USE_CONFIG);
14446 /* The memory arbiter has to be enabled in order for SRAM accesses
14447 * to succeed. Normally on powerup the tg3 chip firmware will make
14448 * sure it is enabled, but other entities such as system netboot
14449 * code might disable it.
14451 val = tr32(MEMARB_MODE);
14452 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14454 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14455 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14456 tg3_flag(tp, 5780_CLASS)) {
14457 if (tg3_flag(tp, PCIX_MODE)) {
14458 pci_read_config_dword(tp->pdev,
14459 tp->pcix_cap + PCI_X_STATUS,
14461 tp->pci_fn = val & 0x7;
14463 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14464 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14465 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14466 NIC_SRAM_CPMUSTAT_SIG) {
14467 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14468 tp->pci_fn = tp->pci_fn ? 1 : 0;
14470 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14471 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14472 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14473 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14474 NIC_SRAM_CPMUSTAT_SIG) {
14475 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14476 TG3_CPMU_STATUS_FSHFT_5719;
14480 /* Get eeprom hw config before calling tg3_set_power_state().
14481 * In particular, the TG3_FLAG_IS_NIC flag must be
14482 * determined before calling tg3_set_power_state() so that
14483 * we know whether or not to switch out of Vaux power.
14484 * When the flag is set, it means that GPIO1 is used for eeprom
14485 * write protect and also implies that it is a LOM where GPIOs
14486 * are not used to switch power.
14488 tg3_get_eeprom_hw_cfg(tp);
14490 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14491 tg3_flag_clear(tp, TSO_CAPABLE);
14492 tg3_flag_clear(tp, TSO_BUG);
14493 tp->fw_needed = NULL;
14496 if (tg3_flag(tp, ENABLE_APE)) {
14497 /* Allow reads and writes to the
14498 * APE register and memory space.
14500 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14501 PCISTATE_ALLOW_APE_SHMEM_WR |
14502 PCISTATE_ALLOW_APE_PSPACE_WR;
14503 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14506 tg3_ape_lock_init(tp);
14509 /* Set up tp->grc_local_ctrl before calling
14510 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14511 * will bring 5700's external PHY out of reset.
14512 * It is also used as eeprom write protect on LOMs.
14514 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14515 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14516 tg3_flag(tp, EEPROM_WRITE_PROT))
14517 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14518 GRC_LCLCTRL_GPIO_OUTPUT1);
14519 /* Unused GPIO3 must be driven as output on 5752 because there
14520 * are no pull-up resistors on unused GPIO pins.
14522 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14523 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14525 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14526 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14527 tg3_flag(tp, 57765_CLASS))
14528 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14530 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14531 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14532 /* Turn off the debug UART. */
14533 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14534 if (tg3_flag(tp, IS_NIC))
14535 /* Keep VMain power. */
14536 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14537 GRC_LCLCTRL_GPIO_OUTPUT0;
14540 /* Switch out of Vaux if it is a NIC */
14541 tg3_pwrsrc_switch_to_vmain(tp);
14543 /* Derive initial jumbo mode from MTU assigned in
14544 * ether_setup() via the alloc_etherdev() call
14546 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14547 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14549 /* Determine WakeOnLan speed to use. */
14550 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14551 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14552 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14553 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14554 tg3_flag_clear(tp, WOL_SPEED_100MB);
14556 tg3_flag_set(tp, WOL_SPEED_100MB);
14559 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14560 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14562 /* A few boards don't want Ethernet@WireSpeed phy feature */
14563 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14564 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14565 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14566 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14567 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14568 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14569 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14571 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14572 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14573 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14574 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14575 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14577 if (tg3_flag(tp, 5705_PLUS) &&
14578 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14579 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14580 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14581 !tg3_flag(tp, 57765_PLUS)) {
14582 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14583 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14584 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14585 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14586 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14587 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14588 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14589 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14590 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14592 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14595 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14596 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14597 tp->phy_otp = tg3_read_otp_phycfg(tp);
14598 if (tp->phy_otp == 0)
14599 tp->phy_otp = TG3_OTP_DEFAULT;
14602 if (tg3_flag(tp, CPMU_PRESENT))
14603 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14605 tp->mi_mode = MAC_MI_MODE_BASE;
14607 tp->coalesce_mode = 0;
14608 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14609 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14610 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14612 /* Set these bits to enable statistics workaround. */
14613 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14614 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14615 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14616 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14617 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14620 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14621 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14622 tg3_flag_set(tp, USE_PHYLIB);
14624 err = tg3_mdio_init(tp);
14628 /* Initialize data/descriptor byte/word swapping. */
14629 val = tr32(GRC_MODE);
14630 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14631 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14632 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14633 GRC_MODE_B2HRX_ENABLE |
14634 GRC_MODE_HTX2B_ENABLE |
14635 GRC_MODE_HOST_STACKUP);
14637 val &= GRC_MODE_HOST_STACKUP;
14639 tw32(GRC_MODE, val | tp->grc_mode);
14641 tg3_switch_clocks(tp);
14643 /* Clear this out for sanity. */
14644 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14646 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14648 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14649 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14650 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14652 if (chiprevid == CHIPREV_ID_5701_A0 ||
14653 chiprevid == CHIPREV_ID_5701_B0 ||
14654 chiprevid == CHIPREV_ID_5701_B2 ||
14655 chiprevid == CHIPREV_ID_5701_B5) {
14656 void __iomem *sram_base;
14658 /* Write some dummy words into the SRAM status block
14659 * area, see if it reads back correctly. If the return
14660 * value is bad, force enable the PCIX workaround.
14662 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14664 writel(0x00000000, sram_base);
14665 writel(0x00000000, sram_base + 4);
14666 writel(0xffffffff, sram_base + 4);
14667 if (readl(sram_base) != 0x00000000)
14668 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14673 tg3_nvram_init(tp);
14675 grc_misc_cfg = tr32(GRC_MISC_CFG);
14676 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14678 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14679 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14680 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14681 tg3_flag_set(tp, IS_5788);
14683 if (!tg3_flag(tp, IS_5788) &&
14684 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14685 tg3_flag_set(tp, TAGGED_STATUS);
14686 if (tg3_flag(tp, TAGGED_STATUS)) {
14687 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14688 HOSTCC_MODE_CLRTICK_TXBD);
14690 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14691 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14692 tp->misc_host_ctrl);
14695 /* Preserve the APE MAC_MODE bits */
14696 if (tg3_flag(tp, ENABLE_APE))
14697 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14701 /* these are limited to 10/100 only */
14702 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14703 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14704 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14705 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14706 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14707 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14708 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14709 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14710 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14711 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14712 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14713 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14714 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14715 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14716 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14717 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14719 err = tg3_phy_probe(tp);
14721 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14722 /* ... but do not return immediately ... */
14727 tg3_read_fw_ver(tp);
14729 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14730 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14732 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14733 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14735 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14738 /* 5700 {AX,BX} chips have a broken status block link
14739 * change bit implementation, so we must use the
14740 * status register in those cases.
14742 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14743 tg3_flag_set(tp, USE_LINKCHG_REG);
14745 tg3_flag_clear(tp, USE_LINKCHG_REG);
14747 /* The led_ctrl is set during tg3_phy_probe, here we might
14748 * have to force the link status polling mechanism based
14749 * upon subsystem IDs.
14751 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14752 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14753 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14754 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14755 tg3_flag_set(tp, USE_LINKCHG_REG);
14758 /* For all SERDES we poll the MAC status register. */
14759 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14760 tg3_flag_set(tp, POLL_SERDES);
14762 tg3_flag_clear(tp, POLL_SERDES);
14764 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14765 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14766 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14767 tg3_flag(tp, PCIX_MODE)) {
14768 tp->rx_offset = NET_SKB_PAD;
14769 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14770 tp->rx_copy_thresh = ~(u16)0;
14774 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14775 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14776 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14778 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14780 /* Increment the rx prod index on the rx std ring by at most
14781 * 8 for these chips to workaround hw errata.
14783 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14784 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14785 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14786 tp->rx_std_max_post = 8;
14788 if (tg3_flag(tp, ASPM_WORKAROUND))
14789 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14790 PCIE_PWR_MGMT_L1_THRESH_MSK;
14795 #ifdef CONFIG_SPARC
14796 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14798 struct net_device *dev = tp->dev;
14799 struct pci_dev *pdev = tp->pdev;
14800 struct device_node *dp = pci_device_to_OF_node(pdev);
14801 const unsigned char *addr;
14804 addr = of_get_property(dp, "local-mac-address", &len);
14805 if (addr && len == 6) {
14806 memcpy(dev->dev_addr, addr, 6);
14807 memcpy(dev->perm_addr, dev->dev_addr, 6);
14813 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14815 struct net_device *dev = tp->dev;
14817 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14818 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14823 static int __devinit tg3_get_device_address(struct tg3 *tp)
14825 struct net_device *dev = tp->dev;
14826 u32 hi, lo, mac_offset;
14829 #ifdef CONFIG_SPARC
14830 if (!tg3_get_macaddr_sparc(tp))
14835 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14836 tg3_flag(tp, 5780_CLASS)) {
14837 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14839 if (tg3_nvram_lock(tp))
14840 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14842 tg3_nvram_unlock(tp);
14843 } else if (tg3_flag(tp, 5717_PLUS)) {
14844 if (tp->pci_fn & 1)
14846 if (tp->pci_fn > 1)
14847 mac_offset += 0x18c;
14848 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14851 /* First try to get it from MAC address mailbox. */
14852 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14853 if ((hi >> 16) == 0x484b) {
14854 dev->dev_addr[0] = (hi >> 8) & 0xff;
14855 dev->dev_addr[1] = (hi >> 0) & 0xff;
14857 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14858 dev->dev_addr[2] = (lo >> 24) & 0xff;
14859 dev->dev_addr[3] = (lo >> 16) & 0xff;
14860 dev->dev_addr[4] = (lo >> 8) & 0xff;
14861 dev->dev_addr[5] = (lo >> 0) & 0xff;
14863 /* Some old bootcode may report a 0 MAC address in SRAM */
14864 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14867 /* Next, try NVRAM. */
14868 if (!tg3_flag(tp, NO_NVRAM) &&
14869 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14870 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14871 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14872 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14874 /* Finally just fetch it out of the MAC control regs. */
14876 hi = tr32(MAC_ADDR_0_HIGH);
14877 lo = tr32(MAC_ADDR_0_LOW);
14879 dev->dev_addr[5] = lo & 0xff;
14880 dev->dev_addr[4] = (lo >> 8) & 0xff;
14881 dev->dev_addr[3] = (lo >> 16) & 0xff;
14882 dev->dev_addr[2] = (lo >> 24) & 0xff;
14883 dev->dev_addr[1] = hi & 0xff;
14884 dev->dev_addr[0] = (hi >> 8) & 0xff;
14888 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14889 #ifdef CONFIG_SPARC
14890 if (!tg3_get_default_macaddr_sparc(tp))
14895 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14899 #define BOUNDARY_SINGLE_CACHELINE 1
14900 #define BOUNDARY_MULTI_CACHELINE 2
14902 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14904 int cacheline_size;
14908 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14910 cacheline_size = 1024;
14912 cacheline_size = (int) byte * 4;
14914 /* On 5703 and later chips, the boundary bits have no
14917 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14918 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14919 !tg3_flag(tp, PCI_EXPRESS))
14922 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14923 goal = BOUNDARY_MULTI_CACHELINE;
14925 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14926 goal = BOUNDARY_SINGLE_CACHELINE;
14932 if (tg3_flag(tp, 57765_PLUS)) {
14933 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14940 /* PCI controllers on most RISC systems tend to disconnect
14941 * when a device tries to burst across a cache-line boundary.
14942 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14944 * Unfortunately, for PCI-E there are only limited
14945 * write-side controls for this, and thus for reads
14946 * we will still get the disconnects. We'll also waste
14947 * these PCI cycles for both read and write for chips
14948 * other than 5700 and 5701 which do not implement the
14951 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14952 switch (cacheline_size) {
14957 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14958 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14959 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14961 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14962 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14967 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14968 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14972 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14973 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14976 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14977 switch (cacheline_size) {
14981 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14982 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14983 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14989 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14990 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14994 switch (cacheline_size) {
14996 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14997 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14998 DMA_RWCTRL_WRITE_BNDRY_16);
15003 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15004 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15005 DMA_RWCTRL_WRITE_BNDRY_32);
15010 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15011 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15012 DMA_RWCTRL_WRITE_BNDRY_64);
15017 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15018 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15019 DMA_RWCTRL_WRITE_BNDRY_128);
15024 val |= (DMA_RWCTRL_READ_BNDRY_256 |
15025 DMA_RWCTRL_WRITE_BNDRY_256);
15028 val |= (DMA_RWCTRL_READ_BNDRY_512 |
15029 DMA_RWCTRL_WRITE_BNDRY_512);
15033 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15034 DMA_RWCTRL_WRITE_BNDRY_1024);
15043 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15045 struct tg3_internal_buffer_desc test_desc;
15046 u32 sram_dma_descs;
15049 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15051 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15052 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15053 tw32(RDMAC_STATUS, 0);
15054 tw32(WDMAC_STATUS, 0);
15056 tw32(BUFMGR_MODE, 0);
15057 tw32(FTQ_RESET, 0);
15059 test_desc.addr_hi = ((u64) buf_dma) >> 32;
15060 test_desc.addr_lo = buf_dma & 0xffffffff;
15061 test_desc.nic_mbuf = 0x00002100;
15062 test_desc.len = size;
15065 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
15066 * the *second* time the tg3 driver was getting loaded after an
15069 * Broadcom tells me:
15070 * ...the DMA engine is connected to the GRC block and a DMA
15071 * reset may affect the GRC block in some unpredictable way...
15072 * The behavior of resets to individual blocks has not been tested.
15074 * Broadcom noted the GRC reset will also reset all sub-components.
15077 test_desc.cqid_sqid = (13 << 8) | 2;
15079 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15082 test_desc.cqid_sqid = (16 << 8) | 7;
15084 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15087 test_desc.flags = 0x00000005;
15089 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15092 val = *(((u32 *)&test_desc) + i);
15093 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15094 sram_dma_descs + (i * sizeof(u32)));
15095 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15097 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15100 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15102 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15105 for (i = 0; i < 40; i++) {
15109 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15111 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15112 if ((val & 0xffff) == sram_dma_descs) {
15123 #define TEST_BUFFER_SIZE 0x2000
15125 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15126 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15130 static int __devinit tg3_test_dma(struct tg3 *tp)
15132 dma_addr_t buf_dma;
15133 u32 *buf, saved_dma_rwctrl;
15136 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15137 &buf_dma, GFP_KERNEL);
15143 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15144 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15146 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15148 if (tg3_flag(tp, 57765_PLUS))
15151 if (tg3_flag(tp, PCI_EXPRESS)) {
15152 /* DMA read watermark not used on PCIE */
15153 tp->dma_rwctrl |= 0x00180000;
15154 } else if (!tg3_flag(tp, PCIX_MODE)) {
15155 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15156 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15157 tp->dma_rwctrl |= 0x003f0000;
15159 tp->dma_rwctrl |= 0x003f000f;
15161 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15162 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15163 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15164 u32 read_water = 0x7;
15166 /* If the 5704 is behind the EPB bridge, we can
15167 * do the less restrictive ONE_DMA workaround for
15168 * better performance.
15170 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15171 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15172 tp->dma_rwctrl |= 0x8000;
15173 else if (ccval == 0x6 || ccval == 0x7)
15174 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15176 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15178 /* Set bit 23 to enable PCIX hw bug fix */
15180 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15181 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15183 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15184 /* 5780 always in PCIX mode */
15185 tp->dma_rwctrl |= 0x00144000;
15186 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15187 /* 5714 always in PCIX mode */
15188 tp->dma_rwctrl |= 0x00148000;
15190 tp->dma_rwctrl |= 0x001b000f;
15194 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15195 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15196 tp->dma_rwctrl &= 0xfffffff0;
15198 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15199 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15200 /* Remove this if it causes problems for some boards. */
15201 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15203 /* On 5700/5701 chips, we need to set this bit.
15204 * Otherwise the chip will issue cacheline transactions
15205 * to streamable DMA memory with not all the byte
15206 * enables turned on. This is an error on several
15207 * RISC PCI controllers, in particular sparc64.
15209 * On 5703/5704 chips, this bit has been reassigned
15210 * a different meaning. In particular, it is used
15211 * on those chips to enable a PCI-X workaround.
15213 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15216 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15219 /* Unneeded, already done by tg3_get_invariants. */
15220 tg3_switch_clocks(tp);
15223 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15224 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15227 /* It is best to perform DMA test with maximum write burst size
15228 * to expose the 5700/5701 write DMA bug.
15230 saved_dma_rwctrl = tp->dma_rwctrl;
15231 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15232 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15237 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15240 /* Send the buffer to the chip. */
15241 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15243 dev_err(&tp->pdev->dev,
15244 "%s: Buffer write failed. err = %d\n",
15250 /* validate data reached card RAM correctly. */
15251 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15253 tg3_read_mem(tp, 0x2100 + (i*4), &val);
15254 if (le32_to_cpu(val) != p[i]) {
15255 dev_err(&tp->pdev->dev,
15256 "%s: Buffer corrupted on device! "
15257 "(%d != %d)\n", __func__, val, i);
15258 /* ret = -ENODEV here? */
15263 /* Now read it back. */
15264 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15266 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15267 "err = %d\n", __func__, ret);
15272 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15276 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15277 DMA_RWCTRL_WRITE_BNDRY_16) {
15278 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15279 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15280 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15283 dev_err(&tp->pdev->dev,
15284 "%s: Buffer corrupted on read back! "
15285 "(%d != %d)\n", __func__, p[i], i);
15291 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15297 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15298 DMA_RWCTRL_WRITE_BNDRY_16) {
15299 /* DMA test passed without adjusting DMA boundary,
15300 * now look for chipsets that are known to expose the
15301 * DMA bug without failing the test.
15303 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15304 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15305 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15307 /* Safe to use the calculated DMA boundary. */
15308 tp->dma_rwctrl = saved_dma_rwctrl;
15311 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15315 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15320 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15322 if (tg3_flag(tp, 57765_PLUS)) {
15323 tp->bufmgr_config.mbuf_read_dma_low_water =
15324 DEFAULT_MB_RDMA_LOW_WATER_5705;
15325 tp->bufmgr_config.mbuf_mac_rx_low_water =
15326 DEFAULT_MB_MACRX_LOW_WATER_57765;
15327 tp->bufmgr_config.mbuf_high_water =
15328 DEFAULT_MB_HIGH_WATER_57765;
15330 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15331 DEFAULT_MB_RDMA_LOW_WATER_5705;
15332 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15333 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15334 tp->bufmgr_config.mbuf_high_water_jumbo =
15335 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15336 } else if (tg3_flag(tp, 5705_PLUS)) {
15337 tp->bufmgr_config.mbuf_read_dma_low_water =
15338 DEFAULT_MB_RDMA_LOW_WATER_5705;
15339 tp->bufmgr_config.mbuf_mac_rx_low_water =
15340 DEFAULT_MB_MACRX_LOW_WATER_5705;
15341 tp->bufmgr_config.mbuf_high_water =
15342 DEFAULT_MB_HIGH_WATER_5705;
15343 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15344 tp->bufmgr_config.mbuf_mac_rx_low_water =
15345 DEFAULT_MB_MACRX_LOW_WATER_5906;
15346 tp->bufmgr_config.mbuf_high_water =
15347 DEFAULT_MB_HIGH_WATER_5906;
15350 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15351 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15352 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15353 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15354 tp->bufmgr_config.mbuf_high_water_jumbo =
15355 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15357 tp->bufmgr_config.mbuf_read_dma_low_water =
15358 DEFAULT_MB_RDMA_LOW_WATER;
15359 tp->bufmgr_config.mbuf_mac_rx_low_water =
15360 DEFAULT_MB_MACRX_LOW_WATER;
15361 tp->bufmgr_config.mbuf_high_water =
15362 DEFAULT_MB_HIGH_WATER;
15364 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15365 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15366 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15367 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15368 tp->bufmgr_config.mbuf_high_water_jumbo =
15369 DEFAULT_MB_HIGH_WATER_JUMBO;
15372 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15373 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15376 static char * __devinit tg3_phy_string(struct tg3 *tp)
15378 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15379 case TG3_PHY_ID_BCM5400: return "5400";
15380 case TG3_PHY_ID_BCM5401: return "5401";
15381 case TG3_PHY_ID_BCM5411: return "5411";
15382 case TG3_PHY_ID_BCM5701: return "5701";
15383 case TG3_PHY_ID_BCM5703: return "5703";
15384 case TG3_PHY_ID_BCM5704: return "5704";
15385 case TG3_PHY_ID_BCM5705: return "5705";
15386 case TG3_PHY_ID_BCM5750: return "5750";
15387 case TG3_PHY_ID_BCM5752: return "5752";
15388 case TG3_PHY_ID_BCM5714: return "5714";
15389 case TG3_PHY_ID_BCM5780: return "5780";
15390 case TG3_PHY_ID_BCM5755: return "5755";
15391 case TG3_PHY_ID_BCM5787: return "5787";
15392 case TG3_PHY_ID_BCM5784: return "5784";
15393 case TG3_PHY_ID_BCM5756: return "5722/5756";
15394 case TG3_PHY_ID_BCM5906: return "5906";
15395 case TG3_PHY_ID_BCM5761: return "5761";
15396 case TG3_PHY_ID_BCM5718C: return "5718C";
15397 case TG3_PHY_ID_BCM5718S: return "5718S";
15398 case TG3_PHY_ID_BCM57765: return "57765";
15399 case TG3_PHY_ID_BCM5719C: return "5719C";
15400 case TG3_PHY_ID_BCM5720C: return "5720C";
15401 case TG3_PHY_ID_BCM8002: return "8002/serdes";
15402 case 0: return "serdes";
15403 default: return "unknown";
15407 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15409 if (tg3_flag(tp, PCI_EXPRESS)) {
15410 strcpy(str, "PCI Express");
15412 } else if (tg3_flag(tp, PCIX_MODE)) {
15413 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15415 strcpy(str, "PCIX:");
15417 if ((clock_ctrl == 7) ||
15418 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15419 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15420 strcat(str, "133MHz");
15421 else if (clock_ctrl == 0)
15422 strcat(str, "33MHz");
15423 else if (clock_ctrl == 2)
15424 strcat(str, "50MHz");
15425 else if (clock_ctrl == 4)
15426 strcat(str, "66MHz");
15427 else if (clock_ctrl == 6)
15428 strcat(str, "100MHz");
15430 strcpy(str, "PCI:");
15431 if (tg3_flag(tp, PCI_HIGH_SPEED))
15432 strcat(str, "66MHz");
15434 strcat(str, "33MHz");
15436 if (tg3_flag(tp, PCI_32BIT))
15437 strcat(str, ":32-bit");
15439 strcat(str, ":64-bit");
15443 static void __devinit tg3_init_coal(struct tg3 *tp)
15445 struct ethtool_coalesce *ec = &tp->coal;
15447 memset(ec, 0, sizeof(*ec));
15448 ec->cmd = ETHTOOL_GCOALESCE;
15449 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15450 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15451 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15452 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15453 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15454 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15455 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15456 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15457 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15459 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15460 HOSTCC_MODE_CLRTICK_TXBD)) {
15461 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15462 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15463 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15464 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15467 if (tg3_flag(tp, 5705_PLUS)) {
15468 ec->rx_coalesce_usecs_irq = 0;
15469 ec->tx_coalesce_usecs_irq = 0;
15470 ec->stats_block_coalesce_usecs = 0;
15474 static int __devinit tg3_init_one(struct pci_dev *pdev,
15475 const struct pci_device_id *ent)
15477 struct net_device *dev;
15479 int i, err, pm_cap;
15480 u32 sndmbx, rcvmbx, intmbx;
15482 u64 dma_mask, persist_dma_mask;
15483 netdev_features_t features = 0;
15485 printk_once(KERN_INFO "%s\n", version);
15487 err = pci_enable_device(pdev);
15489 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15493 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15495 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15496 goto err_out_disable_pdev;
15499 pci_set_master(pdev);
15501 /* Find power-management capability. */
15502 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15504 dev_err(&pdev->dev,
15505 "Cannot find Power Management capability, aborting\n");
15507 goto err_out_free_res;
15510 err = pci_set_power_state(pdev, PCI_D0);
15512 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15513 goto err_out_free_res;
15516 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15519 goto err_out_power_down;
15522 SET_NETDEV_DEV(dev, &pdev->dev);
15524 tp = netdev_priv(dev);
15527 tp->pm_cap = pm_cap;
15528 tp->rx_mode = TG3_DEF_RX_MODE;
15529 tp->tx_mode = TG3_DEF_TX_MODE;
15532 tp->msg_enable = tg3_debug;
15534 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15536 /* The word/byte swap controls here control register access byte
15537 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15540 tp->misc_host_ctrl =
15541 MISC_HOST_CTRL_MASK_PCI_INT |
15542 MISC_HOST_CTRL_WORD_SWAP |
15543 MISC_HOST_CTRL_INDIR_ACCESS |
15544 MISC_HOST_CTRL_PCISTATE_RW;
15546 /* The NONFRM (non-frame) byte/word swap controls take effect
15547 * on descriptor entries, anything which isn't packet data.
15549 * The StrongARM chips on the board (one for tx, one for rx)
15550 * are running in big-endian mode.
15552 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15553 GRC_MODE_WSWAP_NONFRM_DATA);
15554 #ifdef __BIG_ENDIAN
15555 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15557 spin_lock_init(&tp->lock);
15558 spin_lock_init(&tp->indirect_lock);
15559 INIT_WORK(&tp->reset_task, tg3_reset_task);
15561 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15563 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15565 goto err_out_free_dev;
15568 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15569 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15570 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15571 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15572 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15573 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15574 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15575 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15576 tg3_flag_set(tp, ENABLE_APE);
15577 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15578 if (!tp->aperegs) {
15579 dev_err(&pdev->dev,
15580 "Cannot map APE registers, aborting\n");
15582 goto err_out_iounmap;
15586 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15587 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15589 dev->ethtool_ops = &tg3_ethtool_ops;
15590 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15591 dev->netdev_ops = &tg3_netdev_ops;
15592 dev->irq = pdev->irq;
15594 err = tg3_get_invariants(tp);
15596 dev_err(&pdev->dev,
15597 "Problem fetching invariants of chip, aborting\n");
15598 goto err_out_apeunmap;
15601 /* The EPB bridge inside 5714, 5715, and 5780 and any
15602 * device behind the EPB cannot support DMA addresses > 40-bit.
15603 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15604 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15605 * do DMA address check in tg3_start_xmit().
15607 if (tg3_flag(tp, IS_5788))
15608 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15609 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15610 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15611 #ifdef CONFIG_HIGHMEM
15612 dma_mask = DMA_BIT_MASK(64);
15615 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15617 /* Configure DMA attributes. */
15618 if (dma_mask > DMA_BIT_MASK(32)) {
15619 err = pci_set_dma_mask(pdev, dma_mask);
15621 features |= NETIF_F_HIGHDMA;
15622 err = pci_set_consistent_dma_mask(pdev,
15625 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15626 "DMA for consistent allocations\n");
15627 goto err_out_apeunmap;
15631 if (err || dma_mask == DMA_BIT_MASK(32)) {
15632 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15634 dev_err(&pdev->dev,
15635 "No usable DMA configuration, aborting\n");
15636 goto err_out_apeunmap;
15640 tg3_init_bufmgr_config(tp);
15642 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15644 /* 5700 B0 chips do not support checksumming correctly due
15645 * to hardware bugs.
15647 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15648 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15650 if (tg3_flag(tp, 5755_PLUS))
15651 features |= NETIF_F_IPV6_CSUM;
15654 /* TSO is on by default on chips that support hardware TSO.
15655 * Firmware TSO on older chips gives lower performance, so it
15656 * is off by default, but can be enabled using ethtool.
15658 if ((tg3_flag(tp, HW_TSO_1) ||
15659 tg3_flag(tp, HW_TSO_2) ||
15660 tg3_flag(tp, HW_TSO_3)) &&
15661 (features & NETIF_F_IP_CSUM))
15662 features |= NETIF_F_TSO;
15663 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15664 if (features & NETIF_F_IPV6_CSUM)
15665 features |= NETIF_F_TSO6;
15666 if (tg3_flag(tp, HW_TSO_3) ||
15667 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15668 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15669 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15670 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15671 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15672 features |= NETIF_F_TSO_ECN;
15675 dev->features |= features;
15676 dev->vlan_features |= features;
15679 * Add loopback capability only for a subset of devices that support
15680 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
15681 * loopback for the remaining devices.
15683 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15684 !tg3_flag(tp, CPMU_PRESENT))
15685 /* Add the loopback capability */
15686 features |= NETIF_F_LOOPBACK;
15688 dev->hw_features |= features;
15690 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15691 !tg3_flag(tp, TSO_CAPABLE) &&
15692 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15693 tg3_flag_set(tp, MAX_RXPEND_64);
15694 tp->rx_pending = 63;
15697 err = tg3_get_device_address(tp);
15699 dev_err(&pdev->dev,
15700 "Could not obtain valid ethernet address, aborting\n");
15701 goto err_out_apeunmap;
15705 * Reset chip in case UNDI or EFI driver did not shutdown
15706 * DMA self test will enable WDMAC and we'll see (spurious)
15707 * pending DMA on the PCI bus at that point.
15709 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15710 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15711 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15712 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15715 err = tg3_test_dma(tp);
15717 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15718 goto err_out_apeunmap;
15721 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15722 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15723 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15724 for (i = 0; i < tp->irq_max; i++) {
15725 struct tg3_napi *tnapi = &tp->napi[i];
15728 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15730 tnapi->int_mbox = intmbx;
15736 tnapi->consmbox = rcvmbx;
15737 tnapi->prodmbox = sndmbx;
15740 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15742 tnapi->coal_now = HOSTCC_MODE_NOW;
15744 if (!tg3_flag(tp, SUPPORT_MSIX))
15748 * If we support MSIX, we'll be using RSS. If we're using
15749 * RSS, the first vector only handles link interrupts and the
15750 * remaining vectors handle rx and tx interrupts. Reuse the
15751 * mailbox values for the next iteration. The values we setup
15752 * above are still useful for the single vectored mode.
15767 pci_set_drvdata(pdev, dev);
15769 if (tg3_flag(tp, 5717_PLUS)) {
15770 /* Resume a low-power mode */
15771 tg3_frob_aux_power(tp, false);
15774 tg3_timer_init(tp);
15776 err = register_netdev(dev);
15778 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15779 goto err_out_apeunmap;
15782 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15783 tp->board_part_number,
15784 tp->pci_chip_rev_id,
15785 tg3_bus_string(tp, str),
15788 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15789 struct phy_device *phydev;
15790 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15792 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15793 phydev->drv->name, dev_name(&phydev->dev));
15797 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15798 ethtype = "10/100Base-TX";
15799 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15800 ethtype = "1000Base-SX";
15802 ethtype = "10/100/1000Base-T";
15804 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15805 "(WireSpeed[%d], EEE[%d])\n",
15806 tg3_phy_string(tp), ethtype,
15807 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15808 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15811 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15812 (dev->features & NETIF_F_RXCSUM) != 0,
15813 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15814 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15815 tg3_flag(tp, ENABLE_ASF) != 0,
15816 tg3_flag(tp, TSO_CAPABLE) != 0);
15817 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15819 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15820 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15822 pci_save_state(pdev);
15828 iounmap(tp->aperegs);
15829 tp->aperegs = NULL;
15841 err_out_power_down:
15842 pci_set_power_state(pdev, PCI_D3hot);
15845 pci_release_regions(pdev);
15847 err_out_disable_pdev:
15848 pci_disable_device(pdev);
15849 pci_set_drvdata(pdev, NULL);
15853 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15855 struct net_device *dev = pci_get_drvdata(pdev);
15858 struct tg3 *tp = netdev_priv(dev);
15861 release_firmware(tp->fw);
15863 tg3_reset_task_cancel(tp);
15865 if (tg3_flag(tp, USE_PHYLIB)) {
15870 unregister_netdev(dev);
15872 iounmap(tp->aperegs);
15873 tp->aperegs = NULL;
15880 pci_release_regions(pdev);
15881 pci_disable_device(pdev);
15882 pci_set_drvdata(pdev, NULL);
15886 #ifdef CONFIG_PM_SLEEP
15887 static int tg3_suspend(struct device *device)
15889 struct pci_dev *pdev = to_pci_dev(device);
15890 struct net_device *dev = pci_get_drvdata(pdev);
15891 struct tg3 *tp = netdev_priv(dev);
15894 if (!netif_running(dev))
15897 tg3_reset_task_cancel(tp);
15899 tg3_netif_stop(tp);
15901 tg3_timer_stop(tp);
15903 tg3_full_lock(tp, 1);
15904 tg3_disable_ints(tp);
15905 tg3_full_unlock(tp);
15907 netif_device_detach(dev);
15909 tg3_full_lock(tp, 0);
15910 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15911 tg3_flag_clear(tp, INIT_COMPLETE);
15912 tg3_full_unlock(tp);
15914 err = tg3_power_down_prepare(tp);
15918 tg3_full_lock(tp, 0);
15920 tg3_flag_set(tp, INIT_COMPLETE);
15921 err2 = tg3_restart_hw(tp, 1);
15925 tg3_timer_start(tp);
15927 netif_device_attach(dev);
15928 tg3_netif_start(tp);
15931 tg3_full_unlock(tp);
15940 static int tg3_resume(struct device *device)
15942 struct pci_dev *pdev = to_pci_dev(device);
15943 struct net_device *dev = pci_get_drvdata(pdev);
15944 struct tg3 *tp = netdev_priv(dev);
15947 if (!netif_running(dev))
15950 netif_device_attach(dev);
15952 tg3_full_lock(tp, 0);
15954 tg3_flag_set(tp, INIT_COMPLETE);
15955 err = tg3_restart_hw(tp, 1);
15959 tg3_timer_start(tp);
15961 tg3_netif_start(tp);
15964 tg3_full_unlock(tp);
15972 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15973 #define TG3_PM_OPS (&tg3_pm_ops)
15977 #define TG3_PM_OPS NULL
15979 #endif /* CONFIG_PM_SLEEP */
15982 * tg3_io_error_detected - called when PCI error is detected
15983 * @pdev: Pointer to PCI device
15984 * @state: The current pci connection state
15986 * This function is called after a PCI bus error affecting
15987 * this device has been detected.
15989 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15990 pci_channel_state_t state)
15992 struct net_device *netdev = pci_get_drvdata(pdev);
15993 struct tg3 *tp = netdev_priv(netdev);
15994 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15996 netdev_info(netdev, "PCI I/O error detected\n");
16000 if (!netif_running(netdev))
16005 tg3_netif_stop(tp);
16007 tg3_timer_stop(tp);
16009 /* Want to make sure that the reset task doesn't run */
16010 tg3_reset_task_cancel(tp);
16012 netif_device_detach(netdev);
16014 /* Clean up software state, even if MMIO is blocked */
16015 tg3_full_lock(tp, 0);
16016 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16017 tg3_full_unlock(tp);
16020 if (state == pci_channel_io_perm_failure)
16021 err = PCI_ERS_RESULT_DISCONNECT;
16023 pci_disable_device(pdev);
16031 * tg3_io_slot_reset - called after the pci bus has been reset.
16032 * @pdev: Pointer to PCI device
16034 * Restart the card from scratch, as if from a cold-boot.
16035 * At this point, the card has exprienced a hard reset,
16036 * followed by fixups by BIOS, and has its config space
16037 * set up identically to what it was at cold boot.
16039 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16041 struct net_device *netdev = pci_get_drvdata(pdev);
16042 struct tg3 *tp = netdev_priv(netdev);
16043 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16048 if (pci_enable_device(pdev)) {
16049 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16053 pci_set_master(pdev);
16054 pci_restore_state(pdev);
16055 pci_save_state(pdev);
16057 if (!netif_running(netdev)) {
16058 rc = PCI_ERS_RESULT_RECOVERED;
16062 err = tg3_power_up(tp);
16066 rc = PCI_ERS_RESULT_RECOVERED;
16075 * tg3_io_resume - called when traffic can start flowing again.
16076 * @pdev: Pointer to PCI device
16078 * This callback is called when the error recovery driver tells
16079 * us that its OK to resume normal operation.
16081 static void tg3_io_resume(struct pci_dev *pdev)
16083 struct net_device *netdev = pci_get_drvdata(pdev);
16084 struct tg3 *tp = netdev_priv(netdev);
16089 if (!netif_running(netdev))
16092 tg3_full_lock(tp, 0);
16093 tg3_flag_set(tp, INIT_COMPLETE);
16094 err = tg3_restart_hw(tp, 1);
16095 tg3_full_unlock(tp);
16097 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16101 netif_device_attach(netdev);
16103 tg3_timer_start(tp);
16105 tg3_netif_start(tp);
16113 static struct pci_error_handlers tg3_err_handler = {
16114 .error_detected = tg3_io_error_detected,
16115 .slot_reset = tg3_io_slot_reset,
16116 .resume = tg3_io_resume
16119 static struct pci_driver tg3_driver = {
16120 .name = DRV_MODULE_NAME,
16121 .id_table = tg3_pci_tbl,
16122 .probe = tg3_init_one,
16123 .remove = __devexit_p(tg3_remove_one),
16124 .err_handler = &tg3_err_handler,
16125 .driver.pm = TG3_PM_OPS,
16128 static int __init tg3_init(void)
16130 return pci_register_driver(&tg3_driver);
16133 static void __exit tg3_cleanup(void)
16135 pci_unregister_driver(&tg3_driver);
16138 module_init(tg3_init);
16139 module_exit(tg3_cleanup);