2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2011 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mdio.h>
36 #include <linux/mii.h>
37 #include <linux/phy.h>
38 #include <linux/brcmphy.h>
39 #include <linux/if_vlan.h>
41 #include <linux/tcp.h>
42 #include <linux/workqueue.h>
43 #include <linux/prefetch.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/firmware.h>
47 #include <net/checksum.h>
50 #include <asm/system.h>
52 #include <asm/byteorder.h>
53 #include <linux/uaccess.h>
56 #include <asm/idprom.h>
65 /* Functions & macros to verify TG3_FLAGS types */
/* Test whether a TG3_FLAGS bit is set in the driver's flag bitmap.
 * NOTE(review): interior lines (braces) are elided in this dump; code
 * left byte-identical. */
67 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
69 	return test_bit(flag, bits);
/* Set a TG3_FLAGS bit (body elided in this dump — presumably set_bit();
 * TODO confirm against full source). */
72 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
/* Clear a TG3_FLAGS bit in the driver's flag bitmap. */
77 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
79 	clear_bit(flag, bits);
82 #define tg3_flag(tp, flag) \
83 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
84 #define tg3_flag_set(tp, flag) \
85 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
86 #define tg3_flag_clear(tp, flag) \
87 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89 #define DRV_MODULE_NAME "tg3"
91 #define TG3_MIN_NUM 118
92 #define DRV_MODULE_VERSION \
93 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
94 #define DRV_MODULE_RELDATE "April 22, 2011"
96 #define TG3_DEF_MAC_MODE 0
97 #define TG3_DEF_RX_MODE 0
98 #define TG3_DEF_TX_MODE 0
99 #define TG3_DEF_MSG_ENABLE \
109 /* length of time before we decide the hardware is borked,
110 * and dev->tx_timeout() should be called to fix the problem
113 #define TG3_TX_TIMEOUT (5 * HZ)
115 /* hardware minimum and maximum for a single frame's data payload */
116 #define TG3_MIN_MTU 60
117 #define TG3_MAX_MTU(tp) \
118 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
120 /* These numbers seem to be hard coded in the NIC firmware somehow.
121 * You can't change the ring sizes, but you can change where you place
122 * them in the NIC onboard memory.
124 #define TG3_RX_STD_RING_SIZE(tp) \
125 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
126 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
127 #define TG3_DEF_RX_RING_PENDING 200
128 #define TG3_RX_JMB_RING_SIZE(tp) \
129 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
130 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
131 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
132 #define TG3_RSS_INDIR_TBL_SIZE 128
134 /* Do not place this n-ring entries value into the tp struct itself,
135 * we really want to expose these constants to GCC so that modulo et
136 * al. operations are done with shifts and masks instead of with
137 * hw multiply/modulo instructions. Another solution would be to
138 * replace things like '% foo' with '& (foo - 1)'.
141 #define TG3_TX_RING_SIZE 512
142 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
144 #define TG3_RX_STD_RING_BYTES(tp) \
145 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
146 #define TG3_RX_JMB_RING_BYTES(tp) \
147 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
148 #define TG3_RX_RCB_RING_BYTES(tp) \
149 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
150 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
152 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
154 #define TG3_DMA_BYTE_ENAB 64
156 #define TG3_RX_STD_DMA_SZ 1536
157 #define TG3_RX_JMB_DMA_SZ 9046
159 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
161 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
162 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
164 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
165 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
167 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
168 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
170 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
171 * that are at least dword aligned when used in PCIX mode. The driver
172 * works around this bug by double copying the packet. This workaround
173 * is built into the normal double copy length check for efficiency.
175 * However, the double copy is only necessary on those architectures
176 * where unaligned memory accesses are inefficient. For those architectures
177 * where unaligned memory accesses incur little penalty, we can reintegrate
178 * the 5701 in the normal rx path. Doing so saves a device structure
179 * dereference by hardcoding the double copy threshold in place.
181 #define TG3_RX_COPY_THRESHOLD 256
182 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
183 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
185 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
188 /* minimum number of free TX descriptors required to wake up TX process */
189 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
191 #define TG3_RAW_IP_ALIGN 2
193 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
195 #define FIRMWARE_TG3 "tigon/tg3.bin"
196 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
197 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
199 static char version[] __devinitdata =
200 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
202 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
203 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
204 MODULE_LICENSE("GPL");
205 MODULE_VERSION(DRV_MODULE_VERSION);
206 MODULE_FIRMWARE(FIRMWARE_TG3);
207 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
208 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
210 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
211 module_param(tg3_debug, int, 0);
212 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
214 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
215 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
216 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
217 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
218 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
219 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
220 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
221 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
222 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
223 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
224 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
225 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
288 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
289 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
290 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
291 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
292 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
293 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
294 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
298 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
300 static const struct {
301 const char string[ETH_GSTRING_LEN];
302 } ethtool_stats_keys[] = {
305 { "rx_ucast_packets" },
306 { "rx_mcast_packets" },
307 { "rx_bcast_packets" },
309 { "rx_align_errors" },
310 { "rx_xon_pause_rcvd" },
311 { "rx_xoff_pause_rcvd" },
312 { "rx_mac_ctrl_rcvd" },
313 { "rx_xoff_entered" },
314 { "rx_frame_too_long_errors" },
316 { "rx_undersize_packets" },
317 { "rx_in_length_errors" },
318 { "rx_out_length_errors" },
319 { "rx_64_or_less_octet_packets" },
320 { "rx_65_to_127_octet_packets" },
321 { "rx_128_to_255_octet_packets" },
322 { "rx_256_to_511_octet_packets" },
323 { "rx_512_to_1023_octet_packets" },
324 { "rx_1024_to_1522_octet_packets" },
325 { "rx_1523_to_2047_octet_packets" },
326 { "rx_2048_to_4095_octet_packets" },
327 { "rx_4096_to_8191_octet_packets" },
328 { "rx_8192_to_9022_octet_packets" },
335 { "tx_flow_control" },
337 { "tx_single_collisions" },
338 { "tx_mult_collisions" },
340 { "tx_excessive_collisions" },
341 { "tx_late_collisions" },
342 { "tx_collide_2times" },
343 { "tx_collide_3times" },
344 { "tx_collide_4times" },
345 { "tx_collide_5times" },
346 { "tx_collide_6times" },
347 { "tx_collide_7times" },
348 { "tx_collide_8times" },
349 { "tx_collide_9times" },
350 { "tx_collide_10times" },
351 { "tx_collide_11times" },
352 { "tx_collide_12times" },
353 { "tx_collide_13times" },
354 { "tx_collide_14times" },
355 { "tx_collide_15times" },
356 { "tx_ucast_packets" },
357 { "tx_mcast_packets" },
358 { "tx_bcast_packets" },
359 { "tx_carrier_sense_errors" },
363 { "dma_writeq_full" },
364 { "dma_write_prioq_full" },
367 { "mbuf_lwm_thresh_hit" },
369 { "rx_threshold_hit" },
371 { "dma_readq_full" },
372 { "dma_read_prioq_full" },
373 { "tx_comp_queue_full" },
375 { "ring_set_send_prod_index" },
376 { "ring_status_update" },
378 { "nic_avoided_irqs" },
379 { "nic_tx_threshold_hit" }
382 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
385 static const struct {
386 const char string[ETH_GSTRING_LEN];
387 } ethtool_test_keys[] = {
388 { "nvram test (online) " },
389 { "link test (online) " },
390 { "register test (offline)" },
391 { "memory test (offline)" },
392 { "loopback test (offline)" },
393 { "interrupt test (offline)" },
396 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
/* Posted 32-bit MMIO write to register @off in the chip's register BAR. */
399 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
401 	writel(val, tp->regs + off);
/* 32-bit MMIO read from register @off in the chip's register BAR. */
404 static u32 tg3_read32(struct tg3 *tp, u32 off)
406 	return readl(tp->regs + off);
/* 32-bit MMIO write into the APE (management processor) register space. */
409 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
411 	writel(val, tp->aperegs + off);
/* 32-bit MMIO read from the APE (management processor) register space. */
414 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
416 	return readl(tp->aperegs + off);
/* Write a chip register through PCI config space (REG_BASE_ADDR/REG_DATA
 * window) instead of MMIO.  indirect_lock serializes the two-step
 * address/data sequence against concurrent indirect accesses. */
419 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
423 	spin_lock_irqsave(&tp->indirect_lock, flags);
424 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
425 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
426 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* MMIO write followed by a read-back of the same register to flush the
 * posted write to the device before returning. */
429 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
431 	writel(val, tp->regs + off);
432 	readl(tp->regs + off);
/* Read a chip register through the PCI config-space indirect window,
 * serialized by indirect_lock.  (The 'return val;' line is elided in
 * this dump.) */
435 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
440 	spin_lock_irqsave(&tp->indirect_lock, flags);
441 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
442 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
443 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Write a mailbox register via PCI config space.  Two mailboxes (the RX
 * return-ring consumer index and the standard-ring producer index) have
 * dedicated config-space aliases; everything else goes through the
 * indirect register window at mailbox offset + 0x5600. */
447 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
451 	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
452 		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
453 				       TG3_64BIT_REG_LOW, val);
456 	if (off == TG3_RX_STD_PROD_IDX_REG) {
457 		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
458 				       TG3_64BIT_REG_LOW, val);
462 	spin_lock_irqsave(&tp->indirect_lock, flags);
463 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
464 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
465 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
467 	/* In indirect mode when disabling interrupts, we also need
468 	 * to clear the interrupt bit in the GRC local ctrl register.
470 	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
472 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
473 				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
/* Read a mailbox register via the config-space indirect window
 * (mailbox offset + 0x5600), under indirect_lock.  ('return val;'
 * elided in this dump.) */
477 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
482 	spin_lock_irqsave(&tp->indirect_lock, flags);
483 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
484 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
485 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
489 /* usec_wait specifies the wait time in usec when writing to certain registers
490  * where it is unsafe to read back the register without some delay.
491  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
492  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
/* Register write with an optional usec delay.  Chips with the PCIX
 * target hardware bug or the ICH workaround use the non-posted
 * tp->write32 method; otherwise a plain posted tg3_write32 is used.
 * (The delay/udelay lines are elided in this dump.) */
494 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
496 	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
497 		/* Non-posted methods */
498 		tp->write32(tp, off, val);
501 		tg3_write32(tp, off, val);
506 	/* Wait again after the read for the posted method to guarantee that
507 	 * the wait time is met.
/* Mailbox write that reads the mailbox back to flush the posted write,
 * except on chips where mailbox write reordering or the ICH workaround
 * makes the read-back unnecessary/unsafe. */
513 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
515 	tp->write32_mbox(tp, off, val);
516 	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
517 		tp->read32_mbox(tp, off);
/* TX mailbox write with workarounds for the TXD mailbox hardware bug and
 * mailbox write reordering.  NOTE(review): the writel/readl statements
 * under each flag test are elided in this dump — confirm against full
 * source before editing. */
520 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
522 	void __iomem *mbox = tp->regs + off;
524 	if (tg3_flag(tp, TXD_MBOX_HWBUG))
526 	if (tg3_flag(tp, MBOX_WRITE_REORDER))
/* 5906: mailboxes live in the GRC mailbox region, so offset by GRCMBOX_BASE. */
530 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
532 	return readl(tp->regs + off + GRCMBOX_BASE);
/* 5906 counterpart of the above for mailbox writes. */
535 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
537 	writel(val, tp->regs + off + GRCMBOX_BASE);
540 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
541 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
542 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
543 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
544 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
546 #define tw32(reg, val) tp->write32(tp, reg, val)
547 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
548 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
549 #define tr32(reg) tp->read32(tp, reg)
/* Write a word into NIC-internal SRAM at @off through the memory window,
 * either via PCI config space (SRAM_USE_CONFIG) or via MMIO, under
 * indirect_lock.  5906 silently skips the statistics-block range.  The
 * window base is always restored to zero afterwards. */
551 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
555 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
556 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
559 	spin_lock_irqsave(&tp->indirect_lock, flags);
560 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
561 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
562 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
564 		/* Always leave this as zero. */
565 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
567 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
568 		tw32_f(TG3PCI_MEM_WIN_DATA, val);
570 		/* Always leave this as zero. */
571 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
573 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Read a word of NIC-internal SRAM at @off into *val — mirror of
 * tg3_write_mem(), same 5906 stats-block exclusion and window-base
 * restore-to-zero discipline. */
576 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
580 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
581 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
586 	spin_lock_irqsave(&tp->indirect_lock, flags);
587 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
588 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
589 		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
591 		/* Always leave this as zero. */
592 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
594 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
595 		*val = tr32(TG3PCI_MEM_WIN_DATA);
597 		/* Always leave this as zero. */
598 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
600 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Release all eight APE hardware locks the driver might hold from a
 * previous instance.  5761 uses the legacy GRANT register block; newer
 * chips use the per-lock block. */
603 static void tg3_ape_lock_init(struct tg3 *tp)
608 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
609 		regbase = TG3_APE_LOCK_GRANT;
611 		regbase = TG3_APE_PER_LOCK_GRANT;
613 	/* Make sure the driver hasn't any stale locks. */
614 	for (i = 0; i < 8; i++)
615 		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
/* Acquire APE hardware lock @locknum (GRC or MEM), polling the grant
 * register for up to ~1 ms; on timeout the request is revoked.
 * No-op when the APE is not enabled.  NOTE(review): the 'off'
 * computation, udelay and return statements are elided in this dump. */
618 static int tg3_ape_lock(struct tg3 *tp, int locknum)
622 	u32 status, req, gnt;
624 	if (!tg3_flag(tp, ENABLE_APE))
628 	case TG3_APE_LOCK_GRC:
629 	case TG3_APE_LOCK_MEM:
635 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
636 		req = TG3_APE_LOCK_REQ;
637 		gnt = TG3_APE_LOCK_GRANT;
639 		req = TG3_APE_PER_LOCK_REQ;
640 		gnt = TG3_APE_PER_LOCK_GRANT;
645 	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);
647 	/* Wait for up to 1 millisecond to acquire lock. */
648 	for (i = 0; i < 100; i++) {
649 		status = tg3_ape_read32(tp, gnt + off);
650 		if (status == APE_LOCK_GRANT_DRIVER)
655 	if (status != APE_LOCK_GRANT_DRIVER) {
656 		/* Revoke the lock request. */
657 		tg3_ape_write32(tp, gnt + off,
658 				APE_LOCK_GRANT_DRIVER);
/* Release APE hardware lock @locknum by writing the driver's grant bit.
 * No-op when the APE is not enabled. */
666 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
670 	if (!tg3_flag(tp, ENABLE_APE))
674 	case TG3_APE_LOCK_GRC:
675 	case TG3_APE_LOCK_MEM:
681 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
682 		gnt = TG3_APE_LOCK_GRANT;
684 		gnt = TG3_APE_PER_LOCK_GRANT;
686 	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
/* Mask PCI interrupts in MISC_HOST_CTRL and write 1 to every interrupt
 * mailbox so no vector can fire. */
689 static void tg3_disable_ints(struct tg3 *tp)
693 	tw32(TG3PCI_MISC_HOST_CTRL,
694 	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
695 	for (i = 0; i < tp->irq_max; i++)
696 		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
/* Unmask PCI interrupts and re-arm each per-vector interrupt mailbox
 * with its last status tag (written twice for 1-shot MSI so the second
 * write re-enables the vector).  If a status update is already pending
 * on a non-tagged-status chip, force an interrupt via GRC local ctrl. */
699 static void tg3_enable_ints(struct tg3 *tp)
706 	tw32(TG3PCI_MISC_HOST_CTRL,
707 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
709 	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
710 	for (i = 0; i < tp->irq_cnt; i++) {
711 		struct tg3_napi *tnapi = &tp->napi[i];
713 		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
714 		if (tg3_flag(tp, 1SHOT_MSI))
715 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
717 		tp->coal_now |= tnapi->coal_now;
720 	/* Force an initial interrupt */
721 	if (!tg3_flag(tp, TAGGED_STATUS) &&
722 	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
723 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
725 		tw32(HOSTCC_MODE, tp->coal_now);
727 	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
/* Return nonzero if this NAPI context has pending work: a link-change
 * event (when not using the link-change register or serdes polling),
 * or TX completions / RX descriptors not yet consumed. */
730 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
732 	struct tg3 *tp = tnapi->tp;
733 	struct tg3_hw_status *sblk = tnapi->hw_status;
734 	unsigned int work_exists = 0;
736 	/* check for phy events */
737 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
738 		if (sblk->status & SD_STATUS_LINK_CHG)
741 	/* check for RX/TX work to do */
742 	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
743 	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
750  * similar to tg3_enable_ints, but it accurately determines whether there
751  * is new work pending and can return without flushing the PIO write
752  * which reenables interrupts
754 static void tg3_int_reenable(struct tg3_napi *tnapi)
756 	struct tg3 *tp = tnapi->tp;
	/* Re-arm this vector with the last processed tag (unflushed write). */
758 	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
761 	/* When doing tagged status, this work check is unnecessary.
762 	 * The last_tag we write above tells the chip which piece of
763 	 * work we've completed.
765 	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
766 		tw32(HOSTCC_MODE, tp->coalesce_mode |
767 		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
/* Switch the chip core clock between the 62.5 MHz PCI-derived clock and
 * the alternate clocks, using the two-step ALTCLK handshake required by
 * the hardware.  Skipped entirely on CPMU-equipped and 5780-class chips.
 * NOTE(review): some mask constants in the clock_ctrl computation are on
 * elided lines. */
770 static void tg3_switch_clocks(struct tg3 *tp)
775 	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
778 	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
780 	orig_clock_ctrl = clock_ctrl;
781 	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
782 		       CLOCK_CTRL_CLKRUN_OENABLE |
784 	tp->pci_clock_ctrl = clock_ctrl;
786 	if (tg3_flag(tp, 5705_PLUS)) {
787 		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
788 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
789 				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
791 	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
792 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
794 			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
796 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
797 			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
800 	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
803 #define PHY_BUSY_LOOPS 5000
/* Read PHY register @reg over the MI (MDIO) interface into *val.
 * Auto-polling is temporarily disabled around the transaction and
 * restored afterwards.  Polls MAC_MI_COM for completion up to
 * PHY_BUSY_LOOPS iterations.  NOTE(review): loop decrement, delays and
 * the return-value lines are elided in this dump. */
805 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
811 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
813 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
819 	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
820 		      MI_COM_PHY_ADDR_MASK);
821 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
822 		      MI_COM_REG_ADDR_MASK);
823 	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
825 	tw32_f(MAC_MI_COM, frame_val);
827 	loops = PHY_BUSY_LOOPS;
830 		frame_val = tr32(MAC_MI_COM);
832 		if ((frame_val & MI_COM_BUSY) == 0) {
834 			frame_val = tr32(MAC_MI_COM);
842 		*val = frame_val & MI_COM_DATA_MASK;
846 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
847 		tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Write @val to PHY register @reg over the MI (MDIO) interface.  FET
 * PHYs silently skip MII_TG3_CTRL / MII_TG3_AUX_CTRL writes.  Same
 * auto-poll suspend/restore and busy-poll structure as tg3_readphy().
 * NOTE(review): loop decrement, delays and return lines are elided. */
854 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
860 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
861 	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
864 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
866 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
870 	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
871 		      MI_COM_PHY_ADDR_MASK);
872 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
873 		      MI_COM_REG_ADDR_MASK);
874 	frame_val |= (val & MI_COM_DATA_MASK);
875 	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
877 	tw32_f(MAC_MI_COM, frame_val);
879 	loops = PHY_BUSY_LOOPS;
882 		frame_val = tr32(MAC_MI_COM);
883 		if ((frame_val & MI_COM_BUSY) == 0) {
885 			frame_val = tr32(MAC_MI_COM);
895 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
896 		tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Clause-45 indirect write: select @devad/@addr via the MMD control and
 * address registers, then write @val in no-increment data mode.  Each
 * step bails out on error (goto targets elided in this dump). */
903 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
907 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
911 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
915 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
916 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
920 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
/* Clause-45 indirect read — mirror of tg3_phy_cl45_write(), finishing
 * with a read of the MMD address/data register into *val. */
926 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
930 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
934 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
938 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
939 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
943 	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
/* Read a DSP register: latch @reg into DSP_ADDRESS, then read RW_PORT. */
949 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
953 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
955 		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
/* Write a DSP register: latch @reg into DSP_ADDRESS, then write RW_PORT. */
960 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
964 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
966 		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
/* Read an AUXCTL shadow register: write the read-select value, then read
 * MII_TG3_AUX_CTRL back into *val. */
971 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
975 	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
976 			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
977 			   MII_TG3_AUXCTL_SHDWSEL_MISC);
979 		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
/* Write an AUXCTL shadow register; the MISC shadow additionally needs
 * the write-enable bit set. */
984 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
986 	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
987 		set |= MII_TG3_AUXCTL_MISC_WREN;
989 	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
992 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
993 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
994 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
995 MII_TG3_AUXCTL_ACTL_TX_6DB)
997 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
998 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
999 MII_TG3_AUXCTL_ACTL_TX_6DB);
/* Soft-reset the PHY via BMCR_RESET and poll MII_BMCR until the bit
 * self-clears (timeout/loop lines elided in this dump). */
1001 static int tg3_bmcr_reset(struct tg3 *tp)
1006 	/* OK, reset it, and poll the BMCR_RESET bit until it
1007 	 * clears or we time out.
1009 	phy_control = BMCR_RESET;
1010 	err = tg3_writephy(tp, MII_BMCR, phy_control);
1016 		err = tg3_readphy(tp, MII_BMCR, &phy_control);
1020 		if ((phy_control & BMCR_RESET) == 0) {
/* phylib mii_bus .read hook: wraps tg3_readphy() under tp->lock. */
1032 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1034 	struct tg3 *tp = bp->priv;
1037 	spin_lock_bh(&tp->lock);
1039 	if (tg3_readphy(tp, reg, &val))
1042 	spin_unlock_bh(&tp->lock);
/* phylib mii_bus .write hook: wraps tg3_writephy() under tp->lock. */
1047 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1049 	struct tg3 *tp = bp->priv;
1052 	spin_lock_bh(&tp->lock);
1054 	if (tg3_writephy(tp, reg, val))
1057 	spin_unlock_bh(&tp->lock);
/* phylib mii_bus .reset hook (body elided in this dump — presumably a
 * trivial 'return 0;'; TODO confirm against full source). */
1062 static int tg3_mdio_reset(struct mii_bus *bp)
/* 5785: program MAC_PHYCFG1/2 and MAC_EXT_RGMII_MODE to match the
 * attached PHY's LED modes and RGMII in-band/out-of-band signalling
 * configuration, keyed off the PHY driver's ID and interface mode. */
1067 static void tg3_mdio_config_5785(struct tg3 *tp)
1070 	struct phy_device *phydev;
1072 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	/* Pick the LED-mode value for the specific PHY part. */
1073 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1074 	case PHY_ID_BCM50610:
1075 	case PHY_ID_BCM50610M:
1076 		val = MAC_PHYCFG2_50610_LED_MODES;
1078 	case PHY_ID_BCMAC131:
1079 		val = MAC_PHYCFG2_AC131_LED_MODES;
1081 	case PHY_ID_RTL8211C:
1082 		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1084 	case PHY_ID_RTL8201E:
1085 		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
	/* Non-RGMII interface: simple PHYCFG2/PHYCFG1 setup, then done. */
1091 	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1092 		tw32(MAC_PHYCFG2, val);
1094 		val = tr32(MAC_PHYCFG1);
1095 		val &= ~(MAC_PHYCFG1_RGMII_INT |
1096 			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1097 		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1098 		tw32(MAC_PHYCFG1, val);
	/* RGMII: optionally enable in-band status signalling. */
1103 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1104 		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1105 		       MAC_PHYCFG2_FMODE_MASK_MASK |
1106 		       MAC_PHYCFG2_GMODE_MASK_MASK |
1107 		       MAC_PHYCFG2_ACT_MASK_MASK |
1108 		       MAC_PHYCFG2_QUAL_MASK_MASK |
1109 		       MAC_PHYCFG2_INBAND_ENABLE;
1111 	tw32(MAC_PHYCFG2, val);
1113 	val = tr32(MAC_PHYCFG1);
1114 	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1115 		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1116 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1117 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1118 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1119 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1120 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1122 	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1123 	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1124 	tw32(MAC_PHYCFG1, val);
	/* Finally mirror the in-band options into the ext RGMII mode reg. */
1126 	val = tr32(MAC_EXT_RGMII_MODE);
1127 	val &= ~(MAC_RGMII_MODE_RX_INT_B |
1128 		 MAC_RGMII_MODE_RX_QUALITY |
1129 		 MAC_RGMII_MODE_RX_ACTIVITY |
1130 		 MAC_RGMII_MODE_RX_ENG_DET |
1131 		 MAC_RGMII_MODE_TX_ENABLE |
1132 		 MAC_RGMII_MODE_TX_LOWPWR |
1133 		 MAC_RGMII_MODE_TX_RESET);
1134 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1135 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1136 			val |= MAC_RGMII_MODE_RX_INT_B |
1137 			       MAC_RGMII_MODE_RX_QUALITY |
1138 			       MAC_RGMII_MODE_RX_ACTIVITY |
1139 			       MAC_RGMII_MODE_RX_ENG_DET;
1140 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1141 			val |= MAC_RGMII_MODE_TX_ENABLE |
1142 			       MAC_RGMII_MODE_TX_LOWPWR |
1143 			       MAC_RGMII_MODE_TX_RESET;
1145 	tw32(MAC_EXT_RGMII_MODE, val);
/* Disable MI auto-polling, then reapply the 5785 MDIO configuration if
 * the mdio bus was already initialized. */
1148 static void tg3_mdio_start(struct tg3 *tp)
1150 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1151 	tw32_f(MAC_MI_MODE, tp->mi_mode);
1154 	if (tg3_flag(tp, MDIOBUS_INITED) &&
1155 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1156 		tg3_mdio_config_5785(tp);
/* Discover the PHY address (per-function on 5717-class parts, detecting
 * serdes via strap/status registers), then — when phylib is in use —
 * allocate and register an mdio bus, validate the attached PHY, and
 * apply per-PHY dev_flags/interface setup.  NOTE(review): several
 * error-return and 'break' lines are elided in this dump; closing brace
 * and final return fall outside the visible region. */
1159 static int tg3_mdio_init(struct tg3 *tp)
1163 	struct phy_device *phydev;
1165 	if (tg3_flag(tp, 5717_PLUS)) {
1168 		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
1170 		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1171 			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1173 			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1174 				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1178 		tp->phy_addr = TG3_PHY_MII_ADDR;
1182 	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1185 	tp->mdio_bus = mdiobus_alloc();
1186 	if (tp->mdio_bus == NULL)
1189 	tp->mdio_bus->name     = "tg3 mdio bus";
1190 	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1191 		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1192 	tp->mdio_bus->priv     = tp;
1193 	tp->mdio_bus->parent   = &tp->pdev->dev;
1194 	tp->mdio_bus->read     = &tg3_mdio_read;
1195 	tp->mdio_bus->write    = &tg3_mdio_write;
1196 	tp->mdio_bus->reset    = &tg3_mdio_reset;
1197 	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1198 	tp->mdio_bus->irq      = &tp->mdio_irq[0];
1200 	for (i = 0; i < PHY_MAX_ADDR; i++)
1201 		tp->mdio_bus->irq[i] = PHY_POLL;
1203 	/* The bus registration will look for all the PHYs on the mdio bus.
1204 	 * Unfortunately, it does not ensure the PHY is powered up before
1205 	 * accessing the PHY ID registers.  A chip reset is the
1206 	 * quickest way to bring the device back to an operational state..
1208 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1211 	i = mdiobus_register(tp->mdio_bus);
1213 		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1214 		mdiobus_free(tp->mdio_bus);
1218 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1220 	if (!phydev || !phydev->drv) {
1221 		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1222 		mdiobus_unregister(tp->mdio_bus);
1223 		mdiobus_free(tp->mdio_bus);
	/* Per-PHY quirks: interface mode and broadcom dev_flags. */
1227 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1228 	case PHY_ID_BCM57780:
1229 		phydev->interface = PHY_INTERFACE_MODE_GMII;
1230 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1232 	case PHY_ID_BCM50610:
1233 	case PHY_ID_BCM50610M:
1234 		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1235 				     PHY_BRCM_RX_REFCLK_UNUSED |
1236 				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1237 				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1238 		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1239 			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1240 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1241 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1242 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1243 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1245 	case PHY_ID_RTL8211C:
1246 		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1248 	case PHY_ID_RTL8201E:
1249 	case PHY_ID_BCMAC131:
1250 		phydev->interface = PHY_INTERFACE_MODE_MII;
1251 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1252 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1256 	tg3_flag_set(tp, MDIOBUS_INITED);
1258 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1259 		tg3_mdio_config_5785(tp);
/* Tear down the MDIO bus registered by tg3_mdio_init(); safe to call
 * when the bus was never brought up (flag guards the teardown).
 */
1264 static void tg3_mdio_fini(struct tg3 *tp)
1266 if (tg3_flag(tp, MDIOBUS_INITED)) {
1267 tg3_flag_clear(tp, MDIOBUS_INITED);
1268 mdiobus_unregister(tp->mdio_bus);
1269 mdiobus_free(tp->mdio_bus);
1273 /* tp->lock is held. */
/* Ring the doorbell that tells the ASF/management firmware a driver
 * event is pending, and timestamp it so tg3_wait_for_event_ack() can
 * bound its wait.
 */
1274 static inline void tg3_generate_fw_event(struct tg3 *tp)
1278 val = tr32(GRC_RX_CPU_EVENT);
1279 val |= GRC_RX_CPU_DRIVER_EVENT;
1280 tw32_f(GRC_RX_CPU_EVENT, val);
1282 tp->last_event_jiffies = jiffies;
1285 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1287 /* tp->lock is held. */
/* Poll until the firmware has acknowledged (cleared) the previous
 * driver event, bounded by TG3_FW_EVENT_TIMEOUT_USEC measured from
 * tp->last_event_jiffies.
 */
1288 static void tg3_wait_for_event_ack(struct tg3 *tp)
1291 unsigned int delay_cnt;
1294 /* If enough time has passed, no wait is necessary. */
1295 time_remain = (long)(tp->last_event_jiffies + 1 +
1296 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1298 if (time_remain < 0)
1301 /* Check if we can shorten the wait time. */
1302 delay_cnt = jiffies_to_usecs(time_remain);
1303 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1304 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
/* Poll in ~1/8th slices of the remaining budget. */
1305 delay_cnt = (delay_cnt >> 3) + 1;
1307 for (i = 0; i < delay_cnt; i++) {
1308 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1314 /* tp->lock is held. */
1315 static void tg3_ump_link_report(struct tg3 *tp)
1320 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1323 tg3_wait_for_event_ack(tp);
1325 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1327 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1330 if (!tg3_readphy(tp, MII_BMCR, ®))
1332 if (!tg3_readphy(tp, MII_BMSR, ®))
1333 val |= (reg & 0xffff);
1334 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1337 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1339 if (!tg3_readphy(tp, MII_LPA, ®))
1340 val |= (reg & 0xffff);
1341 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1344 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1345 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1347 if (!tg3_readphy(tp, MII_STAT1000, ®))
1348 val |= (reg & 0xffff);
1350 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1352 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1356 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1358 tg3_generate_fw_event(tp);
/* Log the current carrier state (speed/duplex/flow-control/EEE) to the
 * kernel log and forward the same state to ASF firmware.
 */
1361 static void tg3_link_report(struct tg3 *tp)
1363 if (!netif_carrier_ok(tp->dev)) {
1364 netif_info(tp, link, tp->dev, "Link is down\n");
1365 tg3_ump_link_report(tp);
1366 } else if (netif_msg_link(tp)) {
1367 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1368 (tp->link_config.active_speed == SPEED_1000 ?
1370 (tp->link_config.active_speed == SPEED_100 ?
1372 (tp->link_config.active_duplex == DUPLEX_FULL ?
1375 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1376 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1378 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1381 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1382 netdev_info(tp->dev, "EEE is %s\n",
1383 tp->setlpicnt ? "enabled" : "disabled");
1385 tg3_ump_link_report(tp);
/* Translate FLOW_CTRL_{TX,RX} bits into the MII_ADVERTISE pause bits
 * (802.3 Annex 28B) for copper autonegotiation.
 */
1389 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1393 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1394 miireg = ADVERTISE_PAUSE_CAP;
1395 else if (flow_ctrl & FLOW_CTRL_TX)
1396 miireg = ADVERTISE_PAUSE_ASYM;
1397 else if (flow_ctrl & FLOW_CTRL_RX)
1398 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
/* Same mapping as tg3_advert_flowctrl_1000T() but producing the
 * 1000BASE-X pause advertisement bits for serdes links.
 */
1405 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1409 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1410 miireg = ADVERTISE_1000XPAUSE;
1411 else if (flow_ctrl & FLOW_CTRL_TX)
1412 miireg = ADVERTISE_1000XPSE_ASYM;
1413 else if (flow_ctrl & FLOW_CTRL_RX)
1414 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
/* Resolve negotiated 1000BASE-X pause capability from the local and
 * remote advertisements (802.3 pause resolution table).
 */
1421 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1425 if (lcladv & ADVERTISE_1000XPAUSE) {
1426 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1427 if (rmtadv & LPA_1000XPAUSE)
1428 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1429 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1432 if (rmtadv & LPA_1000XPAUSE)
1433 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1435 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1436 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
/* Program MAC RX/TX flow-control enables from either the autoneg
 * resolution (serdes vs copper path) or the forced configuration;
 * registers are only rewritten when the mode actually changed.
 */
1443 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1447 u32 old_rx_mode = tp->rx_mode;
1448 u32 old_tx_mode = tp->tx_mode;
1450 if (tg3_flag(tp, USE_PHYLIB))
1451 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1453 autoneg = tp->link_config.autoneg;
1455 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1456 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1457 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1459 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1461 flowctrl = tp->link_config.flowctrl;
1463 tp->link_config.active_flowctrl = flowctrl;
1465 if (flowctrl & FLOW_CTRL_RX)
1466 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1468 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1470 if (old_rx_mode != tp->rx_mode)
1471 tw32_f(MAC_RX_MODE, tp->rx_mode);
1473 if (flowctrl & FLOW_CTRL_TX)
1474 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1476 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1478 if (old_tx_mode != tp->tx_mode)
1479 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* phylib link-change callback: mirror the PHY's negotiated speed,
 * duplex and pause state into the MAC mode/TX-length registers, and
 * emit a link report when anything user-visible changed.
 * Runs under tp->lock (bh-disabled).
 */
1482 static void tg3_adjust_link(struct net_device *dev)
1484 u8 oldflowctrl, linkmesg = 0;
1485 u32 mac_mode, lcl_adv, rmt_adv;
1486 struct tg3 *tp = netdev_priv(dev);
1487 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1489 spin_lock_bh(&tp->lock);
1491 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1492 MAC_MODE_HALF_DUPLEX);
1494 oldflowctrl = tp->link_config.active_flowctrl;
1500 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1501 mac_mode |= MAC_MODE_PORT_MODE_MII;
1502 else if (phydev->speed == SPEED_1000 ||
1503 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1504 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1506 mac_mode |= MAC_MODE_PORT_MODE_MII;
1508 if (phydev->duplex == DUPLEX_HALF)
1509 mac_mode |= MAC_MODE_HALF_DUPLEX;
1511 lcl_adv = tg3_advert_flowctrl_1000T(
1512 tp->link_config.flowctrl);
1515 rmt_adv = LPA_PAUSE_CAP;
1516 if (phydev->asym_pause)
1517 rmt_adv |= LPA_PAUSE_ASYM;
1520 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1522 mac_mode |= MAC_MODE_PORT_MODE_GMII;
/* Only touch the MAC mode register if something changed. */
1524 if (mac_mode != tp->mac_mode) {
1525 tp->mac_mode = mac_mode;
1526 tw32_f(MAC_MODE, tp->mac_mode);
1530 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1531 if (phydev->speed == SPEED_10)
1533 MAC_MI_STAT_10MBPS_MODE |
1534 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1536 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
/* Half-duplex gigabit needs a longer slot time (0xff vs 32). */
1539 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1540 tw32(MAC_TX_LENGTHS,
1541 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1542 (6 << TX_LENGTHS_IPG_SHIFT) |
1543 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1545 tw32(MAC_TX_LENGTHS,
1546 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1547 (6 << TX_LENGTHS_IPG_SHIFT) |
1548 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
/* Detect any user-visible link parameter change. */
1550 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1551 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1552 phydev->speed != tp->link_config.active_speed ||
1553 phydev->duplex != tp->link_config.active_duplex ||
1554 oldflowctrl != tp->link_config.active_flowctrl)
1557 tp->link_config.active_speed = phydev->speed;
1558 tp->link_config.active_duplex = phydev->duplex;
1560 spin_unlock_bh(&tp->lock);
1563 tg3_link_report(tp);
/* Connect the net_device to its PHY through phylib, mask the PHY's
 * advertised features down to what the MAC supports, and mark the PHY
 * as connected.  Returns 0 or a negative errno from phy_connect().
 */
1566 static int tg3_phy_init(struct tg3 *tp)
1568 struct phy_device *phydev;
1570 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1573 /* Bring the PHY back to a known state. */
1576 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1578 /* Attach the MAC to the PHY. */
1579 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1580 phydev->dev_flags, phydev->interface);
1581 if (IS_ERR(phydev)) {
1582 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1583 return PTR_ERR(phydev);
1586 /* Mask with MAC supported features. */
1587 switch (phydev->interface) {
1588 case PHY_INTERFACE_MODE_GMII:
1589 case PHY_INTERFACE_MODE_RGMII:
1590 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1591 phydev->supported &= (PHY_GBIT_FEATURES |
1593 SUPPORTED_Asym_Pause);
1597 case PHY_INTERFACE_MODE_MII:
1598 phydev->supported &= (PHY_BASIC_FEATURES |
1600 SUPPORTED_Asym_Pause);
/* Unsupported interface mode: undo the connect. */
1603 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1607 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1609 phydev->advertising = phydev->supported;
/* (Re)start the PHY: restore pre-suspend link parameters if we were in
 * low-power mode, then kick off autonegotiation.
 */
1614 static void tg3_phy_start(struct tg3 *tp)
1616 struct phy_device *phydev;
1618 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1621 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1623 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1624 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1625 phydev->speed = tp->link_config.orig_speed;
1626 phydev->duplex = tp->link_config.orig_duplex;
1627 phydev->autoneg = tp->link_config.orig_autoneg;
1628 phydev->advertising = tp->link_config.orig_advertising;
1633 phy_start_aneg(phydev);
/* Stop the PHY state machine; no-op when the PHY was never connected. */
1636 static void tg3_phy_stop(struct tg3 *tp)
1638 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1641 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
/* Disconnect from the PHY and clear the connected flag (inverse of
 * tg3_phy_init()).
 */
1644 static void tg3_phy_fini(struct tg3 *tp)
1646 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1647 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1648 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
/* Enable/disable auto power-down on FET-style (10/100) PHYs via the
 * shadow register window opened through MII_TG3_FET_TEST.
 */
1652 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1656 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
/* Open the shadow register window. */
1659 tg3_writephy(tp, MII_TG3_FET_TEST,
1660 phytest | MII_TG3_FET_SHADOW_EN);
1661 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1663 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1665 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1666 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
/* Restore the test register, closing the shadow window. */
1668 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
/* Enable/disable PHY auto power-down.  Dispatches to the FET variant
 * for 10/100 PHYs; for gigabit PHYs it programs the SCR5 and APD
 * shadow registers (84ms wake timer) via MII_TG3_MISC_SHDW.
 */
1672 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1676 if (!tg3_flag(tp, 5705_PLUS) ||
1677 (tg3_flag(tp, 5717_PLUS) &&
1678 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1681 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1682 tg3_phy_fet_toggle_apd(tp, enable);
1686 reg = MII_TG3_MISC_SHDW_WREN |
1687 MII_TG3_MISC_SHDW_SCR5_SEL |
1688 MII_TG3_MISC_SHDW_SCR5_LPED |
1689 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1690 MII_TG3_MISC_SHDW_SCR5_SDTL |
1691 MII_TG3_MISC_SHDW_SCR5_C125OE;
1692 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1693 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1695 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1698 reg = MII_TG3_MISC_SHDW_WREN |
1699 MII_TG3_MISC_SHDW_APD_SEL |
1700 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1702 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1704 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
/* Enable/disable automatic MDI crossover.  FET PHYs use the shadow
 * MISCCTRL register; gigabit PHYs use the AUXCTL MISC shadow register.
 */
1707 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1711 if (!tg3_flag(tp, 5705_PLUS) ||
1712 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1715 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1718 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1719 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
/* Open the FET shadow register window. */
1721 tg3_writephy(tp, MII_TG3_FET_TEST,
1722 ephy | MII_TG3_FET_SHADOW_EN);
1723 if (!tg3_readphy(tp, reg, &phy)) {
1725 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1727 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1728 tg3_writephy(tp, reg, phy);
1730 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1735 ret = tg3_phy_auxctl_read(tp,
1736 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1739 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1741 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1742 tg3_phy_auxctl_write(tp,
1743 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1748 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1753 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1756 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1758 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1759 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
/* Copy factory calibration values from the chip's OTP word into the
 * corresponding PHY DSP registers, bracketed by SMDSP enable/disable.
 */
1762 static void tg3_phy_apply_otp(struct tg3 *tp)
1771 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1774 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1775 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1776 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1778 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1779 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1780 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1782 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1783 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1784 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1786 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1787 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1789 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1790 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1792 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1793 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1794 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1796 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
/* Adjust Energy Efficient Ethernet state after a link change: pick the
 * LPI exit timer for the negotiated speed, read the clause-45 EEE
 * resolution status, and disable LPI mode when EEE is not in use.
 */
1799 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1803 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
/* EEE only applies to an autonegotiated full-duplex 100/1000 link. */
1808 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1809 current_link_up == 1 &&
1810 tp->link_config.active_duplex == DUPLEX_FULL &&
1811 (tp->link_config.active_speed == SPEED_100 ||
1812 tp->link_config.active_speed == SPEED_1000)) {
1815 if (tp->link_config.active_speed == SPEED_1000)
1816 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1818 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1820 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1822 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1823 TG3_CL45_D7_EEERES_STAT, &val);
1826 case TG3_CL45_D7_EEERES_STAT_LP_1000T:
1827 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
1830 case ASIC_REV_57765:
1831 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1832 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26,
1834 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1838 case TG3_CL45_D7_EEERES_STAT_LP_100TX:
/* No EEE in use: turn LPI mode off in the CPMU. */
1843 if (!tp->setlpicnt) {
1844 val = tr32(TG3_CPMU_EEE_MODE);
1845 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
/* Poll the PHY DSP control register until the macro-busy bit (0x1000)
 * clears; returns 0 on completion or an error on timeout.
 */
1849 static int tg3_wait_macro_done(struct tg3 *tp)
1856 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1857 if ((tmp32 & 0x1000) == 0)
/* Write a fixed test pattern into each of the four PHY DSP channels,
 * read it back, and verify it.  On mismatch, issue the DSP recovery
 * writes and set *resetp so the caller retries after a PHY reset.
 */
1867 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1869 static const u32 test_pat[4][6] = {
1870 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1871 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1872 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1873 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1877 for (chan = 0; chan < 4; chan++) {
/* Select the channel's DSP address block and write the pattern. */
1880 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1881 (chan * 0x2000) | 0x0200);
1882 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1884 for (i = 0; i < 6; i++)
1885 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1888 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1889 if (tg3_wait_macro_done(tp)) {
/* Re-select the channel and switch to read-back mode. */
1894 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1895 (chan * 0x2000) | 0x0200);
1896 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1897 if (tg3_wait_macro_done(tp)) {
1902 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1903 if (tg3_wait_macro_done(tp)) {
1908 for (i = 0; i < 6; i += 2) {
1911 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1912 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1913 tg3_wait_macro_done(tp)) {
1919 if (low != test_pat[chan][i] ||
1920 high != test_pat[chan][i+1]) {
1921 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1922 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1923 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
/* Zero out the test pattern in all four PHY DSP channels; returns an
 * error if the DSP macro does not complete.
 */
1933 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1937 for (chan = 0; chan < 4; chan++) {
1940 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1941 (chan * 0x2000) | 0x0200);
1942 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1943 for (i = 0; i < 6; i++)
1944 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1945 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1946 if (tg3_wait_macro_done(tp))
1953 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1955 u32 reg32, phy9_orig;
1956 int retries, do_phy_reset, err;
1962 err = tg3_bmcr_reset(tp);
1968 /* Disable transmitter and interrupt. */
1969 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
1973 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1975 /* Set full-duplex, 1000 mbps. */
1976 tg3_writephy(tp, MII_BMCR,
1977 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1979 /* Set to master mode. */
1980 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1983 tg3_writephy(tp, MII_TG3_CTRL,
1984 (MII_TG3_CTRL_AS_MASTER |
1985 MII_TG3_CTRL_ENABLE_AS_MASTER));
1987 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1991 /* Block the PHY control access. */
1992 tg3_phydsp_write(tp, 0x8005, 0x0800);
1994 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1997 } while (--retries);
1999 err = tg3_phy_reset_chanpat(tp);
2003 tg3_phydsp_write(tp, 0x8005, 0x0000);
2005 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2006 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2008 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2010 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
2012 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
2014 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2021 /* This will reset the tigon3 PHY if there is no valid
2022 * link unless the FORCE argument is non-zero.
/* Full PHY reset path: performs the chip-specific reset workarounds
 * (5703/4/5, 5784 CPMU, AX-revision clock fixes), reapplies OTP
 * calibration and errata DSP writes, then restores jumbo-frame and
 * auto-MDIX/wirespeed settings.
 */
2024 static int tg3_phy_reset(struct tg3 *tp)
/* 5906: take the embedded PHY out of IDDQ (power-down) first. */
2029 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2030 val = tr32(GRC_MISC_CFG);
2031 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
/* Double read of BMSR: latched-low link bit needs two reads. */
2034 err = tg3_readphy(tp, MII_BMSR, &val);
2035 err |= tg3_readphy(tp, MII_BMSR, &val);
2039 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2040 netif_carrier_off(tp->dev);
2041 tg3_link_report(tp);
2044 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2045 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2046 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2047 err = tg3_phy_reset_5703_4_5(tp);
2054 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2055 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2056 cpmuctrl = tr32(TG3_CPMU_CTRL);
2057 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2059 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2062 err = tg3_bmcr_reset(tp);
2066 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2067 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2068 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2070 tw32(TG3_CPMU_CTRL, cpmuctrl);
/* AX revisions: undo the 12.5MHz MAC clock forced at power-down. */
2073 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2074 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2075 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2076 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2077 CPMU_LSPD_1000MB_MACCLK_12_5) {
2078 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2080 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2084 if (tg3_flag(tp, 5717_PLUS) &&
2085 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2088 tg3_phy_apply_otp(tp);
2090 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2091 tg3_phy_toggle_apd(tp, true);
2093 tg3_phy_toggle_apd(tp, false);
/* Errata DSP writes, keyed off per-PHY bug flags. */
2096 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2097 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2098 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2099 tg3_phydsp_write(tp, 0x000a, 0x0323);
2100 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2103 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2104 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2105 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2108 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2109 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2110 tg3_phydsp_write(tp, 0x000a, 0x310b);
2111 tg3_phydsp_write(tp, 0x201f, 0x9506);
2112 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2113 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2115 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2116 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2117 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2118 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2119 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2120 tg3_writephy(tp, MII_TG3_TEST1,
2121 MII_TG3_TEST1_TRIM_EN | 0x4);
2123 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2125 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2129 /* Set Extended packet length bit (bit 14) on all chips that */
2130 /* support jumbo frames */
2131 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2132 /* Cannot do read-modify-write on 5401 */
2133 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2134 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2135 /* Set bit 14 with read-modify-write to preserve other bits */
2136 err = tg3_phy_auxctl_read(tp,
2137 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2139 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2140 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2143 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2144 * jumbo frames transmission.
2146 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2147 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2148 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2149 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2152 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2153 /* adjust output voltage */
2154 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2157 tg3_phy_toggle_automdix(tp, 1);
2158 tg3_phy_set_wirespeed(tp);
/* Drive the GRC GPIO lines that switch the NIC between Vmain and Vaux,
 * taking the dual-port peer device into account (either port needing
 * WOL/ASF keeps Vaux up).  Chip families differ in GPIO wiring, hence
 * the per-ASIC branches below.
 */
2162 static void tg3_frob_aux_power(struct tg3 *tp)
2164 bool need_vaux = false;
2166 /* The GPIOs do something completely different on 57765. */
2167 if (!tg3_flag(tp, IS_NIC) ||
2168 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2169 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
/* Dual-port devices: consult the peer function's WOL/ASF state. */
2172 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2176 tp->pdev_peer != tp->pdev) {
2177 struct net_device *dev_peer;
2179 dev_peer = pci_get_drvdata(tp->pdev_peer);
2181 /* remove_one() may have been run on the peer. */
2183 struct tg3 *tp_peer = netdev_priv(dev_peer);
2185 if (tg3_flag(tp_peer, INIT_COMPLETE))
2188 if (tg3_flag(tp_peer, WOL_ENABLE) ||
2189 tg3_flag(tp_peer, ENABLE_ASF))
2194 if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2198 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2199 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2200 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2201 (GRC_LCLCTRL_GPIO_OE0 |
2202 GRC_LCLCTRL_GPIO_OE1 |
2203 GRC_LCLCTRL_GPIO_OE2 |
2204 GRC_LCLCTRL_GPIO_OUTPUT0 |
2205 GRC_LCLCTRL_GPIO_OUTPUT1),
2207 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2208 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2209 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2210 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2211 GRC_LCLCTRL_GPIO_OE1 |
2212 GRC_LCLCTRL_GPIO_OE2 |
2213 GRC_LCLCTRL_GPIO_OUTPUT0 |
2214 GRC_LCLCTRL_GPIO_OUTPUT1 |
2216 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2218 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2219 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2221 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2222 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2225 u32 grc_local_ctrl = 0;
2227 /* Workaround to prevent overdrawing Amps. */
2228 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2230 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2231 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2232 grc_local_ctrl, 100);
2235 /* On 5753 and variants, GPIO2 cannot be used. */
2236 no_gpio2 = tp->nic_sram_data_cfg &
2237 NIC_SRAM_DATA_CFG_NO_GPIO2;
2239 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2240 GRC_LCLCTRL_GPIO_OE1 |
2241 GRC_LCLCTRL_GPIO_OE2 |
2242 GRC_LCLCTRL_GPIO_OUTPUT1 |
2243 GRC_LCLCTRL_GPIO_OUTPUT2;
2245 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2246 GRC_LCLCTRL_GPIO_OUTPUT2);
2248 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2249 grc_local_ctrl, 100);
2251 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2253 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2254 grc_local_ctrl, 100);
2257 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2258 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2259 grc_local_ctrl, 100);
/* No Vaux needed: park GPIO1 (not on 5700/5701). */
2263 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2264 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2265 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2266 (GRC_LCLCTRL_GPIO_OE1 |
2267 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2269 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2270 GRC_LCLCTRL_GPIO_OE1, 100);
2272 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2273 (GRC_LCLCTRL_GPIO_OE1 |
2274 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
/* Decide whether the 5700's link LED polarity must be inverted, based
 * on the LED control mode, attached PHY model and link speed.
 */
2279 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2281 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2283 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2284 if (speed != SPEED_10)
2286 } else if (speed == SPEED_10)
2292 static int tg3_setup_phy(struct tg3 *, int);
2294 #define RESET_KIND_SHUTDOWN 0
2295 #define RESET_KIND_INIT 1
2296 #define RESET_KIND_SUSPEND 2
2298 static void tg3_write_sig_post_reset(struct tg3 *, int);
2299 static int tg3_halt_cpu(struct tg3 *, u32);
/* Power the PHY down for suspend/WOL.  Serdes, 5906 embedded-PHY and
 * FET PHYs each need their own sequence; some chips must not have the
 * PHY fully powered down at all and return early instead.
 */
2301 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2305 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2306 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2307 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2308 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2311 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2312 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2313 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
/* 5906: put the embedded PHY into IDDQ. */
2318 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2320 val = tr32(GRC_MISC_CFG);
2321 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2324 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2326 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2329 tg3_writephy(tp, MII_ADVERTISE, 0);
2330 tg3_writephy(tp, MII_BMCR,
2331 BMCR_ANENABLE | BMCR_ANRESTART);
2333 tg3_writephy(tp, MII_TG3_FET_TEST,
2334 phytest | MII_TG3_FET_SHADOW_EN);
2335 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2336 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2338 MII_TG3_FET_SHDW_AUXMODE4,
2341 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2344 } else if (do_low_power) {
2345 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2346 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2348 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2349 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2350 MII_TG3_AUXCTL_PCTL_VREG_11V;
2351 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2354 /* The PHY should not be powered down on some chips because
2357 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2358 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2359 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2360 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
/* AX revisions: force the 12.5MHz MAC clock before power-down. */
2363 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2364 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2365 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2366 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2367 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2368 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2371 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2374 /* tp->lock is held. */
/* Acquire the NVRAM software arbitration lock; nests via
 * nvram_lock_cnt so repeated calls from the same owner are cheap.
 * Returns 0 on success (or when the chip has no NVRAM arbitration).
 */
2375 static int tg3_nvram_lock(struct tg3 *tp)
2377 if (tg3_flag(tp, NVRAM)) {
2380 if (tp->nvram_lock_cnt == 0) {
2381 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2382 for (i = 0; i < 8000; i++) {
2383 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
/* Timed out: withdraw the request before failing. */
2388 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2392 tp->nvram_lock_cnt++;
2397 /* tp->lock is held. */
/* Release one nesting level of the NVRAM arbitration lock; the
 * hardware grant is only dropped when the count reaches zero.
 */
2398 static void tg3_nvram_unlock(struct tg3 *tp)
2400 if (tg3_flag(tp, NVRAM)) {
2401 if (tp->nvram_lock_cnt > 0)
2402 tp->nvram_lock_cnt--;
2403 if (tp->nvram_lock_cnt == 0)
2404 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2408 /* tp->lock is held. */
/* Enable host access to the NVRAM interface on 5750+ parts that do not
 * protect it.
 */
2409 static void tg3_enable_nvram_access(struct tg3 *tp)
2411 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2412 u32 nvaccess = tr32(NVRAM_ACCESS);
2414 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2418 /* tp->lock is held. */
/* Inverse of tg3_enable_nvram_access(): drop host NVRAM access. */
2419 static void tg3_disable_nvram_access(struct tg3 *tp)
2421 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2422 u32 nvaccess = tr32(NVRAM_ACCESS);
2424 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
/* Read one 32-bit word from a legacy serial EEPROM through the GRC
 * EEPROM address/data registers.  offset must be word-aligned and
 * within the address mask.  Returns 0 on success.
 */
2428 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2429 u32 offset, u32 *val)
2434 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2437 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2438 EEPROM_ADDR_DEVID_MASK |
2440 tw32(GRC_EEPROM_ADDR,
2442 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2443 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2444 EEPROM_ADDR_ADDR_MASK) |
2445 EEPROM_ADDR_READ | EEPROM_ADDR_START);
/* Poll for read completion. */
2447 for (i = 0; i < 1000; i++) {
2448 tmp = tr32(GRC_EEPROM_ADDR);
2450 if (tmp & EEPROM_ADDR_COMPLETE)
2454 if (!(tmp & EEPROM_ADDR_COMPLETE))
2457 tmp = tr32(GRC_EEPROM_DATA);
2460 * The data will always be opposite the native endian
2461 * format. Perform a blind byteswap to compensate.
2468 #define NVRAM_CMD_TIMEOUT 10000
/* Issue an NVRAM command and poll up to NVRAM_CMD_TIMEOUT iterations
 * for the DONE bit; returns 0 on completion, error on timeout.
 */
2470 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2474 tw32(NVRAM_CMD, nvram_cmd);
2475 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2477 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2483 if (i == NVRAM_CMD_TIMEOUT)
/* Convert a linear NVRAM offset into the page/offset physical address
 * layout used by Atmel AT45DB0x1B flash parts; other configurations
 * pass the address through unchanged.
 */
2489 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2491 if (tg3_flag(tp, NVRAM) &&
2492 tg3_flag(tp, NVRAM_BUFFERED) &&
2493 tg3_flag(tp, FLASH) &&
2494 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2495 (tp->nvram_jedecnum == JEDEC_ATMEL))
2497 addr = ((addr / tp->nvram_pagesize) <<
2498 ATMEL_AT45DB0X1B_PAGE_POS) +
2499 (addr % tp->nvram_pagesize);
/* Inverse of tg3_nvram_phys_addr(): map an Atmel page/offset physical
 * address back to a linear NVRAM offset.
 */
2504 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2506 if (tg3_flag(tp, NVRAM) &&
2507 tg3_flag(tp, NVRAM_BUFFERED) &&
2508 tg3_flag(tp, FLASH) &&
2509 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2510 (tp->nvram_jedecnum == JEDEC_ATMEL))
2512 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2513 tp->nvram_pagesize) +
2514 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2519 /* NOTE: Data read in from NVRAM is byteswapped according to
2520 * the byteswapping settings for all other register accesses.
2521 * tg3 devices are BE devices, so on a BE machine, the data
2522 * returned will be exactly as it is seen in NVRAM. On a LE
2523 * machine, the 32-bit value will be byteswapped.
/* Read one 32-bit word from NVRAM at the given linear offset, taking
 * the arbitration lock and enabling host access around the command.
 * Falls back to the legacy EEPROM path on chips without NVRAM.
 */
2525 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2529 if (!tg3_flag(tp, NVRAM))
2530 return tg3_nvram_read_using_eeprom(tp, offset, val);
2532 offset = tg3_nvram_phys_addr(tp, offset);
2534 if (offset > NVRAM_ADDR_MSK)
2537 ret = tg3_nvram_lock(tp);
2541 tg3_enable_nvram_access(tp);
2543 tw32(NVRAM_ADDR, offset);
2544 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2545 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2548 *val = tr32(NVRAM_RDDATA);
2550 tg3_disable_nvram_access(tp);
2552 tg3_nvram_unlock(tp);
2557 /* Ensures NVRAM data is in bytestream format. */
2558 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2561 int res = tg3_nvram_read(tp, offset, &v);
2563 *val = cpu_to_be32(v);
2567 /* tp->lock is held. */
2568 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2570 u32 addr_high, addr_low;
2573 addr_high = ((tp->dev->dev_addr[0] << 8) |
2574 tp->dev->dev_addr[1]);
2575 addr_low = ((tp->dev->dev_addr[2] << 24) |
2576 (tp->dev->dev_addr[3] << 16) |
2577 (tp->dev->dev_addr[4] << 8) |
2578 (tp->dev->dev_addr[5] << 0));
2579 for (i = 0; i < 4; i++) {
2580 if (i == 1 && skip_mac_1)
2582 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2583 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2586 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2587 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2588 for (i = 0; i < 12; i++) {
2589 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2590 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2594 addr_high = (tp->dev->dev_addr[0] +
2595 tp->dev->dev_addr[1] +
2596 tp->dev->dev_addr[2] +
2597 tp->dev->dev_addr[3] +
2598 tp->dev->dev_addr[4] +
2599 tp->dev->dev_addr[5]) &
2600 TX_BACKOFF_SEED_MASK;
2601 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2604 static void tg3_enable_register_access(struct tg3 *tp)
2607 * Make sure register accesses (indirect or otherwise) will function
2610 pci_write_config_dword(tp->pdev,
2611 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2614 static int tg3_power_up(struct tg3 *tp)
2616 tg3_enable_register_access(tp);
2618 pci_set_power_state(tp->pdev, PCI_D0);
2620 /* Switch out of Vaux if it is a NIC */
2621 if (tg3_flag(tp, IS_NIC))
2622 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2627 static int tg3_power_down_prepare(struct tg3 *tp)
2630 bool device_should_wake, do_low_power;
2632 tg3_enable_register_access(tp);
2634 /* Restore the CLKREQ setting. */
2635 if (tg3_flag(tp, CLKREQ_BUG)) {
2638 pci_read_config_word(tp->pdev,
2639 tp->pcie_cap + PCI_EXP_LNKCTL,
2641 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2642 pci_write_config_word(tp->pdev,
2643 tp->pcie_cap + PCI_EXP_LNKCTL,
2647 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2648 tw32(TG3PCI_MISC_HOST_CTRL,
2649 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2651 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2652 tg3_flag(tp, WOL_ENABLE);
2654 if (tg3_flag(tp, USE_PHYLIB)) {
2655 do_low_power = false;
2656 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2657 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2658 struct phy_device *phydev;
2659 u32 phyid, advertising;
2661 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2663 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2665 tp->link_config.orig_speed = phydev->speed;
2666 tp->link_config.orig_duplex = phydev->duplex;
2667 tp->link_config.orig_autoneg = phydev->autoneg;
2668 tp->link_config.orig_advertising = phydev->advertising;
2670 advertising = ADVERTISED_TP |
2672 ADVERTISED_Autoneg |
2673 ADVERTISED_10baseT_Half;
2675 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2676 if (tg3_flag(tp, WOL_SPEED_100MB))
2678 ADVERTISED_100baseT_Half |
2679 ADVERTISED_100baseT_Full |
2680 ADVERTISED_10baseT_Full;
2682 advertising |= ADVERTISED_10baseT_Full;
2685 phydev->advertising = advertising;
2687 phy_start_aneg(phydev);
2689 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2690 if (phyid != PHY_ID_BCMAC131) {
2691 phyid &= PHY_BCM_OUI_MASK;
2692 if (phyid == PHY_BCM_OUI_1 ||
2693 phyid == PHY_BCM_OUI_2 ||
2694 phyid == PHY_BCM_OUI_3)
2695 do_low_power = true;
2699 do_low_power = true;
2701 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2702 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2703 tp->link_config.orig_speed = tp->link_config.speed;
2704 tp->link_config.orig_duplex = tp->link_config.duplex;
2705 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2708 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2709 tp->link_config.speed = SPEED_10;
2710 tp->link_config.duplex = DUPLEX_HALF;
2711 tp->link_config.autoneg = AUTONEG_ENABLE;
2712 tg3_setup_phy(tp, 0);
2716 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2719 val = tr32(GRC_VCPU_EXT_CTRL);
2720 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2721 } else if (!tg3_flag(tp, ENABLE_ASF)) {
2725 for (i = 0; i < 200; i++) {
2726 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2727 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2732 if (tg3_flag(tp, WOL_CAP))
2733 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2734 WOL_DRV_STATE_SHUTDOWN |
2738 if (device_should_wake) {
2741 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2743 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2744 tg3_phy_auxctl_write(tp,
2745 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2746 MII_TG3_AUXCTL_PCTL_WOL_EN |
2747 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2748 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2752 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2753 mac_mode = MAC_MODE_PORT_MODE_GMII;
2755 mac_mode = MAC_MODE_PORT_MODE_MII;
2757 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2758 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2760 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2761 SPEED_100 : SPEED_10;
2762 if (tg3_5700_link_polarity(tp, speed))
2763 mac_mode |= MAC_MODE_LINK_POLARITY;
2765 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2768 mac_mode = MAC_MODE_PORT_MODE_TBI;
2771 if (!tg3_flag(tp, 5750_PLUS))
2772 tw32(MAC_LED_CTRL, tp->led_ctrl);
2774 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2775 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2776 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2777 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2779 if (tg3_flag(tp, ENABLE_APE))
2780 mac_mode |= MAC_MODE_APE_TX_EN |
2781 MAC_MODE_APE_RX_EN |
2782 MAC_MODE_TDE_ENABLE;
2784 tw32_f(MAC_MODE, mac_mode);
2787 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2791 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2792 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2793 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2796 base_val = tp->pci_clock_ctrl;
2797 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2798 CLOCK_CTRL_TXCLK_DISABLE);
2800 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2801 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2802 } else if (tg3_flag(tp, 5780_CLASS) ||
2803 tg3_flag(tp, CPMU_PRESENT) ||
2804 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2806 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2807 u32 newbits1, newbits2;
2809 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2810 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2811 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2812 CLOCK_CTRL_TXCLK_DISABLE |
2814 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2815 } else if (tg3_flag(tp, 5705_PLUS)) {
2816 newbits1 = CLOCK_CTRL_625_CORE;
2817 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2819 newbits1 = CLOCK_CTRL_ALTCLK;
2820 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2823 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2826 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2829 if (!tg3_flag(tp, 5705_PLUS)) {
2832 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2833 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2834 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2835 CLOCK_CTRL_TXCLK_DISABLE |
2836 CLOCK_CTRL_44MHZ_CORE);
2838 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2841 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2842 tp->pci_clock_ctrl | newbits3, 40);
2846 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
2847 tg3_power_down_phy(tp, do_low_power);
2849 tg3_frob_aux_power(tp);
2851 /* Workaround for unstable PLL clock */
2852 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2853 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2854 u32 val = tr32(0x7d00);
2856 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2858 if (!tg3_flag(tp, ENABLE_ASF)) {
2861 err = tg3_nvram_lock(tp);
2862 tg3_halt_cpu(tp, RX_CPU_BASE);
2864 tg3_nvram_unlock(tp);
2868 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2873 static void tg3_power_down(struct tg3 *tp)
2875 tg3_power_down_prepare(tp);
2877 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2878 pci_set_power_state(tp->pdev, PCI_D3hot);
2881 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2883 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2884 case MII_TG3_AUX_STAT_10HALF:
2886 *duplex = DUPLEX_HALF;
2889 case MII_TG3_AUX_STAT_10FULL:
2891 *duplex = DUPLEX_FULL;
2894 case MII_TG3_AUX_STAT_100HALF:
2896 *duplex = DUPLEX_HALF;
2899 case MII_TG3_AUX_STAT_100FULL:
2901 *duplex = DUPLEX_FULL;
2904 case MII_TG3_AUX_STAT_1000HALF:
2905 *speed = SPEED_1000;
2906 *duplex = DUPLEX_HALF;
2909 case MII_TG3_AUX_STAT_1000FULL:
2910 *speed = SPEED_1000;
2911 *duplex = DUPLEX_FULL;
2915 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2916 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2918 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2922 *speed = SPEED_INVALID;
2923 *duplex = DUPLEX_INVALID;
2928 static void tg3_phy_copper_begin(struct tg3 *tp)
2933 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2934 /* Entering low power mode. Disable gigabit and
2935 * 100baseT advertisements.
2937 tg3_writephy(tp, MII_TG3_CTRL, 0);
2939 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2940 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2941 if (tg3_flag(tp, WOL_SPEED_100MB))
2942 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2944 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2945 } else if (tp->link_config.speed == SPEED_INVALID) {
2946 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2947 tp->link_config.advertising &=
2948 ~(ADVERTISED_1000baseT_Half |
2949 ADVERTISED_1000baseT_Full);
2951 new_adv = ADVERTISE_CSMA;
2952 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2953 new_adv |= ADVERTISE_10HALF;
2954 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2955 new_adv |= ADVERTISE_10FULL;
2956 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2957 new_adv |= ADVERTISE_100HALF;
2958 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2959 new_adv |= ADVERTISE_100FULL;
2961 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2963 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2965 if (tp->link_config.advertising &
2966 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2968 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2969 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2970 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2971 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2972 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
2973 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2974 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2975 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2976 MII_TG3_CTRL_ENABLE_AS_MASTER);
2977 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2979 tg3_writephy(tp, MII_TG3_CTRL, 0);
2982 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2983 new_adv |= ADVERTISE_CSMA;
2985 /* Asking for a specific link mode. */
2986 if (tp->link_config.speed == SPEED_1000) {
2987 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2989 if (tp->link_config.duplex == DUPLEX_FULL)
2990 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2992 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2993 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2994 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2995 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2996 MII_TG3_CTRL_ENABLE_AS_MASTER);
2998 if (tp->link_config.speed == SPEED_100) {
2999 if (tp->link_config.duplex == DUPLEX_FULL)
3000 new_adv |= ADVERTISE_100FULL;
3002 new_adv |= ADVERTISE_100HALF;
3004 if (tp->link_config.duplex == DUPLEX_FULL)
3005 new_adv |= ADVERTISE_10FULL;
3007 new_adv |= ADVERTISE_10HALF;
3009 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3014 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
3017 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
3020 tw32(TG3_CPMU_EEE_MODE,
3021 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3023 TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3025 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3027 case ASIC_REV_57765:
3028 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3029 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3030 MII_TG3_DSP_CH34TP2_HIBW01);
3033 val = MII_TG3_DSP_TAP26_ALNOKO |
3034 MII_TG3_DSP_TAP26_RMRXSTO |
3035 MII_TG3_DSP_TAP26_OPCSINPT;
3036 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3040 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3041 /* Advertise 100-BaseTX EEE ability */
3042 if (tp->link_config.advertising &
3043 ADVERTISED_100baseT_Full)
3044 val |= MDIO_AN_EEE_ADV_100TX;
3045 /* Advertise 1000-BaseT EEE ability */
3046 if (tp->link_config.advertising &
3047 ADVERTISED_1000baseT_Full)
3048 val |= MDIO_AN_EEE_ADV_1000T;
3050 tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3052 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3055 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3056 tp->link_config.speed != SPEED_INVALID) {
3057 u32 bmcr, orig_bmcr;
3059 tp->link_config.active_speed = tp->link_config.speed;
3060 tp->link_config.active_duplex = tp->link_config.duplex;
3063 switch (tp->link_config.speed) {
3069 bmcr |= BMCR_SPEED100;
3073 bmcr |= TG3_BMCR_SPEED1000;
3077 if (tp->link_config.duplex == DUPLEX_FULL)
3078 bmcr |= BMCR_FULLDPLX;
3080 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3081 (bmcr != orig_bmcr)) {
3082 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3083 for (i = 0; i < 1500; i++) {
3087 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3088 tg3_readphy(tp, MII_BMSR, &tmp))
3090 if (!(tmp & BMSR_LSTATUS)) {
3095 tg3_writephy(tp, MII_BMCR, bmcr);
3099 tg3_writephy(tp, MII_BMCR,
3100 BMCR_ANENABLE | BMCR_ANRESTART);
3104 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3108 /* Turn off tap power management. */
3109 /* Set Extended packet length bit */
3110 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3112 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3113 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3114 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3115 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3116 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3123 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3125 u32 adv_reg, all_mask = 0;
3127 if (mask & ADVERTISED_10baseT_Half)
3128 all_mask |= ADVERTISE_10HALF;
3129 if (mask & ADVERTISED_10baseT_Full)
3130 all_mask |= ADVERTISE_10FULL;
3131 if (mask & ADVERTISED_100baseT_Half)
3132 all_mask |= ADVERTISE_100HALF;
3133 if (mask & ADVERTISED_100baseT_Full)
3134 all_mask |= ADVERTISE_100FULL;
3136 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3139 if ((adv_reg & all_mask) != all_mask)
3141 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3145 if (mask & ADVERTISED_1000baseT_Half)
3146 all_mask |= ADVERTISE_1000HALF;
3147 if (mask & ADVERTISED_1000baseT_Full)
3148 all_mask |= ADVERTISE_1000FULL;
3150 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3153 if ((tg3_ctrl & all_mask) != all_mask)
3159 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3163 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3166 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3167 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3169 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3170 if (curadv != reqadv)
3173 if (tg3_flag(tp, PAUSE_AUTONEG))
3174 tg3_readphy(tp, MII_LPA, rmtadv);
3176 /* Reprogram the advertisement register, even if it
3177 * does not affect the current link. If the link
3178 * gets renegotiated in the future, we can save an
3179 * additional renegotiation cycle by advertising
3180 * it correctly in the first place.
3182 if (curadv != reqadv) {
3183 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3184 ADVERTISE_PAUSE_ASYM);
3185 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3192 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3194 int current_link_up;
3196 u32 lcl_adv, rmt_adv;
3204 (MAC_STATUS_SYNC_CHANGED |
3205 MAC_STATUS_CFG_CHANGED |
3206 MAC_STATUS_MI_COMPLETION |
3207 MAC_STATUS_LNKSTATE_CHANGED));
3210 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3212 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3216 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3218 /* Some third-party PHYs need to be reset on link going
3221 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3222 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3223 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3224 netif_carrier_ok(tp->dev)) {
3225 tg3_readphy(tp, MII_BMSR, &bmsr);
3226 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3227 !(bmsr & BMSR_LSTATUS))
3233 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3234 tg3_readphy(tp, MII_BMSR, &bmsr);
3235 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3236 !tg3_flag(tp, INIT_COMPLETE))
3239 if (!(bmsr & BMSR_LSTATUS)) {
3240 err = tg3_init_5401phy_dsp(tp);
3244 tg3_readphy(tp, MII_BMSR, &bmsr);
3245 for (i = 0; i < 1000; i++) {
3247 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3248 (bmsr & BMSR_LSTATUS)) {
3254 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3255 TG3_PHY_REV_BCM5401_B0 &&
3256 !(bmsr & BMSR_LSTATUS) &&
3257 tp->link_config.active_speed == SPEED_1000) {
3258 err = tg3_phy_reset(tp);
3260 err = tg3_init_5401phy_dsp(tp);
3265 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3266 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3267 /* 5701 {A0,B0} CRC bug workaround */
3268 tg3_writephy(tp, 0x15, 0x0a75);
3269 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3270 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3271 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3274 /* Clear pending interrupts... */
3275 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3276 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3278 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3279 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3280 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3281 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3283 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3284 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3285 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3286 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3287 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3289 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3292 current_link_up = 0;
3293 current_speed = SPEED_INVALID;
3294 current_duplex = DUPLEX_INVALID;
3296 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3297 err = tg3_phy_auxctl_read(tp,
3298 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3300 if (!err && !(val & (1 << 10))) {
3301 tg3_phy_auxctl_write(tp,
3302 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3309 for (i = 0; i < 100; i++) {
3310 tg3_readphy(tp, MII_BMSR, &bmsr);
3311 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3312 (bmsr & BMSR_LSTATUS))
3317 if (bmsr & BMSR_LSTATUS) {
3320 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3321 for (i = 0; i < 2000; i++) {
3323 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3328 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3333 for (i = 0; i < 200; i++) {
3334 tg3_readphy(tp, MII_BMCR, &bmcr);
3335 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3337 if (bmcr && bmcr != 0x7fff)
3345 tp->link_config.active_speed = current_speed;
3346 tp->link_config.active_duplex = current_duplex;
3348 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3349 if ((bmcr & BMCR_ANENABLE) &&
3350 tg3_copper_is_advertising_all(tp,
3351 tp->link_config.advertising)) {
3352 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3354 current_link_up = 1;
3357 if (!(bmcr & BMCR_ANENABLE) &&
3358 tp->link_config.speed == current_speed &&
3359 tp->link_config.duplex == current_duplex &&
3360 tp->link_config.flowctrl ==
3361 tp->link_config.active_flowctrl) {
3362 current_link_up = 1;
3366 if (current_link_up == 1 &&
3367 tp->link_config.active_duplex == DUPLEX_FULL)
3368 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3372 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3373 tg3_phy_copper_begin(tp);
3375 tg3_readphy(tp, MII_BMSR, &bmsr);
3376 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3377 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3378 current_link_up = 1;
3381 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3382 if (current_link_up == 1) {
3383 if (tp->link_config.active_speed == SPEED_100 ||
3384 tp->link_config.active_speed == SPEED_10)
3385 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3387 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3388 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3389 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3391 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3393 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3394 if (tp->link_config.active_duplex == DUPLEX_HALF)
3395 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3397 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3398 if (current_link_up == 1 &&
3399 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3400 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3402 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3405 /* ??? Without this setting Netgear GA302T PHY does not
3406 * ??? send/receive packets...
3408 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3409 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3410 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3411 tw32_f(MAC_MI_MODE, tp->mi_mode);
3415 tw32_f(MAC_MODE, tp->mac_mode);
3418 tg3_phy_eee_adjust(tp, current_link_up);
3420 if (tg3_flag(tp, USE_LINKCHG_REG)) {
3421 /* Polled via timer. */
3422 tw32_f(MAC_EVENT, 0);
3424 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3428 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3429 current_link_up == 1 &&
3430 tp->link_config.active_speed == SPEED_1000 &&
3431 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3434 (MAC_STATUS_SYNC_CHANGED |
3435 MAC_STATUS_CFG_CHANGED));
3438 NIC_SRAM_FIRMWARE_MBOX,
3439 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3442 /* Prevent send BD corruption. */
3443 if (tg3_flag(tp, CLKREQ_BUG)) {
3444 u16 oldlnkctl, newlnkctl;
3446 pci_read_config_word(tp->pdev,
3447 tp->pcie_cap + PCI_EXP_LNKCTL,
3449 if (tp->link_config.active_speed == SPEED_100 ||
3450 tp->link_config.active_speed == SPEED_10)
3451 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3453 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3454 if (newlnkctl != oldlnkctl)
3455 pci_write_config_word(tp->pdev,
3456 tp->pcie_cap + PCI_EXP_LNKCTL,
3460 if (current_link_up != netif_carrier_ok(tp->dev)) {
3461 if (current_link_up)
3462 netif_carrier_on(tp->dev);
3464 netif_carrier_off(tp->dev);
3465 tg3_link_report(tp);
3471 struct tg3_fiber_aneginfo {
3473 #define ANEG_STATE_UNKNOWN 0
3474 #define ANEG_STATE_AN_ENABLE 1
3475 #define ANEG_STATE_RESTART_INIT 2
3476 #define ANEG_STATE_RESTART 3
3477 #define ANEG_STATE_DISABLE_LINK_OK 4
3478 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3479 #define ANEG_STATE_ABILITY_DETECT 6
3480 #define ANEG_STATE_ACK_DETECT_INIT 7
3481 #define ANEG_STATE_ACK_DETECT 8
3482 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3483 #define ANEG_STATE_COMPLETE_ACK 10
3484 #define ANEG_STATE_IDLE_DETECT_INIT 11
3485 #define ANEG_STATE_IDLE_DETECT 12
3486 #define ANEG_STATE_LINK_OK 13
3487 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3488 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3491 #define MR_AN_ENABLE 0x00000001
3492 #define MR_RESTART_AN 0x00000002
3493 #define MR_AN_COMPLETE 0x00000004
3494 #define MR_PAGE_RX 0x00000008
3495 #define MR_NP_LOADED 0x00000010
3496 #define MR_TOGGLE_TX 0x00000020
3497 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3498 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3499 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3500 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3501 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3502 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3503 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3504 #define MR_TOGGLE_RX 0x00002000
3505 #define MR_NP_RX 0x00004000
3507 #define MR_LINK_OK 0x80000000
3509 unsigned long link_time, cur_time;
3511 u32 ability_match_cfg;
3512 int ability_match_count;
3514 char ability_match, idle_match, ack_match;
3516 u32 txconfig, rxconfig;
3517 #define ANEG_CFG_NP 0x00000080
3518 #define ANEG_CFG_ACK 0x00000040
3519 #define ANEG_CFG_RF2 0x00000020
3520 #define ANEG_CFG_RF1 0x00000010
3521 #define ANEG_CFG_PS2 0x00000001
3522 #define ANEG_CFG_PS1 0x00008000
3523 #define ANEG_CFG_HD 0x00004000
3524 #define ANEG_CFG_FD 0x00002000
3525 #define ANEG_CFG_INVAL 0x00001f06
3530 #define ANEG_TIMER_ENAB 2
3531 #define ANEG_FAILED -1
3533 #define ANEG_STATE_SETTLE_TIME 10000
3535 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3536 struct tg3_fiber_aneginfo *ap)
3539 unsigned long delta;
3543 if (ap->state == ANEG_STATE_UNKNOWN) {
3547 ap->ability_match_cfg = 0;
3548 ap->ability_match_count = 0;
3549 ap->ability_match = 0;
3555 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3556 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3558 if (rx_cfg_reg != ap->ability_match_cfg) {
3559 ap->ability_match_cfg = rx_cfg_reg;
3560 ap->ability_match = 0;
3561 ap->ability_match_count = 0;
3563 if (++ap->ability_match_count > 1) {
3564 ap->ability_match = 1;
3565 ap->ability_match_cfg = rx_cfg_reg;
3568 if (rx_cfg_reg & ANEG_CFG_ACK)
3576 ap->ability_match_cfg = 0;
3577 ap->ability_match_count = 0;
3578 ap->ability_match = 0;
3584 ap->rxconfig = rx_cfg_reg;
3587 switch (ap->state) {
3588 case ANEG_STATE_UNKNOWN:
3589 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3590 ap->state = ANEG_STATE_AN_ENABLE;
3593 case ANEG_STATE_AN_ENABLE:
3594 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3595 if (ap->flags & MR_AN_ENABLE) {
3598 ap->ability_match_cfg = 0;
3599 ap->ability_match_count = 0;
3600 ap->ability_match = 0;
3604 ap->state = ANEG_STATE_RESTART_INIT;
3606 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3610 case ANEG_STATE_RESTART_INIT:
3611 ap->link_time = ap->cur_time;
3612 ap->flags &= ~(MR_NP_LOADED);
3614 tw32(MAC_TX_AUTO_NEG, 0);
3615 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3616 tw32_f(MAC_MODE, tp->mac_mode);
3619 ret = ANEG_TIMER_ENAB;
3620 ap->state = ANEG_STATE_RESTART;
3623 case ANEG_STATE_RESTART:
3624 delta = ap->cur_time - ap->link_time;
3625 if (delta > ANEG_STATE_SETTLE_TIME)
3626 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3628 ret = ANEG_TIMER_ENAB;
3631 case ANEG_STATE_DISABLE_LINK_OK:
3635 case ANEG_STATE_ABILITY_DETECT_INIT:
3636 ap->flags &= ~(MR_TOGGLE_TX);
3637 ap->txconfig = ANEG_CFG_FD;
3638 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3639 if (flowctrl & ADVERTISE_1000XPAUSE)
3640 ap->txconfig |= ANEG_CFG_PS1;
3641 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3642 ap->txconfig |= ANEG_CFG_PS2;
3643 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3644 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3645 tw32_f(MAC_MODE, tp->mac_mode);
3648 ap->state = ANEG_STATE_ABILITY_DETECT;
3651 case ANEG_STATE_ABILITY_DETECT:
3652 if (ap->ability_match != 0 && ap->rxconfig != 0)
3653 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3656 case ANEG_STATE_ACK_DETECT_INIT:
3657 ap->txconfig |= ANEG_CFG_ACK;
3658 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3659 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3660 tw32_f(MAC_MODE, tp->mac_mode);
3663 ap->state = ANEG_STATE_ACK_DETECT;
3666 case ANEG_STATE_ACK_DETECT:
3667 if (ap->ack_match != 0) {
3668 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3669 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3670 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3672 ap->state = ANEG_STATE_AN_ENABLE;
3674 } else if (ap->ability_match != 0 &&
3675 ap->rxconfig == 0) {
3676 ap->state = ANEG_STATE_AN_ENABLE;
3680 case ANEG_STATE_COMPLETE_ACK_INIT:
3681 if (ap->rxconfig & ANEG_CFG_INVAL) {
3685 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3686 MR_LP_ADV_HALF_DUPLEX |
3687 MR_LP_ADV_SYM_PAUSE |
3688 MR_LP_ADV_ASYM_PAUSE |
3689 MR_LP_ADV_REMOTE_FAULT1 |
3690 MR_LP_ADV_REMOTE_FAULT2 |
3691 MR_LP_ADV_NEXT_PAGE |
3694 if (ap->rxconfig & ANEG_CFG_FD)
3695 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3696 if (ap->rxconfig & ANEG_CFG_HD)
3697 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3698 if (ap->rxconfig & ANEG_CFG_PS1)
3699 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3700 if (ap->rxconfig & ANEG_CFG_PS2)
3701 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3702 if (ap->rxconfig & ANEG_CFG_RF1)
3703 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3704 if (ap->rxconfig & ANEG_CFG_RF2)
3705 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3706 if (ap->rxconfig & ANEG_CFG_NP)
3707 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3709 ap->link_time = ap->cur_time;
3711 ap->flags ^= (MR_TOGGLE_TX);
3712 if (ap->rxconfig & 0x0008)
3713 ap->flags |= MR_TOGGLE_RX;
3714 if (ap->rxconfig & ANEG_CFG_NP)
3715 ap->flags |= MR_NP_RX;
3716 ap->flags |= MR_PAGE_RX;
3718 ap->state = ANEG_STATE_COMPLETE_ACK;
3719 ret = ANEG_TIMER_ENAB;
3722 case ANEG_STATE_COMPLETE_ACK:
3723 if (ap->ability_match != 0 &&
3724 ap->rxconfig == 0) {
3725 ap->state = ANEG_STATE_AN_ENABLE;
3728 delta = ap->cur_time - ap->link_time;
3729 if (delta > ANEG_STATE_SETTLE_TIME) {
3730 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3731 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3733 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3734 !(ap->flags & MR_NP_RX)) {
3735 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3743 case ANEG_STATE_IDLE_DETECT_INIT:
3744 ap->link_time = ap->cur_time;
3745 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3746 tw32_f(MAC_MODE, tp->mac_mode);
3749 ap->state = ANEG_STATE_IDLE_DETECT;
3750 ret = ANEG_TIMER_ENAB;
3753 case ANEG_STATE_IDLE_DETECT:
3754 if (ap->ability_match != 0 &&
3755 ap->rxconfig == 0) {
3756 ap->state = ANEG_STATE_AN_ENABLE;
3759 delta = ap->cur_time - ap->link_time;
3760 if (delta > ANEG_STATE_SETTLE_TIME) {
3761 /* XXX another gem from the Broadcom driver :( */
3762 ap->state = ANEG_STATE_LINK_OK;
3766 case ANEG_STATE_LINK_OK:
3767 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3771 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3772 /* ??? unimplemented */
3775 case ANEG_STATE_NEXT_PAGE_WAIT:
3776 /* ??? unimplemented */
3787 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3790 struct tg3_fiber_aneginfo aninfo;
3791 int status = ANEG_FAILED;
3795 tw32_f(MAC_TX_AUTO_NEG, 0);
3797 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3798 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3801 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3804 memset(&aninfo, 0, sizeof(aninfo));
3805 aninfo.flags |= MR_AN_ENABLE;
3806 aninfo.state = ANEG_STATE_UNKNOWN;
3807 aninfo.cur_time = 0;
3809 while (++tick < 195000) {
3810 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3811 if (status == ANEG_DONE || status == ANEG_FAILED)
3817 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3818 tw32_f(MAC_MODE, tp->mac_mode);
3821 *txflags = aninfo.txconfig;
3822 *rxflags = aninfo.flags;
3824 if (status == ANEG_DONE &&
3825 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3826 MR_LP_ADV_FULL_DUPLEX)))
3832 static void tg3_init_bcm8002(struct tg3 *tp)
3834 u32 mac_status = tr32(MAC_STATUS);
3837 /* Reset when initting first time or we have a link. */
3838 if (tg3_flag(tp, INIT_COMPLETE) &&
3839 !(mac_status & MAC_STATUS_PCS_SYNCED))
3842 /* Set PLL lock range. */
3843 tg3_writephy(tp, 0x16, 0x8007);
3846 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3848 /* Wait for reset to complete. */
3849 /* XXX schedule_timeout() ... */
3850 for (i = 0; i < 500; i++)
3853 /* Config mode; select PMA/Ch 1 regs. */
3854 tg3_writephy(tp, 0x10, 0x8411);
3856 /* Enable auto-lock and comdet, select txclk for tx. */
3857 tg3_writephy(tp, 0x11, 0x0a10);
3859 tg3_writephy(tp, 0x18, 0x00a0);
3860 tg3_writephy(tp, 0x16, 0x41ff);
3862 /* Assert and deassert POR. */
3863 tg3_writephy(tp, 0x13, 0x0400);
3865 tg3_writephy(tp, 0x13, 0x0000);
3867 tg3_writephy(tp, 0x11, 0x0a50);
3869 tg3_writephy(tp, 0x11, 0x0a10);
3871 /* Wait for signal to stabilize */
3872 /* XXX schedule_timeout() ... */
3873 for (i = 0; i < 15000; i++)
3876 /* Deselect the channel register so we can read the PHYID
3879 tg3_writephy(tp, 0x10, 0x8011);
3882 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3885 u32 sg_dig_ctrl, sg_dig_status;
3886 u32 serdes_cfg, expected_sg_dig_ctrl;
3887 int workaround, port_a;
3888 int current_link_up;
3891 expected_sg_dig_ctrl = 0;
3894 current_link_up = 0;
3896 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3897 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3899 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3902 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3903 /* preserve bits 20-23 for voltage regulator */
3904 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3907 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3909 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3910 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3912 u32 val = serdes_cfg;
3918 tw32_f(MAC_SERDES_CFG, val);
3921 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3923 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3924 tg3_setup_flow_control(tp, 0, 0);
3925 current_link_up = 1;
3930 /* Want auto-negotiation. */
3931 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3933 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3934 if (flowctrl & ADVERTISE_1000XPAUSE)
3935 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3936 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3937 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3939 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3940 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3941 tp->serdes_counter &&
3942 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3943 MAC_STATUS_RCVD_CFG)) ==
3944 MAC_STATUS_PCS_SYNCED)) {
3945 tp->serdes_counter--;
3946 current_link_up = 1;
3951 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3952 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3954 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3956 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3957 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3958 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3959 MAC_STATUS_SIGNAL_DET)) {
3960 sg_dig_status = tr32(SG_DIG_STATUS);
3961 mac_status = tr32(MAC_STATUS);
3963 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3964 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3965 u32 local_adv = 0, remote_adv = 0;
3967 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3968 local_adv |= ADVERTISE_1000XPAUSE;
3969 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3970 local_adv |= ADVERTISE_1000XPSE_ASYM;
3972 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3973 remote_adv |= LPA_1000XPAUSE;
3974 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3975 remote_adv |= LPA_1000XPAUSE_ASYM;
3977 tg3_setup_flow_control(tp, local_adv, remote_adv);
3978 current_link_up = 1;
3979 tp->serdes_counter = 0;
3980 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3981 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3982 if (tp->serdes_counter)
3983 tp->serdes_counter--;
3986 u32 val = serdes_cfg;
3993 tw32_f(MAC_SERDES_CFG, val);
3996 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3999 /* Link parallel detection - link is up */
4000 /* only if we have PCS_SYNC and not */
4001 /* receiving config code words */
4002 mac_status = tr32(MAC_STATUS);
4003 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4004 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4005 tg3_setup_flow_control(tp, 0, 0);
4006 current_link_up = 1;
4008 TG3_PHYFLG_PARALLEL_DETECT;
4009 tp->serdes_counter =
4010 SERDES_PARALLEL_DET_TIMEOUT;
4012 goto restart_autoneg;
4016 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4017 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4021 return current_link_up;
4024 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4026 int current_link_up = 0;
4028 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4031 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4032 u32 txflags, rxflags;
4035 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4036 u32 local_adv = 0, remote_adv = 0;
4038 if (txflags & ANEG_CFG_PS1)
4039 local_adv |= ADVERTISE_1000XPAUSE;
4040 if (txflags & ANEG_CFG_PS2)
4041 local_adv |= ADVERTISE_1000XPSE_ASYM;
4043 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4044 remote_adv |= LPA_1000XPAUSE;
4045 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4046 remote_adv |= LPA_1000XPAUSE_ASYM;
4048 tg3_setup_flow_control(tp, local_adv, remote_adv);
4050 current_link_up = 1;
4052 for (i = 0; i < 30; i++) {
4055 (MAC_STATUS_SYNC_CHANGED |
4056 MAC_STATUS_CFG_CHANGED));
4058 if ((tr32(MAC_STATUS) &
4059 (MAC_STATUS_SYNC_CHANGED |
4060 MAC_STATUS_CFG_CHANGED)) == 0)
4064 mac_status = tr32(MAC_STATUS);
4065 if (current_link_up == 0 &&
4066 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4067 !(mac_status & MAC_STATUS_RCVD_CFG))
4068 current_link_up = 1;
4070 tg3_setup_flow_control(tp, 0, 0);
4072 /* Forcing 1000FD link up. */
4073 current_link_up = 1;
4075 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4078 tw32_f(MAC_MODE, tp->mac_mode);
4083 return current_link_up;
4086 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4089 u16 orig_active_speed;
4090 u8 orig_active_duplex;
4092 int current_link_up;
4095 orig_pause_cfg = tp->link_config.active_flowctrl;
4096 orig_active_speed = tp->link_config.active_speed;
4097 orig_active_duplex = tp->link_config.active_duplex;
4099 if (!tg3_flag(tp, HW_AUTONEG) &&
4100 netif_carrier_ok(tp->dev) &&
4101 tg3_flag(tp, INIT_COMPLETE)) {
4102 mac_status = tr32(MAC_STATUS);
4103 mac_status &= (MAC_STATUS_PCS_SYNCED |
4104 MAC_STATUS_SIGNAL_DET |
4105 MAC_STATUS_CFG_CHANGED |
4106 MAC_STATUS_RCVD_CFG);
4107 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4108 MAC_STATUS_SIGNAL_DET)) {
4109 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4110 MAC_STATUS_CFG_CHANGED));
4115 tw32_f(MAC_TX_AUTO_NEG, 0);
4117 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4118 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4119 tw32_f(MAC_MODE, tp->mac_mode);
4122 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4123 tg3_init_bcm8002(tp);
4125 /* Enable link change event even when serdes polling. */
4126 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4129 current_link_up = 0;
4130 mac_status = tr32(MAC_STATUS);
4132 if (tg3_flag(tp, HW_AUTONEG))
4133 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4135 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4137 tp->napi[0].hw_status->status =
4138 (SD_STATUS_UPDATED |
4139 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4141 for (i = 0; i < 100; i++) {
4142 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4143 MAC_STATUS_CFG_CHANGED));
4145 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4146 MAC_STATUS_CFG_CHANGED |
4147 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4151 mac_status = tr32(MAC_STATUS);
4152 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4153 current_link_up = 0;
4154 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4155 tp->serdes_counter == 0) {
4156 tw32_f(MAC_MODE, (tp->mac_mode |
4157 MAC_MODE_SEND_CONFIGS));
4159 tw32_f(MAC_MODE, tp->mac_mode);
4163 if (current_link_up == 1) {
4164 tp->link_config.active_speed = SPEED_1000;
4165 tp->link_config.active_duplex = DUPLEX_FULL;
4166 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4167 LED_CTRL_LNKLED_OVERRIDE |
4168 LED_CTRL_1000MBPS_ON));
4170 tp->link_config.active_speed = SPEED_INVALID;
4171 tp->link_config.active_duplex = DUPLEX_INVALID;
4172 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4173 LED_CTRL_LNKLED_OVERRIDE |
4174 LED_CTRL_TRAFFIC_OVERRIDE));
4177 if (current_link_up != netif_carrier_ok(tp->dev)) {
4178 if (current_link_up)
4179 netif_carrier_on(tp->dev);
4181 netif_carrier_off(tp->dev);
4182 tg3_link_report(tp);
4184 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4185 if (orig_pause_cfg != now_pause_cfg ||
4186 orig_active_speed != tp->link_config.active_speed ||
4187 orig_active_duplex != tp->link_config.active_duplex)
4188 tg3_link_report(tp);
4194 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4196 int current_link_up, err = 0;
4200 u32 local_adv, remote_adv;
4202 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4203 tw32_f(MAC_MODE, tp->mac_mode);
4209 (MAC_STATUS_SYNC_CHANGED |
4210 MAC_STATUS_CFG_CHANGED |
4211 MAC_STATUS_MI_COMPLETION |
4212 MAC_STATUS_LNKSTATE_CHANGED));
4218 current_link_up = 0;
4219 current_speed = SPEED_INVALID;
4220 current_duplex = DUPLEX_INVALID;
4222 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4223 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4224 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4225 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4226 bmsr |= BMSR_LSTATUS;
4228 bmsr &= ~BMSR_LSTATUS;
4231 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4233 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4234 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4235 /* do nothing, just check for link up at the end */
4236 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4239 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4240 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4241 ADVERTISE_1000XPAUSE |
4242 ADVERTISE_1000XPSE_ASYM |
4245 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4247 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4248 new_adv |= ADVERTISE_1000XHALF;
4249 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4250 new_adv |= ADVERTISE_1000XFULL;
4252 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4253 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4254 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4255 tg3_writephy(tp, MII_BMCR, bmcr);
4257 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4258 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4259 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4266 bmcr &= ~BMCR_SPEED1000;
4267 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4269 if (tp->link_config.duplex == DUPLEX_FULL)
4270 new_bmcr |= BMCR_FULLDPLX;
4272 if (new_bmcr != bmcr) {
4273 /* BMCR_SPEED1000 is a reserved bit that needs
4274 * to be set on write.
4276 new_bmcr |= BMCR_SPEED1000;
4278 /* Force a linkdown */
4279 if (netif_carrier_ok(tp->dev)) {
4282 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4283 adv &= ~(ADVERTISE_1000XFULL |
4284 ADVERTISE_1000XHALF |
4286 tg3_writephy(tp, MII_ADVERTISE, adv);
4287 tg3_writephy(tp, MII_BMCR, bmcr |
4291 netif_carrier_off(tp->dev);
4293 tg3_writephy(tp, MII_BMCR, new_bmcr);
4295 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4296 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4297 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4299 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4300 bmsr |= BMSR_LSTATUS;
4302 bmsr &= ~BMSR_LSTATUS;
4304 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4308 if (bmsr & BMSR_LSTATUS) {
4309 current_speed = SPEED_1000;
4310 current_link_up = 1;
4311 if (bmcr & BMCR_FULLDPLX)
4312 current_duplex = DUPLEX_FULL;
4314 current_duplex = DUPLEX_HALF;
4319 if (bmcr & BMCR_ANENABLE) {
4322 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4323 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4324 common = local_adv & remote_adv;
4325 if (common & (ADVERTISE_1000XHALF |
4326 ADVERTISE_1000XFULL)) {
4327 if (common & ADVERTISE_1000XFULL)
4328 current_duplex = DUPLEX_FULL;
4330 current_duplex = DUPLEX_HALF;
4331 } else if (!tg3_flag(tp, 5780_CLASS)) {
4332 /* Link is up via parallel detect */
4334 current_link_up = 0;
4339 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4340 tg3_setup_flow_control(tp, local_adv, remote_adv);
4342 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4343 if (tp->link_config.active_duplex == DUPLEX_HALF)
4344 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4346 tw32_f(MAC_MODE, tp->mac_mode);
4349 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4351 tp->link_config.active_speed = current_speed;
4352 tp->link_config.active_duplex = current_duplex;
4354 if (current_link_up != netif_carrier_ok(tp->dev)) {
4355 if (current_link_up)
4356 netif_carrier_on(tp->dev);
4358 netif_carrier_off(tp->dev);
4359 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4361 tg3_link_report(tp);
4366 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4368 if (tp->serdes_counter) {
4369 /* Give autoneg time to complete. */
4370 tp->serdes_counter--;
4374 if (!netif_carrier_ok(tp->dev) &&
4375 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4378 tg3_readphy(tp, MII_BMCR, &bmcr);
4379 if (bmcr & BMCR_ANENABLE) {
4382 /* Select shadow register 0x1f */
4383 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4384 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4386 /* Select expansion interrupt status register */
4387 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4388 MII_TG3_DSP_EXP1_INT_STAT);
4389 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4390 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4392 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4393 /* We have signal detect and not receiving
4394 * config code words, link is up by parallel
4398 bmcr &= ~BMCR_ANENABLE;
4399 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4400 tg3_writephy(tp, MII_BMCR, bmcr);
4401 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4404 } else if (netif_carrier_ok(tp->dev) &&
4405 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4406 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4409 /* Select expansion interrupt status register */
4410 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4411 MII_TG3_DSP_EXP1_INT_STAT);
4412 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4416 /* Config code words received, turn on autoneg. */
4417 tg3_readphy(tp, MII_BMCR, &bmcr);
4418 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4420 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4426 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4431 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4432 err = tg3_setup_fiber_phy(tp, force_reset);
4433 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4434 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4436 err = tg3_setup_copper_phy(tp, force_reset);
4438 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4441 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4442 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4444 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4449 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4450 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4451 tw32(GRC_MISC_CFG, val);
4454 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4455 (6 << TX_LENGTHS_IPG_SHIFT);
4456 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4457 val |= tr32(MAC_TX_LENGTHS) &
4458 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4459 TX_LENGTHS_CNT_DWN_VAL_MSK);
4461 if (tp->link_config.active_speed == SPEED_1000 &&
4462 tp->link_config.active_duplex == DUPLEX_HALF)
4463 tw32(MAC_TX_LENGTHS, val |
4464 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4466 tw32(MAC_TX_LENGTHS, val |
4467 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4469 if (!tg3_flag(tp, 5705_PLUS)) {
4470 if (netif_carrier_ok(tp->dev)) {
4471 tw32(HOSTCC_STAT_COAL_TICKS,
4472 tp->coal.stats_block_coalesce_usecs);
4474 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4478 if (tg3_flag(tp, ASPM_WORKAROUND)) {
4479 val = tr32(PCIE_PWR_MGMT_THRESH);
4480 if (!netif_carrier_ok(tp->dev))
4481 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4484 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4485 tw32(PCIE_PWR_MGMT_THRESH, val);
4491 static inline int tg3_irq_sync(struct tg3 *tp)
4493 return tp->irq_sync;
4496 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4500 dst = (u32 *)((u8 *)dst + off);
4501 for (i = 0; i < len; i += sizeof(u32))
4502 *dst++ = tr32(off + i);
4505 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4507 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4508 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4509 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4510 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4511 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4512 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4513 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4514 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4515 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4516 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4517 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4518 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4519 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4520 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4521 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4522 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4523 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4524 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4525 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4527 if (tg3_flag(tp, SUPPORT_MSIX))
4528 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4530 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4531 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4532 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4533 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4534 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4535 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4536 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4537 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4539 if (!tg3_flag(tp, 5705_PLUS)) {
4540 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4541 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4542 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4545 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4546 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4547 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4548 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4549 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4551 if (tg3_flag(tp, NVRAM))
4552 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4555 static void tg3_dump_state(struct tg3 *tp)
4560 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4562 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4566 if (tg3_flag(tp, PCI_EXPRESS)) {
4567 /* Read up to but not including private PCI registers */
4568 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4569 regs[i / sizeof(u32)] = tr32(i);
4571 tg3_dump_legacy_regs(tp, regs);
4573 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4574 if (!regs[i + 0] && !regs[i + 1] &&
4575 !regs[i + 2] && !regs[i + 3])
4578 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4580 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4585 for (i = 0; i < tp->irq_cnt; i++) {
4586 struct tg3_napi *tnapi = &tp->napi[i];
4588 /* SW status block */
4590 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4592 tnapi->hw_status->status,
4593 tnapi->hw_status->status_tag,
4594 tnapi->hw_status->rx_jumbo_consumer,
4595 tnapi->hw_status->rx_consumer,
4596 tnapi->hw_status->rx_mini_consumer,
4597 tnapi->hw_status->idx[0].rx_producer,
4598 tnapi->hw_status->idx[0].tx_consumer);
4601 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4603 tnapi->last_tag, tnapi->last_irq_tag,
4604 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4606 tnapi->prodring.rx_std_prod_idx,
4607 tnapi->prodring.rx_std_cons_idx,
4608 tnapi->prodring.rx_jmb_prod_idx,
4609 tnapi->prodring.rx_jmb_cons_idx);
4613 /* This is called whenever we suspect that the system chipset is re-
4614 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4615 * is bogus tx completions. We try to recover by setting the
4616 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4619 static void tg3_tx_recover(struct tg3 *tp)
4621 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4622 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4624 netdev_warn(tp->dev,
4625 "The system may be re-ordering memory-mapped I/O "
4626 "cycles to the network device, attempting to recover. "
4627 "Please report the problem to the driver maintainer "
4628 "and include system chipset information.\n");
4630 spin_lock(&tp->lock);
4631 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4632 spin_unlock(&tp->lock);
4635 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4637 /* Tell compiler to fetch tx indices from memory. */
4639 return tnapi->tx_pending -
4640 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4643 /* Tigon3 never reports partial packet sends. So we do not
4644 * need special logic to handle SKBs that have not had all
4645 * of their frags sent yet, like SunGEM does.
4647 static void tg3_tx(struct tg3_napi *tnapi)
4649 struct tg3 *tp = tnapi->tp;
4650 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4651 u32 sw_idx = tnapi->tx_cons;
4652 struct netdev_queue *txq;
4653 int index = tnapi - tp->napi;
4655 if (tg3_flag(tp, ENABLE_TSS))
4658 txq = netdev_get_tx_queue(tp->dev, index);
4660 while (sw_idx != hw_idx) {
4661 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4662 struct sk_buff *skb = ri->skb;
4665 if (unlikely(skb == NULL)) {
4670 pci_unmap_single(tp->pdev,
4671 dma_unmap_addr(ri, mapping),
4677 sw_idx = NEXT_TX(sw_idx);
4679 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4680 ri = &tnapi->tx_buffers[sw_idx];
4681 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4684 pci_unmap_page(tp->pdev,
4685 dma_unmap_addr(ri, mapping),
4686 skb_shinfo(skb)->frags[i].size,
4688 sw_idx = NEXT_TX(sw_idx);
4693 if (unlikely(tx_bug)) {
4699 tnapi->tx_cons = sw_idx;
4701 /* Need to make the tx_cons update visible to tg3_start_xmit()
4702 * before checking for netif_queue_stopped(). Without the
4703 * memory barrier, there is a small possibility that tg3_start_xmit()
4704 * will miss it and cause the queue to be stopped forever.
4708 if (unlikely(netif_tx_queue_stopped(txq) &&
4709 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4710 __netif_tx_lock(txq, smp_processor_id());
4711 if (netif_tx_queue_stopped(txq) &&
4712 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4713 netif_tx_wake_queue(txq);
4714 __netif_tx_unlock(txq);
4718 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4723 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4724 map_sz, PCI_DMA_FROMDEVICE);
4725 dev_kfree_skb_any(ri->skb);
4729 /* Returns size of skb allocated or < 0 on error.
4731 * We only need to fill in the address because the other members
4732 * of the RX descriptor are invariant, see tg3_init_rings.
4734 * Note the purposeful assymetry of cpu vs. chip accesses. For
4735 * posting buffers we only dirty the first cache line of the RX
4736 * descriptor (containing the address). Whereas for the RX status
4737 * buffers the cpu only reads the last cacheline of the RX descriptor
4738 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4740 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4741 u32 opaque_key, u32 dest_idx_unmasked)
4743 struct tg3_rx_buffer_desc *desc;
4744 struct ring_info *map;
4745 struct sk_buff *skb;
4747 int skb_size, dest_idx;
4749 switch (opaque_key) {
4750 case RXD_OPAQUE_RING_STD:
4751 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4752 desc = &tpr->rx_std[dest_idx];
4753 map = &tpr->rx_std_buffers[dest_idx];
4754 skb_size = tp->rx_pkt_map_sz;
4757 case RXD_OPAQUE_RING_JUMBO:
4758 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4759 desc = &tpr->rx_jmb[dest_idx].std;
4760 map = &tpr->rx_jmb_buffers[dest_idx];
4761 skb_size = TG3_RX_JMB_MAP_SZ;
4768 /* Do not overwrite any of the map or rp information
4769 * until we are sure we can commit to a new buffer.
4771 * Callers depend upon this behavior and assume that
4772 * we leave everything unchanged if we fail.
4774 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4778 skb_reserve(skb, tp->rx_offset);
4780 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4781 PCI_DMA_FROMDEVICE);
4782 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4788 dma_unmap_addr_set(map, mapping, mapping);
4790 desc->addr_hi = ((u64)mapping >> 32);
4791 desc->addr_lo = ((u64)mapping & 0xffffffff);
4796 /* We only need to move over in the address because the other
4797 * members of the RX descriptor are invariant. See notes above
4798 * tg3_alloc_rx_skb for full details.
4800 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4801 struct tg3_rx_prodring_set *dpr,
4802 u32 opaque_key, int src_idx,
4803 u32 dest_idx_unmasked)
4805 struct tg3 *tp = tnapi->tp;
4806 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4807 struct ring_info *src_map, *dest_map;
4808 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4811 switch (opaque_key) {
4812 case RXD_OPAQUE_RING_STD:
4813 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4814 dest_desc = &dpr->rx_std[dest_idx];
4815 dest_map = &dpr->rx_std_buffers[dest_idx];
4816 src_desc = &spr->rx_std[src_idx];
4817 src_map = &spr->rx_std_buffers[src_idx];
4820 case RXD_OPAQUE_RING_JUMBO:
4821 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4822 dest_desc = &dpr->rx_jmb[dest_idx].std;
4823 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4824 src_desc = &spr->rx_jmb[src_idx].std;
4825 src_map = &spr->rx_jmb_buffers[src_idx];
4832 dest_map->skb = src_map->skb;
4833 dma_unmap_addr_set(dest_map, mapping,
4834 dma_unmap_addr(src_map, mapping));
4835 dest_desc->addr_hi = src_desc->addr_hi;
4836 dest_desc->addr_lo = src_desc->addr_lo;
4838 /* Ensure that the update to the skb happens after the physical
4839 * addresses have been transferred to the new BD location.
4843 src_map->skb = NULL;
4846 /* The RX ring scheme is composed of multiple rings which post fresh
4847 * buffers to the chip, and one special ring the chip uses to report
4848 * status back to the host.
4850 * The special ring reports the status of received packets to the
4851 * host. The chip does not write into the original descriptor the
4852 * RX buffer was obtained from. The chip simply takes the original
4853 * descriptor as provided by the host, updates the status and length
4854 * field, then writes this into the next status ring entry.
4856 * Each ring the host uses to post buffers to the chip is described
4857 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
4858 * it is first placed into the on-chip ram. When the packet's length
4859 * is known, it walks down the TG3_BDINFO entries to select the ring.
4860 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4861 * which is within the range of the new packet's length is chosen.
4863 * The "separate ring for rx status" scheme may sound queer, but it makes
4864 * sense from a cache coherency perspective. If only the host writes
4865 * to the buffer post rings, and only the chip writes to the rx status
4866 * rings, then cache lines never move beyond shared-modified state.
4867 * If both the host and chip were to write into the same ring, cache line
4868 * eviction could occur since both entities want it in an exclusive state.
4870 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4872 struct tg3 *tp = tnapi->tp;
4873 u32 work_mask, rx_std_posted = 0;
4874 u32 std_prod_idx, jmb_prod_idx;
4875 u32 sw_idx = tnapi->rx_rcb_ptr;
4878 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4880 hw_idx = *(tnapi->rx_rcb_prod_idx);
4882 * We need to order the read of hw_idx and the read of
4883 * the opaque cookie.
4888 std_prod_idx = tpr->rx_std_prod_idx;
4889 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4890 while (sw_idx != hw_idx && budget > 0) {
4891 struct ring_info *ri;
4892 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4894 struct sk_buff *skb;
4895 dma_addr_t dma_addr;
4896 u32 opaque_key, desc_idx, *post_ptr;
4898 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4899 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4900 if (opaque_key == RXD_OPAQUE_RING_STD) {
4901 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4902 dma_addr = dma_unmap_addr(ri, mapping);
4904 post_ptr = &std_prod_idx;
4906 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4907 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4908 dma_addr = dma_unmap_addr(ri, mapping);
4910 post_ptr = &jmb_prod_idx;
4912 goto next_pkt_nopost;
4914 work_mask |= opaque_key;
4916 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4917 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4919 tg3_recycle_rx(tnapi, tpr, opaque_key,
4920 desc_idx, *post_ptr);
4922 /* Other statistics kept track of by card. */
4927 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4930 if (len > TG3_RX_COPY_THRESH(tp)) {
4933 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4938 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4939 PCI_DMA_FROMDEVICE);
4941 /* Ensure that the update to the skb happens
4942 * after the usage of the old DMA mapping.
4950 struct sk_buff *copy_skb;
4952 tg3_recycle_rx(tnapi, tpr, opaque_key,
4953 desc_idx, *post_ptr);
4955 copy_skb = netdev_alloc_skb(tp->dev, len +
4957 if (copy_skb == NULL)
4958 goto drop_it_no_recycle;
4960 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4961 skb_put(copy_skb, len);
4962 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4963 skb_copy_from_linear_data(skb, copy_skb->data, len);
4964 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4966 /* We'll reuse the original ring buffer. */
4970 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4971 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4972 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4973 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4974 skb->ip_summed = CHECKSUM_UNNECESSARY;
4976 skb_checksum_none_assert(skb);
4978 skb->protocol = eth_type_trans(skb, tp->dev);
4980 if (len > (tp->dev->mtu + ETH_HLEN) &&
4981 skb->protocol != htons(ETH_P_8021Q)) {
4983 goto drop_it_no_recycle;
4986 if (desc->type_flags & RXD_FLAG_VLAN &&
4987 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4988 __vlan_hwaccel_put_tag(skb,
4989 desc->err_vlan & RXD_VLAN_MASK);
4991 napi_gro_receive(&tnapi->napi, skb);
4999 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5000 tpr->rx_std_prod_idx = std_prod_idx &
5001 tp->rx_std_ring_mask;
5002 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5003 tpr->rx_std_prod_idx);
5004 work_mask &= ~RXD_OPAQUE_RING_STD;
5009 sw_idx &= tp->rx_ret_ring_mask;
5011 /* Refresh hw_idx to see if there is new work */
5012 if (sw_idx == hw_idx) {
5013 hw_idx = *(tnapi->rx_rcb_prod_idx);
5018 /* ACK the status ring. */
5019 tnapi->rx_rcb_ptr = sw_idx;
5020 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5022 /* Refill RX ring(s). */
5023 if (!tg3_flag(tp, ENABLE_RSS)) {
5024 if (work_mask & RXD_OPAQUE_RING_STD) {
5025 tpr->rx_std_prod_idx = std_prod_idx &
5026 tp->rx_std_ring_mask;
5027 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5028 tpr->rx_std_prod_idx);
5030 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5031 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5032 tp->rx_jmb_ring_mask;
5033 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5034 tpr->rx_jmb_prod_idx);
5037 } else if (work_mask) {
5038 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5039 * updated before the producer indices can be updated.
5043 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5044 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5046 if (tnapi != &tp->napi[1])
5047 napi_schedule(&tp->napi[1].napi);
5053 static void tg3_poll_link(struct tg3 *tp)
5055 /* handle link change and other phy events */
5056 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5057 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5059 if (sblk->status & SD_STATUS_LINK_CHG) {
5060 sblk->status = SD_STATUS_UPDATED |
5061 (sblk->status & ~SD_STATUS_LINK_CHG);
5062 spin_lock(&tp->lock);
5063 if (tg3_flag(tp, USE_PHYLIB)) {
5065 (MAC_STATUS_SYNC_CHANGED |
5066 MAC_STATUS_CFG_CHANGED |
5067 MAC_STATUS_MI_COMPLETION |
5068 MAC_STATUS_LNKSTATE_CHANGED));
5071 tg3_setup_phy(tp, 0);
5072 spin_unlock(&tp->lock);
/* Transfer recycled RX buffers from a source producer ring set @spr
 * (per-vector ring) into the destination set @dpr (hardware-visible
 * ring of napi[0]).  Copies both the standard and the jumbo rings in
 * bounded chunks (@cpycnt), honoring ring wrap via the ring masks.
 * NOTE(review): chunk is elided - return value setup, memory barriers
 * and the surrounding while-loops are among the missing lines.
 */
5077 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5078 struct tg3_rx_prodring_set *dpr,
5079 struct tg3_rx_prodring_set *spr)
5081 u32 si, di, cpycnt, src_prod_idx;
5085 src_prod_idx = spr->rx_std_prod_idx;
5087 /* Make sure updates to the rx_std_buffers[] entries and the
5088 * standard producer index are seen in the correct order.
/* Nothing to do when the source ring has no new entries. */
5092 if (spr->rx_std_cons_idx == src_prod_idx)
/* Contiguous chunk size: either up to the producer, or up to the
 * end of the ring when the producer has wrapped. */
5095 if (spr->rx_std_cons_idx < src_prod_idx)
5096 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5098 cpycnt = tp->rx_std_ring_mask + 1 -
5099 spr->rx_std_cons_idx;
/* Also bound the copy by the room left before the destination wraps. */
5101 cpycnt = min(cpycnt,
5102 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5104 si = spr->rx_std_cons_idx;
5105 di = dpr->rx_std_prod_idx;
/* Destination slots must be free (no skb) before being overwritten. */
5107 for (i = di; i < di + cpycnt; i++) {
5108 if (dpr->rx_std_buffers[i].skb) {
5118 /* Ensure that updates to the rx_std_buffers ring and the
5119 * shadowed hardware producer ring from tg3_recycle_skb() are
5120 * ordered correctly WRT the skb check above.
5124 memcpy(&dpr->rx_std_buffers[di],
5125 &spr->rx_std_buffers[si],
5126 cpycnt * sizeof(struct ring_info));
/* Mirror the DMA addresses into the destination descriptor ring. */
5128 for (i = 0; i < cpycnt; i++, di++, si++) {
5129 struct tg3_rx_buffer_desc *sbd, *dbd;
5130 sbd = &spr->rx_std[si];
5131 dbd = &dpr->rx_std[di];
5132 dbd->addr_hi = sbd->addr_hi;
5133 dbd->addr_lo = sbd->addr_lo;
/* Advance both indices modulo ring size. */
5136 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5137 tp->rx_std_ring_mask;
5138 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5139 tp->rx_std_ring_mask;
/* Same transfer procedure, repeated for the jumbo ring. */
5143 src_prod_idx = spr->rx_jmb_prod_idx;
5145 /* Make sure updates to the rx_jmb_buffers[] entries and
5146 * the jumbo producer index are seen in the correct order.
5150 if (spr->rx_jmb_cons_idx == src_prod_idx)
5153 if (spr->rx_jmb_cons_idx < src_prod_idx)
5154 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5156 cpycnt = tp->rx_jmb_ring_mask + 1 -
5157 spr->rx_jmb_cons_idx;
5159 cpycnt = min(cpycnt,
5160 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5162 si = spr->rx_jmb_cons_idx;
5163 di = dpr->rx_jmb_prod_idx;
5165 for (i = di; i < di + cpycnt; i++) {
5166 if (dpr->rx_jmb_buffers[i].skb) {
5176 /* Ensure that updates to the rx_jmb_buffers ring and the
5177 * shadowed hardware producer ring from tg3_recycle_skb() are
5178 * ordered correctly WRT the skb check above.
5182 memcpy(&dpr->rx_jmb_buffers[di],
5183 &spr->rx_jmb_buffers[si],
5184 cpycnt * sizeof(struct ring_info));
5186 for (i = 0; i < cpycnt; i++, di++, si++) {
5187 struct tg3_rx_buffer_desc *sbd, *dbd;
5188 sbd = &spr->rx_jmb[si].std;
5189 dbd = &dpr->rx_jmb[di].std;
5190 dbd->addr_hi = sbd->addr_hi;
5191 dbd->addr_lo = sbd->addr_lo;
5194 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5195 tp->rx_jmb_ring_mask;
5196 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5197 tp->rx_jmb_ring_mask;
/* Core NAPI work routine shared by tg3_poll() and tg3_poll_msix():
 * reap TX completions, then process RX within the NAPI @budget.
 * With RSS enabled, vector 1 additionally consolidates recycled RX
 * buffers from all other vectors into napi[0]'s hardware rings and
 * kicks the std/jumbo producer mailboxes when they advanced.
 * Returns updated work_done.  NOTE(review): chunk is elided.
 */
5203 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5205 struct tg3 *tp = tnapi->tp;
5207 /* run TX completion thread */
5208 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
/* A pending TX recovery aborts normal processing (elided path). */
5210 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5214 /* run RX thread, within the bounds set by NAPI.
5215 * All RX "locking" is done by ensuring outside
5216 * code synchronizes with tg3->napi.poll()
5218 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5219 work_done += tg3_rx(tnapi, budget - work_done);
5221 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5222 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
/* Snapshot producer indices to detect movement after the xfer. */
5224 u32 std_prod_idx = dpr->rx_std_prod_idx;
5225 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5227 for (i = 1; i < tp->irq_cnt; i++)
5228 err |= tg3_rx_prodring_xfer(tp, dpr,
5229 &tp->napi[i].prodring);
/* Only ring the mailbox doorbells if new buffers were posted. */
5233 if (std_prod_idx != dpr->rx_std_prod_idx)
5234 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5235 dpr->rx_std_prod_idx);
5237 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5238 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5239 dpr->rx_jmb_prod_idx);
/* On xfer error (elided check), force a coalescing-now interrupt. */
5244 tw32_f(HOSTCC_MODE, tp->coal_now);
/* NAPI poll handler for MSI-X vectors other than vector 0 (tagged
 * status blocks).  Loops doing work (loop construct elided), records
 * the status tag so interrupt re-enable tells the hardware how far we
 * processed, and completes NAPI when no TX/RX work remains.
 * Falls through to a reset on TX recovery (elided label).
 */
5250 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5252 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5253 struct tg3 *tp = tnapi->tp;
5255 struct tg3_hw_status *sblk = tnapi->hw_status;
5258 work_done = tg3_poll_work(tnapi, work_done, budget);
5260 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5263 if (unlikely(work_done >= budget))
5266 /* tp->last_tag is used in tg3_int_reenable() below
5267 * to tell the hw how much work has been processed,
5268 * so we must read it before checking for more work.
5270 tnapi->last_tag = sblk->status_tag;
5271 tnapi->last_irq_tag = tnapi->last_tag;
5274 /* check for RX/TX work to do */
5275 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5276 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5277 napi_complete(napi);
5278 /* Reenable interrupts. */
5279 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
/* TX-recovery error path: finish NAPI and schedule a chip reset. */
5288 /* work_done is guaranteed to be less than budget. */
5289 napi_complete(napi);
5290 schedule_work(&tp->reset_task);
/* Inspect hardware error sources (flow attention, MSI status, DMA
 * status) after the status block flagged SD_STATUS_ERROR.  If a real
 * error is found, dump state (elided) and schedule a chip reset; the
 * ERROR_PROCESSED flag makes this a one-shot until the reset runs.
 */
5294 static void tg3_process_error(struct tg3 *tp)
5297 bool real_error = false;
5299 if (tg3_flag(tp, ERROR_PROCESSED))
5302 /* Check Flow Attention register */
5303 val = tr32(HOSTCC_FLOW_ATTN);
/* MBUF low-watermark attention alone is not treated as fatal. */
5304 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5305 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5309 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5310 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5314 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5315 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5324 tg3_flag_set(tp, ERROR_PROCESSED);
5325 schedule_work(&tp->reset_task);
/* NAPI poll handler for vector 0 (and the only handler when not using
 * MSI-X).  Handles chip errors, link polling (elided call), TX/RX work,
 * and both tagged and non-tagged status-block acknowledgment before
 * re-enabling interrupts.  On TX recovery, schedules a chip reset.
 */
5328 static int tg3_poll(struct napi_struct *napi, int budget)
5330 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5331 struct tg3 *tp = tnapi->tp;
5333 struct tg3_hw_status *sblk = tnapi->hw_status;
5336 if (sblk->status & SD_STATUS_ERROR)
5337 tg3_process_error(tp);
5341 work_done = tg3_poll_work(tnapi, work_done, budget);
5343 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5346 if (unlikely(work_done >= budget))
5349 if (tg3_flag(tp, TAGGED_STATUS)) {
5350 /* tp->last_tag is used in tg3_int_reenable() below
5351 * to tell the hw how much work has been processed,
5352 * so we must read it before checking for more work.
5354 tnapi->last_tag = sblk->status_tag;
5355 tnapi->last_irq_tag = tnapi->last_tag;
/* Non-tagged mode: ack by clearing the UPDATED bit directly. */
5358 sblk->status &= ~SD_STATUS_UPDATED;
5360 if (likely(!tg3_has_work(tnapi))) {
5361 napi_complete(napi);
5362 tg3_int_reenable(tnapi);
/* TX-recovery error path: finish NAPI and schedule a chip reset. */
5370 /* work_done is guaranteed to be less than budget. */
5371 napi_complete(napi);
5372 schedule_work(&tp->reset_task);
/* Disable NAPI on all vectors, highest index first. */
5376 static void tg3_napi_disable(struct tg3 *tp)
5380 for (i = tp->irq_cnt - 1; i >= 0; i--)
5381 napi_disable(&tp->napi[i].napi);
/* Enable NAPI on all vectors, lowest index first (reverse of disable). */
5384 static void tg3_napi_enable(struct tg3 *tp)
5388 for (i = 0; i < tp->irq_cnt; i++)
5389 napi_enable(&tp->napi[i].napi);
/* Register NAPI contexts: vector 0 uses tg3_poll, remaining MSI-X
 * vectors use tg3_poll_msix.  Weight is the conventional 64. */
5392 static void tg3_napi_init(struct tg3 *tp)
5396 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5397 for (i = 1; i < tp->irq_cnt; i++)
5398 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
/* Unregister all NAPI contexts added by tg3_napi_init(). */
5401 static void tg3_napi_fini(struct tg3 *tp)
5405 for (i = 0; i < tp->irq_cnt; i++)
5406 netif_napi_del(&tp->napi[i].napi);
/* Quiesce the data path: refresh trans_start so the watchdog does not
 * fire while stopped, then disable NAPI and all TX queues. */
5409 static inline void tg3_netif_stop(struct tg3 *tp)
5411 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5412 tg3_napi_disable(tp);
5413 netif_tx_disable(tp->dev);
/* Restart the data path after tg3_netif_stop(): wake TX queues,
 * re-enable NAPI, mark the status block updated so the first poll
 * runs, and unmask interrupts. */
5416 static inline void tg3_netif_start(struct tg3 *tp)
5418 /* NOTE: unconditional netif_tx_wake_all_queues is only
5419 * appropriate so long as all callers are assured to
5420 * have free tx slots (such as after tg3_init_hw)
5422 netif_tx_wake_all_queues(tp->dev);
5424 tg3_napi_enable(tp);
5425 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5426 tg3_enable_ints(tp);
/* Synchronize with in-flight interrupt handlers on every vector.
 * Must not be re-entered (BUG_ON on a non-zero irq_sync; the flag
 * set/clear lines are elided from this view). */
5429 static void tg3_irq_quiesce(struct tg3 *tp)
5433 BUG_ON(tp->irq_sync);
5438 for (i = 0; i < tp->irq_cnt; i++)
5439 synchronize_irq(tp->napi[i].irq_vec);
5442 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5443 * If irq_sync is non-zero, then the IRQ handler must be synchronized
5444 * with as well. Most of the time, this is not necessary except when
5445 * shutting down the device.
/* Takes tp->lock with BHs disabled; optionally quiesces IRQs first
 * (the irq_sync conditional line is elided from this view). */
5447 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5449 spin_lock_bh(&tp->lock);
5451 tg3_irq_quiesce(tp);
/* Release the lock taken by tg3_full_lock(). */
5454 static inline void tg3_full_unlock(struct tg3 *tp)
5456 spin_unlock_bh(&tp->lock);
5459 /* One-shot MSI handler - Chip automatically disables interrupt
5460 * after sending MSI so driver doesn't have to do it.
/* Prefetch the status block and next RX descriptor, then hand work
 * to NAPI unless an irq_sync quiesce is in progress. */
5462 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5464 struct tg3_napi *tnapi = dev_id;
5465 struct tg3 *tp = tnapi->tp;
5467 prefetch(tnapi->hw_status);
5469 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5471 if (likely(!tg3_irq_sync(tp)))
5472 napi_schedule(&tnapi->napi);
5477 /* MSI ISR - No need to check for interrupt sharing and no need to
5478 * flush status block and interrupt mailbox. PCI ordering rules
5479 * guarantee that MSI will arrive after the status block.
/* Non-one-shot MSI: must write the interrupt mailbox ourselves to
 * de-assert and mask further IRQs before scheduling NAPI. */
5481 static irqreturn_t tg3_msi(int irq, void *dev_id)
5483 struct tg3_napi *tnapi = dev_id;
5484 struct tg3 *tp = tnapi->tp;
5486 prefetch(tnapi->hw_status);
5488 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5490 * Writing any value to intr-mbox-0 clears PCI INTA# and
5491 * chip-internal interrupt pending events.
5492 * Writing non-zero to intr-mbox-0 additional tells the
5493 * NIC to stop sending us irqs, engaging "in-intr-handler"
5496 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5497 if (likely(!tg3_irq_sync(tp)))
5498 napi_schedule(&tnapi->napi);
5500 return IRQ_RETVAL(1);
/* Legacy INTx interrupt handler (non-tagged status).  Checks whether
 * the interrupt is really ours (shared-line case), masks further
 * interrupts via the mailbox, and schedules NAPI if work is pending.
 * Returns IRQ_HANDLED/IRQ_NONE via @handled.  NOTE(review): elided.
 */
5503 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5505 struct tg3_napi *tnapi = dev_id;
5506 struct tg3 *tp = tnapi->tp;
5507 struct tg3_hw_status *sblk = tnapi->hw_status;
5508 unsigned int handled = 1;
5510 /* In INTx mode, it is possible for the interrupt to arrive at
5511 * the CPU before the status block posted prior to the interrupt.
5512 * Reading the PCI State register will confirm whether the
5513 * interrupt is ours and will flush the status block.
5515 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5516 if (tg3_flag(tp, CHIP_RESETTING) ||
5517 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5524 * Writing any value to intr-mbox-0 clears PCI INTA# and
5525 * chip-internal interrupt pending events.
5526 * Writing non-zero to intr-mbox-0 additional tells the
5527 * NIC to stop sending us irqs, engaging "in-intr-handler"
5530 * Flush the mailbox to de-assert the IRQ immediately to prevent
5531 * spurious interrupts. The flush impacts performance but
5532 * excessive spurious interrupts can be worse in some cases.
5534 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5535 if (tg3_irq_sync(tp))
5537 sblk->status &= ~SD_STATUS_UPDATED;
5538 if (likely(tg3_has_work(tnapi))) {
5539 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5540 napi_schedule(&tnapi->napi);
5542 /* No work, shared interrupt perhaps? re-enable
5543 * interrupts, and flush that PCI write
5545 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5549 return IRQ_RETVAL(handled);
/* Legacy INTx interrupt handler for chips using tagged status blocks.
 * Uses status_tag vs last_irq_tag (instead of SD_STATUS_UPDATED) to
 * decide whether the interrupt is ours, masks via the mailbox, and
 * schedules NAPI.  NOTE(review): chunk is elided.
 */
5552 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5554 struct tg3_napi *tnapi = dev_id;
5555 struct tg3 *tp = tnapi->tp;
5556 struct tg3_hw_status *sblk = tnapi->hw_status;
5557 unsigned int handled = 1;
5559 /* In INTx mode, it is possible for the interrupt to arrive at
5560 * the CPU before the status block posted prior to the interrupt.
5561 * Reading the PCI State register will confirm whether the
5562 * interrupt is ours and will flush the status block.
5564 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5565 if (tg3_flag(tp, CHIP_RESETTING) ||
5566 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5573 * writing any value to intr-mbox-0 clears PCI INTA# and
5574 * chip-internal interrupt pending events.
5575 * writing non-zero to intr-mbox-0 additional tells the
5576 * NIC to stop sending us irqs, engaging "in-intr-handler"
5579 * Flush the mailbox to de-assert the IRQ immediately to prevent
5580 * spurious interrupts. The flush impacts performance but
5581 * excessive spurious interrupts can be worse in some cases.
5583 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5586 * In a shared interrupt configuration, sometimes other devices'
5587 * interrupts will scream. We record the current status tag here
5588 * so that the above check can report that the screaming interrupts
5589 * are unhandled. Eventually they will be silenced.
5591 tnapi->last_irq_tag = sblk->status_tag;
5593 if (tg3_irq_sync(tp))
5596 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5598 napi_schedule(&tnapi->napi);
5601 return IRQ_RETVAL(handled);
5604 /* ISR for interrupt test */
/* Minimal handler used only by the self-test path: if the interrupt
 * is ours (status updated or INTx active), disable interrupts and
 * report handled; otherwise report not-handled. */
5605 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5607 struct tg3_napi *tnapi = dev_id;
5608 struct tg3 *tp = tnapi->tp;
5609 struct tg3_hw_status *sblk = tnapi->hw_status;
5611 if ((sblk->status & SD_STATUS_UPDATED) ||
5612 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5613 tg3_disable_ints(tp);
5614 return IRQ_RETVAL(1);
5616 return IRQ_RETVAL(0);
/* Forward declarations for the init/halt routines used below. */
5619 static int tg3_init_hw(struct tg3 *, int);
5620 static int tg3_halt(struct tg3 *, int, int);
5622 /* Restart hardware after configuration changes, self-test, etc.
5623 * Invoked with tp->lock held.
/* On init failure: halt the chip, and - after dropping the lock -
 * stop the timer and (per elided lines) clean up before re-locking.
 * The __releases/__acquires annotations document that lock dance. */
5625 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5626 __releases(tp->lock)
5627 __acquires(tp->lock)
5631 err = tg3_init_hw(tp, reset_phy);
5634 "Failed to re-initialize device, aborting\n");
5635 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5636 tg3_full_unlock(tp);
5637 del_timer_sync(&tp->timer);
5639 tg3_napi_enable(tp);
5641 tg3_full_lock(tp, 0);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: fake an interrupt on every vector so netconsole etc.
 * can make progress without real IRQ delivery. */
5647 static void tg3_poll_controller(struct net_device *dev)
5650 struct tg3 *tp = netdev_priv(dev);
5652 for (i = 0; i < tp->irq_cnt; i++)
5653 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
/* Workqueue handler that performs a full chip reset outside interrupt
 * context (scheduled from error paths and tg3_tx_timeout()).  Halts
 * and re-initializes the hardware, restores safe mailbox write
 * methods when recovering from a TX hang, and restarts the data path.
 * NOTE(review): chunk is elided (netif stop, error label, etc.).
 */
5657 static void tg3_reset_task(struct work_struct *work)
5659 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5661 unsigned int restart_timer;
5663 tg3_full_lock(tp, 0);
/* Device went down before the work ran - nothing to reset. */
5665 if (!netif_running(tp->dev)) {
5666 tg3_full_unlock(tp);
5670 tg3_full_unlock(tp);
/* Re-acquire with irq_sync=1: we are about to halt the chip. */
5676 tg3_full_lock(tp, 1);
5678 restart_timer = tg3_flag(tp, RESTART_TIMER);
5679 tg3_flag_clear(tp, RESTART_TIMER);
5681 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
/* Fall back to flushing mailbox writes while recovering from a
 * TX hang attributed to write reordering. */
5682 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5683 tp->write32_rx_mbox = tg3_write_flush_reg32;
5684 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5685 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5688 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5689 err = tg3_init_hw(tp, 1);
5693 tg3_netif_start(tp);
5696 mod_timer(&tp->timer, jiffies + 1);
5699 tg3_full_unlock(tp);
/* ndo_tx_timeout hook: log (and, per elided line, dump state) then
 * defer the actual recovery to tg3_reset_task(). */
5705 static void tg3_tx_timeout(struct net_device *dev)
5707 struct tg3 *tp = netdev_priv(dev);
5709 if (netif_msg_tx_err(tp)) {
5710 netdev_err(dev, "transmit timed out, resetting\n");
5714 schedule_work(&tp->reset_task);
5717 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
/* Returns non-zero when [mapping, mapping+len+8) wraps a 32-bit
 * boundary; the first test is a fast filter on the low 32 bits. */
5718 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5720 u32 base = (u32) mapping & 0xffffffff;
5722 return (base > 0xffffdcc0) && (base + len + 8 < base);
5725 /* Test for DMA addresses > 40-bit */
/* Only meaningful on 64-bit HIGHMEM configs for chips with the
 * 40-bit DMA bug; the non-buggy/other-config return is elided. */
5726 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5729 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5730 if (tg3_flag(tp, 40BIT_DMA_BUG))
5731 return ((u64) mapping + len) > DMA_BIT_MASK(40);
/* Fill one TX descriptor at @entry: DMA address split into hi/lo
 * 32-bit halves, length+flags word, and VLAN tag / MSS word.  The
 * low bit of mss_and_is_end marks the last descriptor of a packet.
 */
5738 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5739 dma_addr_t mapping, int len, u32 flags,
5742 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5743 int is_end = (mss_and_is_end & 0x1);
5744 u32 mss = (mss_and_is_end >> 1);
5748 flags |= TXD_FLAG_END;
5749 if (flags & TXD_FLAG_VLAN) {
/* VLAN tag is carried in the upper 16 bits of @flags. */
5750 vlan_tag = flags >> 16;
5753 vlan_tag |= (mss << TXD_MSS_SHIFT);
5755 txd->addr_hi = ((u64) mapping >> 32);
5756 txd->addr_lo = ((u64) mapping & 0xffffffff);
5757 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5758 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
/* Undo the DMA mappings for a partially-mapped TX skb on an error
 * path: unmap the linear head at tx_prod, then the first @last+1
 * fragments, walking the ring with NEXT_TX().
 */
5761 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5762 struct sk_buff *skb, int last)
5765 u32 entry = tnapi->tx_prod;
5766 struct ring_info *txb = &tnapi->tx_buffers[entry];
5768 pci_unmap_single(tnapi->tp->pdev,
5769 dma_unmap_addr(txb, mapping),
5772 for (i = 0; i <= last; i++) {
5773 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5775 entry = NEXT_TX(entry);
5776 txb = &tnapi->tx_buffers[entry];
5778 pci_unmap_page(tnapi->tp->pdev,
5779 dma_unmap_addr(txb, mapping),
5780 frag->size, PCI_DMA_TODEVICE);
5784 /* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Linearize @skb into a freshly-allocated copy (with extra headroom
 * for 4-byte alignment on 5701), remap it, re-check the 4G boundary
 * constraint, and queue it with a single descriptor.  On any failure
 * the new skb is dropped (elided error-return lines).  Returns an
 * int status - value conventions not visible in this elided chunk.
 */
5785 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5786 struct sk_buff *skb,
5787 u32 base_flags, u32 mss)
5789 struct tg3 *tp = tnapi->tp;
5790 struct sk_buff *new_skb;
5791 dma_addr_t new_addr = 0;
5792 u32 entry = tnapi->tx_prod;
5795 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5796 new_skb = skb_copy(skb, GFP_ATOMIC);
/* 5701 needs the payload 4-byte aligned; expand headroom to fix. */
5798 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5800 new_skb = skb_copy_expand(skb,
5801 skb_headroom(skb) + more_headroom,
5802 skb_tailroom(skb), GFP_ATOMIC);
5808 /* New SKB is guaranteed to be linear. */
5809 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5811 /* Make sure the mapping succeeded */
5812 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5814 dev_kfree_skb(new_skb);
5816 /* Make sure new skb does not cross any 4G boundaries.
5817 * Drop the packet if it does.
5819 } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
5820 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5821 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5824 dev_kfree_skb(new_skb);
5826 tnapi->tx_buffers[entry].skb = new_skb;
5827 dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5830 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5831 base_flags, 1 | (mss << 1));
/* Forward declaration: tg3_tso_bug() re-enters the xmit path below. */
5840 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5842 /* Use GSO to workaround a rare TSO bug that may be triggered when the
5843 * TSO header is greater than 80 bytes.
/* Software-segment the skb (GSO minus TSO) and transmit each segment
 * individually.  Stops the queue first if the worst-case descriptor
 * estimate (3 per segment) might not fit.  The segment-walk loop and
 * original-skb free are among the elided lines.
 */
5845 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5847 struct sk_buff *segs, *nskb;
5848 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5850 /* Estimate the number of fragments in the worst case */
5851 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5852 netif_stop_queue(tp->dev);
5854 /* netif_tx_stop_queue() must be done before checking
5855 * checking tx index in tg3_tx_avail() below, because in
5856 * tg3_tx(), we update tx index before checking for
5857 * netif_tx_queue_stopped().
5860 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5861 return NETDEV_TX_BUSY;
5863 netif_wake_queue(tp->dev);
5866 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5868 goto tg3_tso_bug_end;
5874 tg3_start_xmit(nskb, tp->dev);
5880 return NETDEV_TX_OK;
5883 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5884 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
/* Main transmit path: maps the skb head and fragments to TX
 * descriptors, building checksum/TSO/VLAN flags along the way.  If
 * any mapping trips a hardware DMA erratum (4G crossing, >40-bit
 * address, short-DMA, 5701 bug), the packet is unmapped and rerouted
 * through tigon3_dma_hwbug_workaround().  Finally rings the TX
 * producer mailbox and throttles the queue when the ring is nearly
 * full.  NOTE(review): chunk is elided (error labels, stats, etc.).
 */
5886 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5888 struct tg3 *tp = netdev_priv(dev);
5889 u32 len, entry, base_flags, mss;
5890 int i = -1, would_hit_hwbug;
5892 struct tg3_napi *tnapi;
5893 struct netdev_queue *txq;
5896 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5897 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
/* With TSS, vector 0 handles no TX - adjust tnapi (elided line). */
5898 if (tg3_flag(tp, ENABLE_TSS))
5901 /* We are running in BH disabled context with netif_tx_lock
5902 * and TX reclaim runs via tp->napi.poll inside of a software
5903 * interrupt. Furthermore, IRQ processing runs lockless so we have
5904 * no IRQ context deadlocks to worry about either. Rejoice!
5906 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5907 if (!netif_tx_queue_stopped(txq)) {
5908 netif_tx_stop_queue(txq);
5910 /* This is a hard error, log it. */
5912 "BUG! Tx Ring full when queue awake!\n");
5914 return NETDEV_TX_BUSY;
5917 entry = tnapi->tx_prod;
5919 if (skb->ip_summed == CHECKSUM_PARTIAL)
5920 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5922 mss = skb_shinfo(skb)->gso_size;
/* TSO setup: compute header length and per-ASIC MSS encoding. */
5925 u32 tcp_opt_len, hdr_len;
5927 if (skb_header_cloned(skb) &&
5928 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5934 tcp_opt_len = tcp_optlen(skb);
5936 if (skb_is_gso_v6(skb)) {
5937 hdr_len = skb_headlen(skb) - ETH_HLEN;
5941 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5942 hdr_len = ip_tcp_len + tcp_opt_len;
5945 iph->tot_len = htons(mss + hdr_len);
/* Headers >80 bytes trip a TSO erratum - fall back to GSO. */
5948 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5949 tg3_flag(tp, TSO_BUG))
5950 return tg3_tso_bug(tp, skb);
5952 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5953 TXD_FLAG_CPU_POST_DMA);
5955 if (tg3_flag(tp, HW_TSO_1) ||
5956 tg3_flag(tp, HW_TSO_2) ||
5957 tg3_flag(tp, HW_TSO_3)) {
5958 tcp_hdr(skb)->check = 0;
5959 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5961 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
/* Header length is folded into mss/base_flags differently per
 * HW TSO generation. */
5966 if (tg3_flag(tp, HW_TSO_3)) {
5967 mss |= (hdr_len & 0xc) << 12;
5969 base_flags |= 0x00000010;
5970 base_flags |= (hdr_len & 0x3e0) << 5;
5971 } else if (tg3_flag(tp, HW_TSO_2))
5972 mss |= hdr_len << 9;
5973 else if (tg3_flag(tp, HW_TSO_1) ||
5974 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5975 if (tcp_opt_len || iph->ihl > 5) {
5978 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5979 mss |= (tsflags << 11);
5982 if (tcp_opt_len || iph->ihl > 5) {
5985 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5986 base_flags |= tsflags << 12;
5991 if (vlan_tx_tag_present(skb))
5992 base_flags |= (TXD_FLAG_VLAN |
5993 (vlan_tx_tag_get(skb) << 16));
5995 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
5996 !mss && skb->len > VLAN_ETH_FRAME_LEN)
5997 base_flags |= TXD_FLAG_JMB_PKT;
5999 len = skb_headlen(skb);
6001 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6002 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6007 tnapi->tx_buffers[entry].skb = skb;
6008 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6010 would_hit_hwbug = 0;
/* Check the head mapping against every known DMA erratum. */
6012 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6013 would_hit_hwbug = 1;
6015 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6016 tg3_4g_overflow_test(mapping, len))
6017 would_hit_hwbug = 1;
6019 if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6020 tg3_40bit_overflow_test(tp, mapping, len))
6021 would_hit_hwbug = 1;
6023 if (tg3_flag(tp, 5701_DMA_BUG))
6024 would_hit_hwbug = 1;
6026 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6027 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6029 entry = NEXT_TX(entry);
6031 /* Now loop through additional data fragments, and queue them. */
6032 if (skb_shinfo(skb)->nr_frags > 0) {
6033 last = skb_shinfo(skb)->nr_frags - 1;
6034 for (i = 0; i <= last; i++) {
6035 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6038 mapping = pci_map_page(tp->pdev,
6041 len, PCI_DMA_TODEVICE);
6043 tnapi->tx_buffers[entry].skb = NULL;
6044 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6046 if (pci_dma_mapping_error(tp->pdev, mapping))
6049 if (tg3_flag(tp, SHORT_DMA_BUG) &&
6051 would_hit_hwbug = 1;
6053 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6054 tg3_4g_overflow_test(mapping, len))
6055 would_hit_hwbug = 1;
6057 if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6058 tg3_40bit_overflow_test(tp, mapping, len))
6059 would_hit_hwbug = 1;
6061 if (tg3_flag(tp, HW_TSO_1) ||
6062 tg3_flag(tp, HW_TSO_2) ||
6063 tg3_flag(tp, HW_TSO_3))
6064 tg3_set_txd(tnapi, entry, mapping, len,
6065 base_flags, (i == last)|(mss << 1));
6067 tg3_set_txd(tnapi, entry, mapping, len,
6068 base_flags, (i == last));
6070 entry = NEXT_TX(entry);
6074 if (would_hit_hwbug) {
6075 tg3_skb_error_unmap(tnapi, skb, i);
6077 /* If the workaround fails due to memory/mapping
6078 * failure, silently drop this packet.
6080 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6083 entry = NEXT_TX(tnapi->tx_prod);
6086 /* Packets are ready, update Tx producer idx local and on card. */
6087 tw32_tx_mbox(tnapi->prodmbox, entry);
6089 tnapi->tx_prod = entry;
6090 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6091 netif_tx_stop_queue(txq);
6093 /* netif_tx_stop_queue() must be done before checking
6094 * checking tx index in tg3_tx_avail() below, because in
6095 * tg3_tx(), we update tx index before checking for
6096 * netif_tx_queue_stopped().
6099 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6100 netif_tx_wake_queue(txq);
6106 return NETDEV_TX_OK;
/* Drop path (elided label): unmap, free the skb, clear the slot. */
6109 tg3_skb_error_unmap(tnapi, skb, i);
6111 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6112 return NETDEV_TX_OK;
/* Toggle internal MAC loopback according to NETIF_F_LOOPBACK in
 * @features.  MAC_MODE writes are serialized under tp->lock; disable
 * triggers a link re-check via tg3_setup_phy().  Early-return lines
 * (already in requested state) are elided from this view.
 */
6115 static void tg3_set_loopback(struct net_device *dev, u32 features)
6117 struct tg3 *tp = netdev_priv(dev);
6119 if (features & NETIF_F_LOOPBACK) {
6120 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6124 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6125 * loopback mode if Half-Duplex mode was negotiated earlier.
6127 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6129 /* Enable internal MAC loopback mode */
6130 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6131 spin_lock_bh(&tp->lock);
6132 tw32(MAC_MODE, tp->mac_mode);
6133 netif_carrier_on(tp->dev);
6134 spin_unlock_bh(&tp->lock);
6135 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6137 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6140 /* Disable internal MAC loopback mode */
6141 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6142 spin_lock_bh(&tp->lock);
6143 tw32(MAC_MODE, tp->mac_mode);
6144 /* Force link status check */
6145 tg3_setup_phy(tp, 1);
6146 spin_unlock_bh(&tp->lock);
6147 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
/* ndo_fix_features hook: 5780-class chips cannot do TSO with jumbo
 * MTU, so strip all TSO feature bits in that case. */
6151 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6153 struct tg3 *tp = netdev_priv(dev);
6155 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6156 features &= ~NETIF_F_ALL_TSO;
/* ndo_set_features hook: only the LOOPBACK bit needs active
 * handling here, and only while the interface is running. */
6161 static int tg3_set_features(struct net_device *dev, u32 features)
6163 u32 changed = dev->features ^ features;
6165 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6166 tg3_set_loopback(dev, features);
/* Record a new MTU and flip the jumbo/TSO-related flags that depend
 * on it: jumbo MTU enables the jumbo ring (and, on 5780-class, drops
 * TSO capability); standard MTU restores both.  The dev->mtu store
 * itself is among the elided lines.
 */
6171 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6176 if (new_mtu > ETH_DATA_LEN) {
6177 if (tg3_flag(tp, 5780_CLASS)) {
6178 netdev_update_features(dev);
6179 tg3_flag_clear(tp, TSO_CAPABLE);
6181 tg3_flag_set(tp, JUMBO_RING_ENABLE);
6184 if (tg3_flag(tp, 5780_CLASS)) {
6185 tg3_flag_set(tp, TSO_CAPABLE);
6186 netdev_update_features(dev);
6188 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
/* ndo_change_mtu hook: validate the range, then either just record
 * the MTU (interface down) or stop the device, halt and restart the
 * hardware with the new MTU under the full lock.  Returns 0 or a
 * negative errno (range-error return line is elided).
 */
6192 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6194 struct tg3 *tp = netdev_priv(dev);
6197 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6200 if (!netif_running(dev)) {
6201 /* We'll just catch it later when the
6204 tg3_set_mtu(dev, tp, new_mtu);
6212 tg3_full_lock(tp, 1);
6214 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6216 tg3_set_mtu(dev, tp, new_mtu);
6218 err = tg3_restart_hw(tp, 0);
6221 tg3_netif_start(tp);
6223 tg3_full_unlock(tp);
/* Free all posted RX buffers in a producer ring set.  For per-vector
 * sets (not napi[0]'s) only the cons..prod window is populated; for
 * the primary set every slot is walked.  Jumbo buffers are freed only
 * when the chip supports them.
 */
6231 static void tg3_rx_prodring_free(struct tg3 *tp,
6232 struct tg3_rx_prodring_set *tpr)
6236 if (tpr != &tp->napi[0].prodring) {
6237 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6238 i = (i + 1) & tp->rx_std_ring_mask)
6239 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6242 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6243 for (i = tpr->rx_jmb_cons_idx;
6244 i != tpr->rx_jmb_prod_idx;
6245 i = (i + 1) & tp->rx_jmb_ring_mask) {
6246 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
/* Primary (napi[0]) ring set: free every slot unconditionally. */
6254 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6255 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6258 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6259 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6260 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6265 /* Initialize rx rings for packet processing.
6267 * The chip has been shut down and the driver detached from
6268 * the networking, so no interrupts or new tx packets will
6269 * end up in the driver. tp->{tx,}lock are held and thus
/* Reset indices, write the ring-invariant descriptor fields (length,
 * END flag, opaque ring/index cookie), then allocate fresh SKBs up to
 * rx_pending / rx_jumbo_pending.  Per-vector sets only need their
 * shadow buffer arrays zeroed.  Partial allocation shrinks the
 * pending count with a warning; a full failure path (elided label)
 * frees what was allocated via tg3_rx_prodring_free().
 */
6272 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6273 struct tg3_rx_prodring_set *tpr)
6275 u32 i, rx_pkt_dma_sz;
6277 tpr->rx_std_cons_idx = 0;
6278 tpr->rx_std_prod_idx = 0;
6279 tpr->rx_jmb_cons_idx = 0;
6280 tpr->rx_jmb_prod_idx = 0;
6282 if (tpr != &tp->napi[0].prodring) {
6283 memset(&tpr->rx_std_buffers[0], 0,
6284 TG3_RX_STD_BUFF_RING_SIZE(tp));
6285 if (tpr->rx_jmb_buffers)
6286 memset(&tpr->rx_jmb_buffers[0], 0,
6287 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6291 /* Zero out all descriptors. */
6292 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6294 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
/* 5780-class jumbo support uses larger std-ring buffers instead of
 * a separate jumbo ring. */
6295 if (tg3_flag(tp, 5780_CLASS) &&
6296 tp->dev->mtu > ETH_DATA_LEN)
6297 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6298 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6300 /* Initialize invariants of the rings, we only set this
6301 * stuff once. This works because the card does not
6302 * write into the rx buffer posting rings.
6304 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6305 struct tg3_rx_buffer_desc *rxd;
6307 rxd = &tpr->rx_std[i];
6308 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6309 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6310 rxd->opaque = (RXD_OPAQUE_RING_STD |
6311 (i << RXD_OPAQUE_INDEX_SHIFT));
6314 /* Now allocate fresh SKBs for each rx ring. */
6315 for (i = 0; i < tp->rx_pending; i++) {
6316 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6317 netdev_warn(tp->dev,
6318 "Using a smaller RX standard ring. Only "
6319 "%d out of %d buffers were allocated "
6320 "successfully\n", i, tp->rx_pending);
/* Skip the jumbo ring on chips without one. */
6328 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6331 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6333 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6336 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6337 struct tg3_rx_buffer_desc *rxd;
6339 rxd = &tpr->rx_jmb[i].std;
6340 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6341 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6343 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6344 (i << RXD_OPAQUE_INDEX_SHIFT));
6347 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6348 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6349 netdev_warn(tp->dev,
6350 "Using a smaller RX jumbo ring. Only %d "
6351 "out of %d buffers were allocated "
6352 "successfully\n", i, tp->rx_jumbo_pending);
6355 tp->rx_jumbo_pending = i;
/* Error path (elided label): release everything allocated so far. */
6364 tg3_rx_prodring_free(tp, tpr);
/* Release a producer ring set's memory: the kmalloc'd shadow buffer
 * arrays and the DMA-coherent descriptor rings (NULL guards on the
 * coherent frees are among the elided lines).  Safe on a partially
 * initialized set. */
6368 static void tg3_rx_prodring_fini(struct tg3 *tp,
6369 struct tg3_rx_prodring_set *tpr)
6371 kfree(tpr->rx_std_buffers);
6372 tpr->rx_std_buffers = NULL;
6373 kfree(tpr->rx_jmb_buffers);
6374 tpr->rx_jmb_buffers = NULL;
6376 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6377 tpr->rx_std, tpr->rx_std_mapping);
6381 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6382 tpr->rx_jmb, tpr->rx_jmb_mapping);
/* Allocate a producer ring set: zeroed shadow buffer arrays plus
 * DMA-coherent descriptor rings; the jumbo pieces only on capable
 * non-5780-class chips.  On any failure (elided label) everything is
 * torn down via tg3_rx_prodring_fini() and an error is returned. */
6387 static int tg3_rx_prodring_init(struct tg3 *tp,
6388 struct tg3_rx_prodring_set *tpr)
6390 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6392 if (!tpr->rx_std_buffers)
6395 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6396 TG3_RX_STD_RING_BYTES(tp),
6397 &tpr->rx_std_mapping,
6402 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6403 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6405 if (!tpr->rx_jmb_buffers)
6408 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6409 TG3_RX_JMB_RING_BYTES(tp),
6410 &tpr->rx_jmb_mapping,
/* Error path (elided label): unwind partial allocations. */
6419 tg3_rx_prodring_fini(tp, tpr);
6423 /* Free up pending packets in all rx/tx rings.
6425 * The chip has been shut down and the driver detached from
6426 * the networking, so no interrupts or new tx packets will
6427 * end up in the driver. tp->{tx,}lock is not held and we are not
6428 * in an interrupt context and thus may sleep.
/* For each vector: release posted RX buffers, then walk the TX ring
 * unmapping the head and every fragment of each in-flight skb before
 * freeing it.  Ring-index advance lines are elided from this view. */
6430 static void tg3_free_rings(struct tg3 *tp)
6434 for (j = 0; j < tp->irq_cnt; j++) {
6435 struct tg3_napi *tnapi = &tp->napi[j];
6437 tg3_rx_prodring_free(tp, &tnapi->prodring);
6439 if (!tnapi->tx_buffers)
6442 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6443 struct ring_info *txp;
6444 struct sk_buff *skb;
6447 txp = &tnapi->tx_buffers[i];
6455 pci_unmap_single(tp->pdev,
6456 dma_unmap_addr(txp, mapping),
6463 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6464 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6465 pci_unmap_page(tp->pdev,
6466 dma_unmap_addr(txp, mapping),
6467 skb_shinfo(skb)->frags[k].size,
6472 dev_kfree_skb_any(skb);
6477 /* Initialize tx/rx rings for packet processing.
6479 * The chip has been shut down and the driver detached from
6480 * the networking, so no interrupts or new tx packets will
6481 * end up in the driver. tp->{tx,}lock are held and thus
/* Free all existing buffers, then per vector: clear status block and
 * tags, zero the TX and RX-return rings, and repopulate the producer
 * rings via tg3_rx_prodring_alloc() (failure cleanup is elided). */
6484 static int tg3_init_rings(struct tg3 *tp)
6488 /* Free up all the SKBs. */
6491 for (i = 0; i < tp->irq_cnt; i++) {
6492 struct tg3_napi *tnapi = &tp->napi[i];
6494 tnapi->last_tag = 0;
6495 tnapi->last_irq_tag = 0;
6496 tnapi->hw_status->status = 0;
6497 tnapi->hw_status->status_tag = 0;
6498 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6503 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6505 tnapi->rx_rcb_ptr = 0;
6507 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6509 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6519 * Must not be invoked with interrupt sources disabled and
6520 * the hardware shutdown down.
/* Release all DMA-coherent and kmalloc'd per-vector resources (TX
 * ring, TX buffer array, RX return ring, producer ring set, status
 * block) plus the shared hardware-statistics block.  Every pointer is
 * NULLed after free so the routine is idempotent. */
6522 static void tg3_free_consistent(struct tg3 *tp)
6526 for (i = 0; i < tp->irq_cnt; i++) {
6527 struct tg3_napi *tnapi = &tp->napi[i];
6529 if (tnapi->tx_ring) {
6530 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6531 tnapi->tx_ring, tnapi->tx_desc_mapping);
6532 tnapi->tx_ring = NULL;
6535 kfree(tnapi->tx_buffers);
6536 tnapi->tx_buffers = NULL;
6538 if (tnapi->rx_rcb) {
6539 dma_free_coherent(&tp->pdev->dev,
6540 TG3_RX_RCB_RING_BYTES(tp),
6542 tnapi->rx_rcb_mapping);
6543 tnapi->rx_rcb = NULL;
6546 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6548 if (tnapi->hw_status) {
6549 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6551 tnapi->status_mapping);
6552 tnapi->hw_status = NULL;
6557 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6558 tp->hw_stats, tp->stats_mapping);
6559 tp->hw_stats = NULL;
6564 * Must not be invoked with interrupt sources disabled and
6565 * the hardware shutdown down. Can sleep.
/*
 * tg3_alloc_consistent() - allocate all DMA-coherent memory the device
 * needs: the shared statistics block, then per NAPI vector a status
 * block, RX producer ring state, and (depending on the TSS/RSS flags)
 * a TX ring + tx_buffers array and an RX return ring.
 * On any failure it unwinds via tg3_free_consistent() (visible at the
 * bottom); returns nonzero in that case (exact paths elided here).
 */
6567 static int tg3_alloc_consistent(struct tg3 *tp)
6571 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6572 					  sizeof(struct tg3_hw_stats),
6578 	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6580 	for (i = 0; i < tp->irq_cnt; i++) {
6581 		struct tg3_napi *tnapi = &tp->napi[i];
6582 		struct tg3_hw_status *sblk;
6584 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6586 						      &tnapi->status_mapping,
6588 		if (!tnapi->hw_status)
6591 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6592 		sblk = tnapi->hw_status;
6594 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6597 		/* If multivector TSS is enabled, vector 0 does not handle
6598 		 * tx interrupts. Don't allocate any resources for it.
		/* TX resources: vector 0 when TSS is off, vectors 1..n when
		 * TSS is on. */
6600 		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6601 		    (i && tg3_flag(tp, ENABLE_TSS))) {
6602 			tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6605 			if (!tnapi->tx_buffers)
6608 			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6610 							    &tnapi->tx_desc_mapping,
6612 			if (!tnapi->tx_ring)
6617 		 * When RSS is enabled, the status block format changes
6618 		 * slightly. The "rx_jumbo_consumer", "reserved",
6619 		 * and "rx_mini_consumer" members get mapped to the
6620 		 * other three rx return ring producer indexes.
		/* Selection between these four fields is by vector index;
		 * the switch/case lines are elided from this listing. */
6624 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6627 			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6630 			tnapi->rx_rcb_prod_idx = &sblk->reserved;
6633 			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6638 		 * If multivector RSS is enabled, vector 0 does not handle
6639 		 * rx or tx interrupts. Don't allocate any resources for it.
6641 		if (!i && tg3_flag(tp, ENABLE_RSS))
6644 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6645 						   TG3_RX_RCB_RING_BYTES(tp),
6646 						   &tnapi->rx_rcb_mapping,
6651 		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	/* Error unwind: release everything allocated so far. */
6657 	tg3_free_consistent(tp);
6661 #define MAX_WAIT_CNT 1000
6663 /* To stop a block, clear the enable bit and poll till it
6664 * clears. tp->lock is held.
/*
 * tg3_stop_block() - clear @enable_bit in the block-mode register at @ofs
 * and poll up to MAX_WAIT_CNT iterations for the hardware to acknowledge.
 * @silent suppresses the timeout message.  Returns nonzero on timeout
 * (return statements elided from this listing); caller holds tp->lock
 * per the preceding comment.
 */
6666 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6671 	if (tg3_flag(tp, 5705_PLUS)) {
6678 		/* We can't enable/disable these bits of the
6679 		 * 5705/5750, just say success.
6692 	for (i = 0; i < MAX_WAIT_CNT; i++) {
6695 		if ((val & enable_bit) == 0)
6699 	if (i == MAX_WAIT_CNT && !silent) {
6700 		dev_err(&tp->pdev->dev,
6701 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6709 /* tp->lock is held. */
/*
 * tg3_abort_hw() - orderly shutdown of the MAC/DMA pipeline: disable
 * interrupts and RX, stop every receive-side block, then the send-side
 * blocks, the MAC TX engine, host coalescing, the FTQs, and finally the
 * buffer manager and memory arbiter.  ORs together the tg3_stop_block()
 * results and clears all host status/statistics blocks at the end.
 * Caller holds tp->lock (comment above this function).
 */
6710 static int tg3_abort_hw(struct tg3 *tp, int silent)
6714 	tg3_disable_ints(tp);
6716 	tp->rx_mode &= ~RX_MODE_ENABLE;
6717 	tw32_f(MAC_RX_MODE, tp->rx_mode);
	/* Receive-path blocks first so no new work arrives. */
6720 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6721 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6722 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6723 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6724 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6725 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
	/* Then the send-path blocks. */
6727 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6728 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6729 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6730 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6731 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6732 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6733 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6735 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6736 	tw32_f(MAC_MODE, tp->mac_mode);
6739 	tp->tx_mode &= ~TX_MODE_ENABLE;
6740 	tw32_f(MAC_TX_MODE, tp->tx_mode);
	/* Poll for the MAC TX engine to drain. */
6742 	for (i = 0; i < MAX_WAIT_CNT; i++) {
6744 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6747 	if (i >= MAX_WAIT_CNT) {
6748 		dev_err(&tp->pdev->dev,
6749 			"%s timed out, TX_MODE_ENABLE will not clear "
6750 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6754 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6755 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6756 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
	/* Pulse the flow-through queue reset. */
6758 	tw32(FTQ_RESET, 0xffffffff);
6759 	tw32(FTQ_RESET, 0x00000000);
6761 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6762 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6764 	for (i = 0; i < tp->irq_cnt; i++) {
6765 		struct tg3_napi *tnapi = &tp->napi[i];
6766 		if (tnapi->hw_status)
6767 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6770 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
/*
 * tg3_ape_send_event() - post @event to the APE management processor.
 * Bails out early if the firmware is NCSI, the APE signature is wrong,
 * or the APE firmware is not ready.  Under the APE memory lock it waits
 * (bounded loop) for any previous event to be consumed before writing
 * the new one, then rings the APE_EVENT_1 doorbell.
 */
6775 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6780 	/* NCSI does not support APE events */
6781 	if (tg3_flag(tp, APE_HAS_NCSI))
6784 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6785 	if (apedata != APE_SEG_SIG_MAGIC)
6788 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6789 	if (!(apedata & APE_FW_STATUS_READY))
6792 	/* Wait for up to 1 millisecond for APE to service previous event. */
6793 	for (i = 0; i < 10; i++) {
6794 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6797 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		/* Only queue the new event once the previous one is gone. */
6799 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6800 			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6801 					event | APE_EVENT_STATUS_EVENT_PENDING);
6803 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6805 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
	/* Finally ring the doorbell if the event was queued. */
6811 	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6812 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
/*
 * tg3_ape_driver_state_change() - tell the APE firmware about a driver
 * lifecycle transition (@kind is RESET_KIND_INIT / _SHUTDOWN / _SUSPEND).
 * Writes the host-segment descriptors for INIT, wipes the host segment
 * signature for SHUTDOWN (optionally arming WoL), then forwards a
 * matching event via tg3_ape_send_event().  No-op without ENABLE_APE.
 */
6815 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6820 	if (!tg3_flag(tp, ENABLE_APE))
6824 	case RESET_KIND_INIT:
6825 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6826 				APE_HOST_SEG_SIG_MAGIC);
6827 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6828 				APE_HOST_SEG_LEN_MAGIC);
		/* Bump the init counter so the APE can detect re-inits. */
6829 		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6830 		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6831 		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6832 			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6833 		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6834 				APE_HOST_BEHAV_NO_PHYLOCK);
6835 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6836 				    TG3_APE_HOST_DRVR_STATE_START);
6838 		event = APE_EVENT_STATUS_STATE_START;
6840 	case RESET_KIND_SHUTDOWN:
6841 		/* With the interface we are currently using,
6842 		 * APE does not track driver state. Wiping
6843 		 * out the HOST SEGMENT SIGNATURE forces
6844 		 * the APE to assume OS absent status.
6846 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6848 		if (device_may_wakeup(&tp->pdev->dev) &&
6849 		    tg3_flag(tp, WOL_ENABLE)) {
6850 			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6851 					    TG3_APE_HOST_WOL_SPEED_AUTO);
6852 			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6854 			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6856 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6858 		event = APE_EVENT_STATUS_STATE_UNLOAD;
6860 	case RESET_KIND_SUSPEND:
6861 		event = APE_EVENT_STATUS_STATE_SUSPEND;
6867 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6869 	tg3_ape_send_event(tp, event);
6872 /* tp->lock is held. */
/*
 * tg3_write_sig_pre_reset() - before a chip reset, write the firmware
 * mailbox magic and, when the ASF "new handshake" is in use, record the
 * pending driver state (START/UNLOAD/SUSPEND depending on @kind) in
 * NIC SRAM.  Also notifies the APE for INIT and SUSPEND kinds.
 * Caller holds tp->lock (comment above this function).
 */
6873 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6875 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6876 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6878 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6880 		case RESET_KIND_INIT:
6881 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6885 		case RESET_KIND_SHUTDOWN:
6886 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6890 		case RESET_KIND_SUSPEND:
6891 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
	/* SHUTDOWN is reported to the APE post-reset instead; see
	 * tg3_write_sig_post_reset(). */
6900 	if (kind == RESET_KIND_INIT ||
6901 	    kind == RESET_KIND_SUSPEND)
6902 		tg3_ape_driver_state_change(tp, kind);
6905 /* tp->lock is held. */
/*
 * tg3_write_sig_post_reset() - after a chip reset, record the *_DONE
 * driver state in NIC SRAM when the ASF new handshake is in use, and
 * notify the APE of a SHUTDOWN.  Caller holds tp->lock.
 */
6906 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6908 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6910 		case RESET_KIND_INIT:
6911 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6912 				      DRV_STATE_START_DONE);
6915 		case RESET_KIND_SHUTDOWN:
6916 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6917 				      DRV_STATE_UNLOAD_DONE);
6925 	if (kind == RESET_KIND_SHUTDOWN)
6926 		tg3_ape_driver_state_change(tp, kind);
6929 /* tp->lock is held. */
/*
 * tg3_write_sig_legacy() - legacy (pre-new-handshake) ASF signature:
 * write the driver state for @kind into NIC SRAM only when ASF is
 * enabled.  Caller holds tp->lock.
 */
6930 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6932 	if (tg3_flag(tp, ENABLE_ASF)) {
6934 		case RESET_KIND_INIT:
6935 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6939 		case RESET_KIND_SHUTDOWN:
6940 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6944 		case RESET_KIND_SUSPEND:
6945 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
/*
 * tg3_poll_fw() - wait for on-chip firmware to finish booting after a
 * reset.  5906 parts poll VCPU_STATUS_INIT_DONE (up to 20ms); all others
 * poll the firmware mailbox for the inverted MAGIC1 handshake value.
 * A timeout is NOT an error (some Sun boards ship without firmware) but
 * is logged once via the NO_FWARE_REPORTED flag.
 */
6955 static int tg3_poll_fw(struct tg3 *tp)
6960 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6961 		/* Wait up to 20ms for init done. */
6962 		for (i = 0; i < 200; i++) {
6963 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6970 	/* Wait for firmware initialization to complete. */
6971 	for (i = 0; i < 100000; i++) {
6972 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		/* Bootcode writes back the bitwise complement of MAGIC1
		 * when it is done. */
6973 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6978 	/* Chip might not be fitted with firmware. Some Sun onboard
6979 	 * parts are configured like that. So don't signal the timeout
6980 	 * of the above loop as an error, but do report the lack of
6981 	 * running firmware once.
6983 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
6984 		tg3_flag_set(tp, NO_FWARE_REPORTED);
6986 		netdev_info(tp->dev, "No firmware running\n");
6989 	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6990 		/* The 57765 A0 needs a little more
6991 		 * time to do some important work.
6999 /* Save PCI command register before chip reset */
/* Stash PCI_COMMAND so tg3_restore_pci_state() can reinstate it after the
 * core-clock reset clears it. */
7000 static void tg3_save_pci_state(struct tg3 *tp)
7002 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7005 /* Restore PCI state after chip reset */
/*
 * tg3_restore_pci_state() - reinstate PCI config-space state wiped by the
 * chip reset: indirect-access enable, retry/APE-access bits in
 * TG3PCI_PCISTATE, the saved PCI_COMMAND word, read-request size or
 * cacheline/latency (non-5785), PCI-X relaxed-ordering clear, and the
 * MSI enable bit on 5780-class chips.
 */
7006 static void tg3_restore_pci_state(struct tg3 *tp)
7010 	/* Re-enable indirect register accesses. */
7011 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7012 			       tp->misc_host_ctrl);
7014 	/* Set MAX PCI retry to zero. */
7015 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7016 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7017 	    tg3_flag(tp, PCIX_MODE))
7018 		val |= PCISTATE_RETRY_SAME_DMA;
7019 	/* Allow reads and writes to the APE register and memory space. */
7020 	if (tg3_flag(tp, ENABLE_APE))
7021 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7022 		       PCISTATE_ALLOW_APE_SHMEM_WR |
7023 		       PCISTATE_ALLOW_APE_PSPACE_WR;
7024 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7026 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7028 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7029 		if (tg3_flag(tp, PCI_EXPRESS))
7030 			pcie_set_readrq(tp->pdev, tp->pcie_readrq);
		/* Conventional PCI path: restore cacheline size and
		 * latency timer instead. */
7032 			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7033 					      tp->pci_cacheline_sz);
7034 			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7039 	/* Make sure PCI-X relaxed ordering bit is clear. */
7040 	if (tg3_flag(tp, PCIX_MODE)) {
7043 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7045 		pcix_cmd &= ~PCI_X_CMD_ERO;
7046 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7050 	if (tg3_flag(tp, 5780_CLASS)) {
7052 		/* Chip reset on 5780 will reset MSI enable bit,
7053 		 * so need to restore it.
7055 		if (tg3_flag(tp, USING_MSI)) {
7058 			pci_read_config_word(tp->pdev,
7059 					     tp->msi_cap + PCI_MSI_FLAGS,
7061 			pci_write_config_word(tp->pdev,
7062 					      tp->msi_cap + PCI_MSI_FLAGS,
7063 					      ctrl | PCI_MSI_FLAGS_ENABLE);
7064 			val = tr32(MSGINT_MODE);
7065 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7070 static void tg3_stop_fw(struct tg3 *);
7072 /* tp->lock is held. */
/*
 * tg3_chip_reset() - full GRC core-clock reset of the chip and the
 * minimal re-initialization needed to make it usable again:
 *   1. quiesce (GRC APE lock, save PCI state, disable 5701 readl
 *      workaround, flag CHIP_RESETTING and sync IRQs);
 *   2. issue GRC_MISC_CFG_CORECLK_RESET (with PCIe/5906/gphy tweaks);
 *   3. recover (flush posted writes, fix up PCIe DEVCTL/DEVSTA, restore
 *      PCI state, re-enable memory arbiter, restore GRC/MAC modes);
 *   4. wait for firmware (tg3_poll_fw()) and re-probe the ASF flags
 *      from NIC SRAM.
 * Returns the tg3_poll_fw() result path (elided).  Caller holds tp->lock.
 * NOTE(review): this listing is heavily elided; treat the intermediate
 * control flow as indicative only and verify against the full source.
 */
7073 static int tg3_chip_reset(struct tg3 *tp)
7076 	void (*write_op)(struct tg3 *, u32, u32);
7081 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7083 	/* No matching tg3_nvram_unlock() after this because
7084 	 * chip reset below will undo the nvram lock.
7086 	tp->nvram_lock_cnt = 0;
7088 	/* GRC_MISC_CFG core clock reset will clear the memory
7089 	 * enable bit in PCI register 4 and the MSI enable bit
7090 	 * on some chips, so we save relevant registers here.
7092 	tg3_save_pci_state(tp);
7094 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7095 	    tg3_flag(tp, 5755_PLUS))
7096 		tw32(GRC_FASTBOOT_PC, 0);
7099 	 * We must avoid the readl() that normally takes place.
7100 	 * It locks machines, causes machine checks, and other
7101 	 * fun things. So, temporarily disable the 5701
7102 	 * hardware workaround, while we do the reset.
7104 	write_op = tp->write32;
7105 	if (write_op == tg3_write_flush_reg32)
7106 		tp->write32 = tg3_write32;
7108 	/* Prevent the irq handler from reading or writing PCI registers
7109 	 * during chip reset when the memory enable bit in the PCI command
7110 	 * register may be cleared. The chip does not generate interrupt
7111 	 * at this time, but the irq handler may still be called due to irq
7112 	 * sharing or irqpoll.
7114 	tg3_flag_set(tp, CHIP_RESETTING);
7115 	for (i = 0; i < tp->irq_cnt; i++) {
7116 		struct tg3_napi *tnapi = &tp->napi[i];
7117 		if (tnapi->hw_status) {
7118 			tnapi->hw_status->status = 0;
7119 			tnapi->hw_status->status_tag = 0;
7121 		tnapi->last_tag = 0;
7122 		tnapi->last_irq_tag = 0;
	/* Make sure no in-flight IRQ handler touches the device. */
7126 	for (i = 0; i < tp->irq_cnt; i++)
7127 		synchronize_irq(tp->napi[i].irq_vec);
7129 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7130 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7131 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	/* Build the reset command. */
7135 	val = GRC_MISC_CFG_CORECLK_RESET;
7137 	if (tg3_flag(tp, PCI_EXPRESS)) {
7138 		/* Force PCIe 1.0a mode */
7139 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7140 		    !tg3_flag(tp, 57765_PLUS) &&
7141 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
7142 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7143 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7145 		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7146 			tw32(GRC_MISC_CFG, (1 << 29));
7151 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7152 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7153 		tw32(GRC_VCPU_EXT_CTRL,
7154 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7157 	/* Manage gphy power for all CPMU absent PCIe devices. */
7158 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7159 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	/* This write triggers the actual chip reset. */
7161 	tw32(GRC_MISC_CFG, val);
7163 	/* restore 5701 hardware bug workaround write method */
7164 	tp->write32 = write_op;
7166 	/* Unfortunately, we have to delay before the PCI read back.
7167 	 * Some 575X chips even will not respond to a PCI cfg access
7168 	 * when the reset command is given to the chip.
7170 	 * How do these hardware designers expect things to work
7171 	 * properly if the PCI write is posted for a long period
7172 	 * of time? It is always necessary to have some method by
7173 	 * which a register read back can occur to push the write
7174 	 * out which does the reset.
7176 	 * For most tg3 variants the trick below was working.
7181 	/* Flush PCI posted writes. The normal MMIO registers
7182 	 * are inaccessible at this time so this is the only
7183 	 * way to make this reliably (actually, this is no longer
7184 	 * the case, see above). I tried to use indirect
7185 	 * register read/write but this upset some 5701 variants.
7187 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7191 	if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
7194 		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7198 			/* Wait for link training to complete. */
7199 			for (i = 0; i < 5000; i++)
7202 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7203 			pci_write_config_dword(tp->pdev, 0xc4,
7204 					       cfg_val | (1 << 15));
7207 		/* Clear the "no snoop" and "relaxed ordering" bits. */
7208 		pci_read_config_word(tp->pdev,
7209 				     tp->pcie_cap + PCI_EXP_DEVCTL,
7211 		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7212 			   PCI_EXP_DEVCTL_NOSNOOP_EN);
7214 		 * Older PCIe devices only support the 128 byte
7215 		 * MPS setting. Enforce the restriction.
7217 		if (!tg3_flag(tp, CPMU_PRESENT))
7218 			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7219 		pci_write_config_word(tp->pdev,
7220 				      tp->pcie_cap + PCI_EXP_DEVCTL,
7223 		pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7225 		/* Clear error status */
7226 		pci_write_config_word(tp->pdev,
7227 				      tp->pcie_cap + PCI_EXP_DEVSTA,
7228 				      PCI_EXP_DEVSTA_CED |
7229 				      PCI_EXP_DEVSTA_NFED |
7230 				      PCI_EXP_DEVSTA_FED |
7231 				      PCI_EXP_DEVSTA_URD);
7234 	tg3_restore_pci_state(tp);
7236 	tg3_flag_clear(tp, CHIP_RESETTING);
7237 	tg3_flag_clear(tp, ERROR_PROCESSED);
7240 	if (tg3_flag(tp, 5780_CLASS))
7241 		val = tr32(MEMARB_MODE);
7242 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7244 	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7246 		tw32(0x5000, 0x400);
7249 	tw32(GRC_MODE, tp->grc_mode);
7251 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7254 		tw32(0xc4, val | (1 << 15));
7257 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7258 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7259 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7260 		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7261 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7262 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7265 	if (tg3_flag(tp, ENABLE_APE))
7266 		tp->mac_mode = MAC_MODE_APE_TX_EN |
7267 			       MAC_MODE_APE_RX_EN |
7268 			       MAC_MODE_TDE_ENABLE;
7270 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7271 		tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7273 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7274 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7279 	tw32_f(MAC_MODE, val);
7282 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7284 	err = tg3_poll_fw(tp);
7290 	if (tg3_flag(tp, PCI_EXPRESS) &&
7291 	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7292 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7293 	    !tg3_flag(tp, 57765_PLUS)) {
7296 		tw32(0x7c00, val | (1 << 25));
7299 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7300 		val = tr32(TG3_CPMU_CLCK_ORIDE);
7301 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7304 	/* Reprobe ASF enable state. */
7305 	tg3_flag_clear(tp, ENABLE_ASF);
7306 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7307 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7308 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7311 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7312 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7313 			tg3_flag_set(tp, ENABLE_ASF);
7314 			tp->last_event_jiffies = jiffies;
7315 			if (tg3_flag(tp, 5750_PLUS))
7316 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7323 /* tp->lock is held. */
/*
 * tg3_stop_fw() - pause the ASF firmware (skipped when the APE manages
 * the device): wait for the previous event ACK, post FWCMD_NICDRV_PAUSE_FW
 * through the firmware command mailbox, then wait for its ACK.
 * Caller holds tp->lock.
 */
7324 static void tg3_stop_fw(struct tg3 *tp)
7326 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7327 		/* Wait for RX cpu to ACK the previous event. */
7328 		tg3_wait_for_event_ack(tp);
7330 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7332 		tg3_generate_fw_event(tp);
7334 		/* Wait for RX cpu to ACK this event. */
7335 		tg3_wait_for_event_ack(tp);
7339 /* tp->lock is held. */
/*
 * tg3_halt() - bring the chip down and back through reset: pre-reset
 * firmware signature, hardware abort, chip reset, MAC address restore,
 * then legacy and post-reset signatures.  Returns the tg3_chip_reset()
 * result path (elided).  Caller holds tp->lock.
 */
7340 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7346 	tg3_write_sig_pre_reset(tp, kind);
7348 	tg3_abort_hw(tp, silent);
7349 	err = tg3_chip_reset(tp);
7351 	__tg3_set_mac_addr(tp, 0);
7353 	tg3_write_sig_legacy(tp, kind);
7354 	tg3_write_sig_post_reset(tp, kind);
7362 #define RX_CPU_SCRATCH_BASE 0x30000
7363 #define RX_CPU_SCRATCH_SIZE 0x04000
7364 #define TX_CPU_SCRATCH_BASE 0x34000
7365 #define TX_CPU_SCRATCH_SIZE 0x04000
7367 /* tp->lock is held. */
/*
 * tg3_halt_cpu() - halt the on-chip RX or TX CPU at @offset (RX_CPU_BASE
 * or TX_CPU_BASE).  5906 parts use GRC_VCPU_EXT_CTRL instead of the
 * CPU_MODE register.  Polls up to 10000 iterations for CPU_MODE_HALT to
 * stick, logs and fails on timeout, then clears firmware's nvram
 * arbitration request.  5705+ parts have no TX CPU (BUG_ON).
 * Caller holds tp->lock.
 */
7368 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7372 	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7374 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7375 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
7377 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7380 	if (offset == RX_CPU_BASE) {
7381 		for (i = 0; i < 10000; i++) {
7382 			tw32(offset + CPU_STATE, 0xffffffff);
7383 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7384 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7388 		tw32(offset + CPU_STATE, 0xffffffff);
7389 		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
	/* TX CPU path (else branch in the full source). */
7392 		for (i = 0; i < 10000; i++) {
7393 			tw32(offset + CPU_STATE, 0xffffffff);
7394 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7395 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7401 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
7402 			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7406 	/* Clear firmware's nvram arbitration. */
7407 	if (tg3_flag(tp, NVRAM))
7408 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	/* Fields of struct fw_info (the "struct fw_info {" line is elided
	 * from this listing -- confirm against the full source).  Filled
	 * from the firmware blob header by the loaders below. */
7413 	unsigned int fw_base;	/* load/start address from fw_data[1] */
7414 	unsigned int fw_len;	/* total image length (fw->size - 12) */
7415 	const __be32 *fw_data;	/* big-endian payload words (&fw_data[3]) */
7418 /* tp->lock is held. */
/*
 * tg3_load_firmware_cpu() - copy a firmware image described by @info into
 * the scratch memory of the CPU at @cpu_base: take the nvram lock, halt
 * the CPU, zero the scratch area, keep the CPU halted, then write the
 * image word by word.  Refuses TX-CPU loads on 5705+ (no TX CPU there).
 * Caller holds tp->lock.
 */
7419 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7420 				 int cpu_scratch_size, struct fw_info *info)
7422 	int err, lock_err, i;
7423 	void (*write_op)(struct tg3 *, u32, u32);
7425 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7427 			"%s: Trying to load TX cpu firmware which is 5705\n",
	/* 5705+ must go through tg3_write_mem(); older chips use the
	 * indirect register path. */
7432 	if (tg3_flag(tp, 5705_PLUS))
7433 		write_op = tg3_write_mem;
7435 		write_op = tg3_write_indirect_reg32;
7437 	/* It is possible that bootcode is still loading at this point.
7438 	 * Get the nvram lock first before halting the cpu.
7440 	lock_err = tg3_nvram_lock(tp);
7441 	err = tg3_halt_cpu(tp, cpu_base);
7443 		tg3_nvram_unlock(tp);
7447 	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7448 		write_op(tp, cpu_scratch_base + i, 0);
7449 	tw32(cpu_base + CPU_STATE, 0xffffffff);
7450 	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7451 	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7452 		write_op(tp, (cpu_scratch_base +
7453 			      (info->fw_base & 0xffff) +
7455 			 be32_to_cpu(info->fw_data[i]));
7463 /* tp->lock is held. */
/*
 * tg3_load_5701_a0_firmware_fix() - load the 5701 A0 workaround firmware
 * into both the RX and TX CPUs, then start only the RX CPU: set its PC
 * to fw_base and retry up to 5 times until the PC reads back correctly,
 * logging an error on failure.  Finally release the CPU from halt.
 * Caller holds tp->lock.
 */
7464 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7466 	struct fw_info info;
7467 	const __be32 *fw_data;
7470 	fw_data = (void *)tp->fw->data;
7472 	/* Firmware blob starts with version numbers, followed by
7473 	   start address and length. We are setting complete length.
7474 	   length = end_address_of_bss - start_address_of_text.
7475 	   Remainder is the blob to be loaded contiguously
7476 	   from start address. */
7478 	info.fw_base = be32_to_cpu(fw_data[1]);
7479 	info.fw_len = tp->fw->size - 12;
7480 	info.fw_data = &fw_data[3];
7482 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7483 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7488 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7489 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7494 	/* Now startup only the RX cpu. */
7495 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7496 	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7498 	for (i = 0; i < 5; i++) {
7499 		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7501 		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7502 		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
7503 		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7507 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7508 			   "should be %08x\n", __func__,
7509 			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
	/* Clear halt to let the RX CPU run the new firmware. */
7512 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7513 	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
7518 /* tp->lock is held. */
/*
 * tg3_load_tso_firmware() - load the software-TSO firmware when the chip
 * lacks hardware TSO (early return if any HW_TSO_* flag is set).  On
 * 5705 the image goes into the RX CPU using the mbuf pool as scratch;
 * otherwise into the TX CPU scratch area.  Then start the chosen CPU
 * with the same set-PC / verify / release-halt dance as the 5701 A0 fix.
 * Caller holds tp->lock.
 */
7519 static int tg3_load_tso_firmware(struct tg3 *tp)
7521 	struct fw_info info;
7522 	const __be32 *fw_data;
7523 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7526 	if (tg3_flag(tp, HW_TSO_1) ||
7527 	    tg3_flag(tp, HW_TSO_2) ||
7528 	    tg3_flag(tp, HW_TSO_3))
7531 	fw_data = (void *)tp->fw->data;
7533 	/* Firmware blob starts with version numbers, followed by
7534 	   start address and length. We are setting complete length.
7535 	   length = end_address_of_bss - start_address_of_text.
7536 	   Remainder is the blob to be loaded contiguously
7537 	   from start address. */
7539 	info.fw_base = be32_to_cpu(fw_data[1]);
7540 	cpu_scratch_size = tp->fw_len;
7541 	info.fw_len = tp->fw->size - 12;
7542 	info.fw_data = &fw_data[3];
7544 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7545 		cpu_base = RX_CPU_BASE;
7546 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7548 		cpu_base = TX_CPU_BASE;
7549 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7550 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7553 	err = tg3_load_firmware_cpu(tp, cpu_base,
7554 				    cpu_scratch_base, cpu_scratch_size,
7559 	/* Now startup the cpu. */
7560 	tw32(cpu_base + CPU_STATE, 0xffffffff);
7561 	tw32_f(cpu_base + CPU_PC, info.fw_base);
7563 	for (i = 0; i < 5; i++) {
7564 		if (tr32(cpu_base + CPU_PC) == info.fw_base)
7566 		tw32(cpu_base + CPU_STATE, 0xffffffff);
7567 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7568 		tw32_f(cpu_base + CPU_PC, info.fw_base);
7573 			   "%s fails to set CPU PC, is %08x should be %08x\n",
7574 			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
	/* Release halt so the CPU starts executing. */
7577 	tw32(cpu_base + CPU_STATE, 0xffffffff);
7578 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
/*
 * tg3_set_mac_addr() - ndo_set_mac_address hook.  Validates the new
 * address, copies it into dev->dev_addr, and (only while the interface
 * is running) programs the MAC address registers under tp->lock.  When
 * ASF owns MAC address slot 1, slot 1 is left untouched (skip_mac_1).
 */
7583 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7585 	struct tg3 *tp = netdev_priv(dev);
7586 	struct sockaddr *addr = p;
7587 	int err = 0, skip_mac_1 = 0;
7589 	if (!is_valid_ether_addr(addr->sa_data))
7592 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7594 	if (!netif_running(dev))
7597 	if (tg3_flag(tp, ENABLE_ASF)) {
7598 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
7600 		addr0_high = tr32(MAC_ADDR_0_HIGH);
7601 		addr0_low = tr32(MAC_ADDR_0_LOW);
7602 		addr1_high = tr32(MAC_ADDR_1_HIGH);
7603 		addr1_low = tr32(MAC_ADDR_1_LOW);
7605 		/* Skip MAC addr 1 if ASF is using it. */
7606 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7607 		    !(addr1_high == 0 && addr1_low == 0))
7610 	spin_lock_bh(&tp->lock);
7611 	__tg3_set_mac_addr(tp, skip_mac_1);
7612 	spin_unlock_bh(&tp->lock);
7617 /* tp->lock is held. */
/*
 * tg3_set_bdinfo() - program one buffer-descriptor info block in NIC
 * SRAM: the 64-bit host DMA address (split high/low), the maxlen/flags
 * word, and on pre-5705 chips the NIC-local ring address.
 * Caller holds tp->lock.
 */
7618 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7619 			   dma_addr_t mapping, u32 maxlen_flags,
7623 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7624 		      ((u64) mapping >> 32));
7626 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7627 		      ((u64) mapping & 0xffffffff));
7629 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
	/* 5705+ chips have no NIC-local descriptor address field. */
7632 	if (!tg3_flag(tp, 5705_PLUS))
7634 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7638 static void __tg3_set_rx_mode(struct net_device *);
/*
 * __tg3_set_coalesce() - write the ethtool coalescing parameters @ec into
 * the host-coalescing registers.  Vector-0 TX registers are zeroed when
 * TSS is on (vector 0 carries no TX then); likewise RX when RSS is on.
 * Pre-5705 chips also take per-IRQ tick values and a statistics
 * coalescing interval (forced to 0 when the carrier is down).  The tail
 * loops program, then zero, the per-MSI-X-vector register banks (stride
 * 0x18 per vector).
 */
7639 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7643 	if (!tg3_flag(tp, ENABLE_TSS)) {
7644 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7645 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7646 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7648 		tw32(HOSTCC_TXCOL_TICKS, 0);
7649 		tw32(HOSTCC_TXMAX_FRAMES, 0);
7650 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7653 	if (!tg3_flag(tp, ENABLE_RSS)) {
7654 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7655 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7656 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7658 		tw32(HOSTCC_RXCOL_TICKS, 0);
7659 		tw32(HOSTCC_RXMAX_FRAMES, 0);
7660 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7663 	if (!tg3_flag(tp, 5705_PLUS)) {
7664 		u32 val = ec->stats_block_coalesce_usecs;
7666 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7667 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
		/* No link: don't bother coalescing statistics DMA. */
7669 		if (!netif_carrier_ok(tp->dev))
7672 		tw32(HOSTCC_STAT_COAL_TICKS, val);
	/* Program the extra MSI-X vector banks ... */
7675 	for (i = 0; i < tp->irq_cnt - 1; i++) {
7678 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7679 		tw32(reg, ec->rx_coalesce_usecs);
7680 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7681 		tw32(reg, ec->rx_max_coalesced_frames);
7682 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7683 		tw32(reg, ec->rx_max_coalesced_frames_irq);
7685 		if (tg3_flag(tp, ENABLE_TSS)) {
7686 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7687 			tw32(reg, ec->tx_coalesce_usecs);
7688 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7689 			tw32(reg, ec->tx_max_coalesced_frames);
7690 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7691 			tw32(reg, ec->tx_max_coalesced_frames_irq);
	/* ... and zero the unused ones up to irq_max. */
7695 	for (; i < tp->irq_max - 1; i++) {
7696 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7697 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7698 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7700 		if (tg3_flag(tp, ENABLE_TSS)) {
7701 			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7702 			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7703 			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7708 /* tp->lock is held. */
/*
 * tg3_rings_reset() - put all ring control blocks and mailboxes into a
 * known state: disable every TX and RX-return RCB beyond the first
 * (limits depend on ASIC generation), disable interrupts, zero all
 * producer/consumer mailboxes, disable NIC-based send BD rings on
 * pre-5705 parts, then re-program the status block DMA address and the
 * first TX/RX-return RCBs for vector 0, and finally the per-vector
 * status blocks and RCBs for vectors 1..irq_cnt-1.
 * Caller holds tp->lock.
 */
7709 static void tg3_rings_reset(struct tg3 *tp)
7712 	u32 stblk, txrcb, rxrcb, limit;
7713 	struct tg3_napi *tnapi = &tp->napi[0];
7715 	/* Disable all transmit rings but the first. */
7716 	if (!tg3_flag(tp, 5705_PLUS))
7717 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7718 	else if (tg3_flag(tp, 5717_PLUS))
7719 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7720 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7721 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7723 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7725 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7726 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7727 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7728 			      BDINFO_FLAGS_DISABLED);
7731 	/* Disable all receive return rings but the first. */
7732 	if (tg3_flag(tp, 5717_PLUS))
7733 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7734 	else if (!tg3_flag(tp, 5705_PLUS))
7735 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7736 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7737 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7738 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7740 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7742 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7743 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7744 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7745 			      BDINFO_FLAGS_DISABLED);
7747 	/* Disable interrupts */
7748 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7750 	/* Zero mailbox registers. */
7751 	if (tg3_flag(tp, SUPPORT_MSIX)) {
7752 		for (i = 1; i < tp->irq_max; i++) {
7753 			tp->napi[i].tx_prod = 0;
7754 			tp->napi[i].tx_cons = 0;
7755 			if (tg3_flag(tp, ENABLE_TSS))
7756 				tw32_mailbox(tp->napi[i].prodmbox, 0);
7757 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
7758 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7760 		if (!tg3_flag(tp, ENABLE_TSS))
7761 			tw32_mailbox(tp->napi[0].prodmbox, 0);
	/* Single-vector path (else branch in the full source). */
7763 		tp->napi[0].tx_prod = 0;
7764 		tp->napi[0].tx_cons = 0;
7765 		tw32_mailbox(tp->napi[0].prodmbox, 0);
7766 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
7769 	/* Make sure the NIC-based send BD rings are disabled. */
7770 	if (!tg3_flag(tp, 5705_PLUS)) {
7771 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7772 		for (i = 0; i < 16; i++)
7773 			tw32_tx_mbox(mbox + i * 8, 0);
7776 	txrcb = NIC_SRAM_SEND_RCB;
7777 	rxrcb = NIC_SRAM_RCV_RET_RCB;
7779 	/* Clear status block in ram. */
7780 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7782 	/* Set status block DMA address */
7783 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7784 	     ((u64) tnapi->status_mapping >> 32));
7785 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7786 	     ((u64) tnapi->status_mapping & 0xffffffff));
7788 	if (tnapi->tx_ring) {
7789 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7790 			       (TG3_TX_RING_SIZE <<
7791 				BDINFO_FLAGS_MAXLEN_SHIFT),
7792 			       NIC_SRAM_TX_BUFFER_DESC);
7793 		txrcb += TG3_BDINFO_SIZE;
7796 	if (tnapi->rx_rcb) {
7797 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7798 			       (tp->rx_ret_ring_mask + 1) <<
7799 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7800 		rxrcb += TG3_BDINFO_SIZE;
	/* Remaining vectors use the per-ring status block registers. */
7803 		stblk = HOSTCC_STATBLCK_RING1;
7805 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7806 		u64 mapping = (u64)tnapi->status_mapping;
7807 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7808 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7810 		/* Clear status block in ram. */
7811 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7813 		if (tnapi->tx_ring) {
7814 			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7815 				       (TG3_TX_RING_SIZE <<
7816 					BDINFO_FLAGS_MAXLEN_SHIFT),
7817 				       NIC_SRAM_TX_BUFFER_DESC);
7818 			txrcb += TG3_BDINFO_SIZE;
7821 			tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7822 				       ((tp->rx_ret_ring_mask + 1) <<
7823 					BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7826 			rxrcb += TG3_BDINFO_SIZE;
/*
 * tg3_setup_rxbd_thresholds() - program the RX buffer-descriptor
 * replenish thresholds.  Picks the per-generation on-chip standard-ring
 * BD cache size, then sets RCVBDI_STD_THRESH to the smaller of the NIC
 * replenish threshold (half the cache, capped at rx_std_max_post) and
 * the host threshold (rx_pending/8, min 1).  57765+ parts also get the
 * low-water mark registers, and jumbo-capable non-5780 parts the same
 * treatment for the jumbo ring.
 */
7830 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7832 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7834 	if (!tg3_flag(tp, 5750_PLUS) ||
7835 	    tg3_flag(tp, 5780_CLASS) ||
7836 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7837 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7838 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7839 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7840 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7841 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7843 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7845 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7846 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7848 	val = min(nic_rep_thresh, host_rep_thresh);
7849 	tw32(RCVBDI_STD_THRESH, val);
7851 	if (tg3_flag(tp, 57765_PLUS))
7852 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
	/* No jumbo ring to configure on these parts. */
7854 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7857 	if (!tg3_flag(tp, 5705_PLUS))
7858 		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
7860 		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
7862 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
7864 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
7865 	tw32(RCVBDI_JUMBO_THRESH, val);
7867 	if (tg3_flag(tp, 57765_PLUS))
7868 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
/* tg3_reset_hw() - full chip reset and re-initialization path.
 *
 * Disables interrupts, signals the firmware, resets the chip, then
 * reprograms (in order): CPMU/PCIe clock workarounds, GRC mode, the
 * buffer manager, RX BD rings and producer mailboxes, MAC address/MTU,
 * RDMAC/WDMAC DMA engines, the host coalescing engine, MAC mode, GPIOs,
 * the send/receive state machines, optional firmware loads, RSS, RX mode,
 * serdes quirks, PHY setup, and the receive rules, finishing with the
 * post-reset firmware signature.
 *
 * NOTE(review): this excerpt interleaves original line numbers and elides
 * many lines (blank lines, braces, `else` keywords, udelays, and several
 * `if (err) return err;` checks), so the text below is not compilable
 * as-is; the final `return` is also elided.
 */
7871 /* tp->lock is held. */
7872 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7874 u32 val, rdmac_mode;
7876 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7878 tg3_disable_ints(tp);
7882 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7884 if (tg3_flag(tp, INIT_COMPLETE))
7885 tg3_abort_hw(tp, 1);
7887 /* Enable MAC control of LPI */
7888 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7889 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7890 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7891 TG3_CPMU_EEE_LNKIDL_UART_IDL);
7893 tw32_f(TG3_CPMU_EEE_CTRL,
7894 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7896 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7897 TG3_CPMU_EEEMD_LPI_IN_TX |
7898 TG3_CPMU_EEEMD_LPI_IN_RX |
7899 TG3_CPMU_EEEMD_EEE_ENABLE;
7901 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7902 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7904 if (tg3_flag(tp, ENABLE_APE))
7905 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7907 tw32_f(TG3_CPMU_EEE_MODE, val);
7909 tw32_f(TG3_CPMU_EEE_DBTMR1,
7910 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7911 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7913 tw32_f(TG3_CPMU_EEE_DBTMR2,
7914 TG3_CPMU_DBTMR2_APE_TX_2047US |
7915 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
/* NOTE(review): the err check following this call is elided here. */
7921 err = tg3_chip_reset(tp);
7925 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
/* 5784 A-step: drop link-aware/link-idle power modes and slow the
 * 10Mb/link-aware/host-access MAC clocks down to 6.25 MHz. */
7927 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7928 val = tr32(TG3_CPMU_CTRL);
7929 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7930 tw32(TG3_CPMU_CTRL, val);
7932 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7933 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7934 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7935 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7937 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7938 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7939 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7940 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7942 val = tr32(TG3_CPMU_HST_ACC);
7943 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7944 val |= CPMU_HST_ACC_MACCLK_6_25;
7945 tw32(TG3_CPMU_HST_ACC, val);
/* 57780: PCIe ASPM/L1 and electrical-idle tuning. */
7948 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7949 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7950 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7951 PCIE_PWR_MGMT_L1_THRESH_4MS;
7952 tw32(PCIE_PWR_MGMT_THRESH, val);
7954 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7955 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7957 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7959 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7960 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7963 if (tg3_flag(tp, L1PLLPD_EN)) {
7964 u32 grc_mode = tr32(GRC_MODE);
7966 /* Access the lower 1K of PL PCIE block registers. */
7967 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7968 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7970 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7971 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7972 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
/* Restore normal GRC_MODE after poking the PL PCIE window. */
7974 tw32(GRC_MODE, grc_mode);
7977 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7978 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7979 u32 grc_mode = tr32(GRC_MODE);
7981 /* Access the lower 1K of PL PCIE block registers. */
7982 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7983 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7985 val = tr32(TG3_PCIE_TLDLPL_PORT +
7986 TG3_PCIE_PL_LO_PHYCTL5);
7987 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7988 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7990 tw32(GRC_MODE, grc_mode);
7993 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
7994 u32 grc_mode = tr32(GRC_MODE);
7996 /* Access the lower 1K of DL PCIE block registers. */
7997 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7998 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8000 val = tr32(TG3_PCIE_TLDLPL_PORT +
8001 TG3_PCIE_DL_LO_FTSMAX);
8002 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8003 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8004 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8006 tw32(GRC_MODE, grc_mode);
8009 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8010 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8011 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8012 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8015 /* This works around an issue with Athlon chipsets on
8016 * B3 tigon3 silicon. This bit has no effect on any
8017 * other revision. But do not set this on PCI Express
8018 * chips and don't even touch the clocks if the CPMU is present.
8020 if (!tg3_flag(tp, CPMU_PRESENT)) {
8021 if (!tg3_flag(tp, PCI_EXPRESS))
8022 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8023 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8026 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8027 tg3_flag(tp, PCIX_MODE)) {
8028 val = tr32(TG3PCI_PCISTATE);
8029 val |= PCISTATE_RETRY_SAME_DMA;
8030 tw32(TG3PCI_PCISTATE, val);
8033 if (tg3_flag(tp, ENABLE_APE)) {
8034 /* Allow reads and writes to the
8035 * APE register and memory space.
8037 val = tr32(TG3PCI_PCISTATE);
8038 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8039 PCISTATE_ALLOW_APE_SHMEM_WR |
8040 PCISTATE_ALLOW_APE_PSPACE_WR;
8041 tw32(TG3PCI_PCISTATE, val);
8044 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8045 /* Enable some hw fixes. */
8046 val = tr32(TG3PCI_MSI_DATA);
8047 val |= (1 << 26) | (1 << 28) | (1 << 29);
8048 tw32(TG3PCI_MSI_DATA, val);
8051 /* Descriptor ring init may make accesses to the
8052 * NIC SRAM area to setup the TX descriptors, so we
8053 * can only do this after the hardware has been
8054 * successfully reset.
8056 err = tg3_init_rings(tp);
/* Program DMA read/write control with chip-specific workarounds. */
8060 if (tg3_flag(tp, 57765_PLUS)) {
8061 val = tr32(TG3PCI_DMA_RW_CTRL) &
8062 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8063 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8064 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8065 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8066 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8067 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8068 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8069 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8070 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8071 /* This value is determined during the probe time DMA
8072 * engine test, tg3_test_dma.
8074 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8077 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8078 GRC_MODE_4X_NIC_SEND_RINGS |
8079 GRC_MODE_NO_TX_PHDR_CSUM |
8080 GRC_MODE_NO_RX_PHDR_CSUM);
8081 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8083 /* Pseudo-header checksum is done by hardware logic and not
8084 * the offload processers, so make the chip do the pseudo-
8085 * header checksums on receive. For transmit it is more
8086 * convenient to do the pseudo-header checksum in software
8087 * as Linux does that on transmit for us in all cases.
8089 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8093 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8095 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8096 val = tr32(GRC_MISC_CFG);
8098 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8099 tw32(GRC_MISC_CFG, val);
8101 /* Initialize MBUF/DESC pool. */
8102 if (tg3_flag(tp, 5750_PLUS)) {
8104 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8105 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8106 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8107 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8109 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8110 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8111 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8112 } else if (tg3_flag(tp, TSO_CAPABLE)) {
/* Round firmware length up to 128 bytes and carve it out of the
 * 5705 MBUF pool (with an extra 0xa00 reserved). */
8115 fw_len = tp->fw_len;
8116 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8117 tw32(BUFMGR_MB_POOL_ADDR,
8118 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8119 tw32(BUFMGR_MB_POOL_SIZE,
8120 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
/* Water marks differ for standard vs jumbo MTUs. */
8123 if (tp->dev->mtu <= ETH_DATA_LEN) {
8124 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8125 tp->bufmgr_config.mbuf_read_dma_low_water);
8126 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8127 tp->bufmgr_config.mbuf_mac_rx_low_water);
8128 tw32(BUFMGR_MB_HIGH_WATER,
8129 tp->bufmgr_config.mbuf_high_water);
8131 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8132 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8133 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8134 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8135 tw32(BUFMGR_MB_HIGH_WATER,
8136 tp->bufmgr_config.mbuf_high_water_jumbo);
8138 tw32(BUFMGR_DMA_LOW_WATER,
8139 tp->bufmgr_config.dma_low_water);
8140 tw32(BUFMGR_DMA_HIGH_WATER,
8141 tp->bufmgr_config.dma_high_water);
/* Enable the buffer manager and poll (up to 2000 iterations) for the
 * enable bit to stick; timeout path logs and, per the error message,
 * treats this as fatal (surrounding lines elided in this excerpt). */
8143 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8144 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8145 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8146 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8147 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8148 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8149 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8150 tw32(BUFMGR_MODE, val);
8151 for (i = 0; i < 2000; i++) {
8152 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8157 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8161 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8162 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8164 tg3_setup_rxbd_thresholds(tp);
8166 /* Initialize TG3_BDINFO's at:
8167 * RCVDBDI_STD_BD: standard eth size rx ring
8168 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8169 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8172 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8173 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8174 * ring attribute flags
8175 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8177 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8178 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8180 * The size of each ring is fixed in the firmware, but the location is
8183 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8184 ((u64) tpr->rx_std_mapping >> 32));
8185 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8186 ((u64) tpr->rx_std_mapping & 0xffffffff));
8187 if (!tg3_flag(tp, 5717_PLUS))
8188 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8189 NIC_SRAM_RX_BUFFER_DESC);
8191 /* Disable the mini ring */
8192 if (!tg3_flag(tp, 5705_PLUS))
8193 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8194 BDINFO_FLAGS_DISABLED);
8196 /* Program the jumbo buffer descriptor ring control
8197 * blocks on those devices that have them.
8199 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8200 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8202 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8203 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8204 ((u64) tpr->rx_jmb_mapping >> 32));
8205 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8206 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8207 val = TG3_RX_JMB_RING_SIZE(tp) <<
8208 BDINFO_FLAGS_MAXLEN_SHIFT;
8209 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8210 val | BDINFO_FLAGS_USE_EXT_RECV);
8211 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8212 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8213 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8214 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8216 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8217 BDINFO_FLAGS_DISABLED);
/* Compose the standard ring MAXLEN/flags word per chip family. */
8220 if (tg3_flag(tp, 57765_PLUS)) {
8221 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8222 val = TG3_RX_STD_MAX_SIZE_5700;
8224 val = TG3_RX_STD_MAX_SIZE_5717;
8225 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8226 val |= (TG3_RX_STD_DMA_SZ << 2);
8228 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8230 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8232 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
/* Publish initial producer indices via the RX mailboxes. */
8234 tpr->rx_std_prod_idx = tp->rx_pending;
8235 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8237 tpr->rx_jmb_prod_idx =
8238 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8239 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8241 tg3_rings_reset(tp);
8243 /* Initialize MAC address and backoff seed. */
8244 __tg3_set_mac_addr(tp, 0);
8246 /* MTU + ethernet header + FCS + optional VLAN tag */
8247 tw32(MAC_RX_MTU_SIZE,
8248 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8250 /* The slot time is changed by tg3_setup_phy if we
8251 * run at gigabit with half duplex.
8253 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8254 (6 << TX_LENGTHS_IPG_SHIFT) |
8255 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8257 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8258 val |= tr32(MAC_TX_LENGTHS) &
8259 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8260 TX_LENGTHS_CNT_DWN_VAL_MSK);
8262 tw32(MAC_TX_LENGTHS, val);
8264 /* Receive rules. */
8265 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8266 tw32(RCVLPC_CONFIG, 0x0181);
8268 /* Calculate RDMAC_MODE setting early, we need it to determine
8269 * the RCVLPC_STATE_ENABLE mask.
8271 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8272 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8273 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8274 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8275 RDMAC_MODE_LNGREAD_ENAB);
8277 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8278 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8280 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8281 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8282 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8283 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8284 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8285 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8287 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8288 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8289 if (tg3_flag(tp, TSO_CAPABLE) &&
8290 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8291 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8292 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8293 !tg3_flag(tp, IS_5788)) {
8294 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8298 if (tg3_flag(tp, PCI_EXPRESS))
8299 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8301 if (tg3_flag(tp, HW_TSO_1) ||
8302 tg3_flag(tp, HW_TSO_2) ||
8303 tg3_flag(tp, HW_TSO_3))
8304 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8306 if (tg3_flag(tp, HW_TSO_3) ||
8307 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8308 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8309 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8311 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8312 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
/* Read-DMA FIFO overflow workaround plus 5719/5720 FIFO margins. */
8314 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8315 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8316 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8317 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8318 tg3_flag(tp, 57765_PLUS)) {
8319 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8320 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8321 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8322 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8323 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8324 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8325 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8326 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8327 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8329 tw32(TG3_RDMA_RSRVCTRL_REG,
8330 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8333 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8334 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8335 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8336 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8337 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8338 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8341 /* Receive/send statistics. */
8342 if (tg3_flag(tp, 5750_PLUS)) {
8343 val = tr32(RCVLPC_STATS_ENABLE);
8344 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8345 tw32(RCVLPC_STATS_ENABLE, val);
8346 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8347 tg3_flag(tp, TSO_CAPABLE)) {
8348 val = tr32(RCVLPC_STATS_ENABLE);
8349 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8350 tw32(RCVLPC_STATS_ENABLE, val);
8352 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8354 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8355 tw32(SNDDATAI_STATSENAB, 0xffffff);
8356 tw32(SNDDATAI_STATSCTRL,
8357 (SNDDATAI_SCTRL_ENABLE |
8358 SNDDATAI_SCTRL_FASTUPD));
8360 /* Setup host coalescing engine. */
8361 tw32(HOSTCC_MODE, 0);
8362 for (i = 0; i < 2000; i++) {
8363 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8368 __tg3_set_coalesce(tp, &tp->coal);
8370 if (!tg3_flag(tp, 5705_PLUS)) {
8371 /* Status/statistics block address. See tg3_timer,
8372 * the tg3_periodic_fetch_stats call there, and
8373 * tg3_get_stats to see how this works for 5705/5750 chips.
8375 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8376 ((u64) tp->stats_mapping >> 32));
8377 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8378 ((u64) tp->stats_mapping & 0xffffffff));
8379 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8381 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8383 /* Clear statistics and status block memory areas */
8384 for (i = NIC_SRAM_STATS_BLK;
8385 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8387 tg3_write_mem(tp, i, 0);
8392 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8394 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8395 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8396 if (!tg3_flag(tp, 5705_PLUS))
8397 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8399 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8400 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8401 /* reset to prevent losing 1st rx packet intermittently */
8402 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
/* Build tp->mac_mode; APE-enabled parts start from the APE TX/RX bits. */
8406 if (tg3_flag(tp, ENABLE_APE))
8407 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8410 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8411 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8412 if (!tg3_flag(tp, 5705_PLUS) &&
8413 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8414 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8415 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8416 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8419 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8420 * If TG3_FLAG_IS_NIC is zero, we should read the
8421 * register to preserve the GPIO settings for LOMs. The GPIOs,
8422 * whether used as inputs or outputs, are set by boot code after
8425 if (!tg3_flag(tp, IS_NIC)) {
8428 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8429 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8430 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8432 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8433 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8434 GRC_LCLCTRL_GPIO_OUTPUT3;
8436 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8437 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8439 tp->grc_local_ctrl &= ~gpio_mask;
8440 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8442 /* GPIO1 must be driven high for eeprom write protect */
8443 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8444 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8445 GRC_LCLCTRL_GPIO_OUTPUT1);
8447 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
/* Multi-vector MSI-X requires MSGINT multivector mode as well. */
8450 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8451 val = tr32(MSGINT_MODE);
8452 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8453 tw32(MSGINT_MODE, val);
8456 if (!tg3_flag(tp, 5705_PLUS)) {
8457 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
/* Compose the write-DMA (WDMAC) mode word, mirroring the RDMAC logic. */
8461 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8462 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8463 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8464 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8465 WDMAC_MODE_LNGREAD_ENAB);
8467 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8468 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8469 if (tg3_flag(tp, TSO_CAPABLE) &&
8470 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8471 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8473 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8474 !tg3_flag(tp, IS_5788)) {
8475 val |= WDMAC_MODE_RX_ACCEL;
8479 /* Enable host coalescing bug fix */
8480 if (tg3_flag(tp, 5755_PLUS))
8481 val |= WDMAC_MODE_STATUS_TAG_FIX;
8483 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8484 val |= WDMAC_MODE_BURST_ALL_DATA;
8486 tw32_f(WDMAC_MODE, val);
/* PCI-X: bump the maximum memory read byte count to 2K (5703/5704). */
8489 if (tg3_flag(tp, PCIX_MODE)) {
8492 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8494 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8495 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8496 pcix_cmd |= PCI_X_CMD_READ_2K;
8497 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8498 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8499 pcix_cmd |= PCI_X_CMD_READ_2K;
8501 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
/* Finally enable the read DMA engine with the mode computed above. */
8505 tw32_f(RDMAC_MODE, rdmac_mode);
8508 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8509 if (!tg3_flag(tp, 5705_PLUS))
8510 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8512 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8514 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8516 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8518 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8519 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8520 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8521 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8522 val |= RCVDBDI_MODE_LRG_RING_SZ;
8523 tw32(RCVDBDI_MODE, val);
8524 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8525 if (tg3_flag(tp, HW_TSO_1) ||
8526 tg3_flag(tp, HW_TSO_2) ||
8527 tg3_flag(tp, HW_TSO_3))
8528 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8529 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8530 if (tg3_flag(tp, ENABLE_TSS))
8531 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8532 tw32(SNDBDI_MODE, val);
8533 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
/* Load chip-specific firmware fixes / TSO firmware where required
 * (NOTE(review): the err checks after these calls are elided here). */
8535 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8536 err = tg3_load_5701_a0_firmware_fix(tp);
8541 if (tg3_flag(tp, TSO_CAPABLE)) {
8542 err = tg3_load_tso_firmware(tp);
8547 tp->tx_mode = TX_MODE_ENABLE;
8549 if (tg3_flag(tp, 5755_PLUS) ||
8550 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8551 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8553 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8554 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8555 tp->tx_mode &= ~val;
8556 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8559 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* RSS: fill the indirection table round-robin across the RX vectors
 * (irq 0 is excluded by the modulo) and program the hash key. */
8562 if (tg3_flag(tp, ENABLE_RSS)) {
8563 u32 reg = MAC_RSS_INDIR_TBL_0;
8564 u8 *ent = (u8 *)&val;
8566 /* Setup the indirection table */
8567 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8568 int idx = i % sizeof(val);
8570 ent[idx] = i % (tp->irq_cnt - 1);
8571 if (idx == sizeof(val) - 1) {
8577 /* Setup the "secret" hash key. */
8578 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8579 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8580 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8581 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8582 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8583 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8584 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8585 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8586 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8587 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
/* Build and program the RX mode, including the RSS hash controls. */
8590 tp->rx_mode = RX_MODE_ENABLE;
8591 if (tg3_flag(tp, 5755_PLUS))
8592 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8594 if (tg3_flag(tp, ENABLE_RSS))
8595 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8596 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8597 RX_MODE_RSS_IPV6_HASH_EN |
8598 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8599 RX_MODE_RSS_IPV4_HASH_EN |
8600 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8602 tw32_f(MAC_RX_MODE, tp->rx_mode);
8605 tw32(MAC_LED_CTRL, tp->led_ctrl);
8607 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8608 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8609 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8612 tw32_f(MAC_RX_MODE, tp->rx_mode);
8615 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8616 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8617 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8618 /* Set drive transmission level to 1.2V */
8619 /* only if the signal pre-emphasis bit is not set */
8620 val = tr32(MAC_SERDES_CFG);
8623 tw32(MAC_SERDES_CFG, val);
8625 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8626 tw32(MAC_SERDES_CFG, 0x616000);
8629 /* Prevent chip from dropping frames when flow control
8632 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8636 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8638 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8639 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8640 /* Use hardware link auto-negotiation */
8641 tg3_flag_set(tp, HW_AUTONEG);
/* 5714 MII serdes: force internal signal detect on the serdes RX
 * and in GRC local control. */
8644 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8645 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8648 tmp = tr32(SERDES_RX_CTRL);
8649 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8650 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8651 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8652 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
/* Restore the pre-suspend link config, then run PHY setup. */
8655 if (!tg3_flag(tp, USE_PHYLIB)) {
8656 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8657 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8658 tp->link_config.speed = tp->link_config.orig_speed;
8659 tp->link_config.duplex = tp->link_config.orig_duplex;
8660 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8663 err = tg3_setup_phy(tp, 0);
8667 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8668 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8671 /* Clear CRC stats. */
8672 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8673 tg3_writephy(tp, MII_TG3_TEST1,
8674 tmp | MII_TG3_TEST1_CRC_EN);
8675 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8680 __tg3_set_rx_mode(tp->dev);
8682 /* Initialize receive rules. */
8683 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8684 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8685 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8686 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8688 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8692 if (tg3_flag(tp, ENABLE_ASF))
/* Clear the remaining receive rules (switch-style fallthrough in the
 * original; intervening case labels are elided in this excerpt). */
8696 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8698 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8700 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8702 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8704 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8706 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8708 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8710 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8712 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8714 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8716 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8718 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8720 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8722 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8730 if (tg3_flag(tp, ENABLE_APE))
8731 /* Write our heartbeat update interval to APE. */
8732 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8733 APE_HOST_HEARTBEAT_INT_DISABLE);
8735 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8740 /* Called at device open time to get the chip ready for
8741 * packet processing. Invoked with tp->lock held.
8743 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
/* Select the proper clocks, zero the PCI memory-window base, then run
 * the full reset/init path; the return value is tg3_reset_hw()'s. */
8745 tg3_switch_clocks(tp);
8747 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8749 return tg3_reset_hw(tp, reset_phy);
/* TG3_STAT_ADD32() - accumulate the 32-bit hardware counter REG into the
 * split 64-bit statistic PSTAT (a low/high pair), carrying into ->high
 * whenever the 32-bit addition to ->low wraps.
 * NOTE(review): the closing "} while (0)" line is elided in this excerpt.
 */
8752 #define TG3_STAT_ADD32(PSTAT, REG) \
8753 do { u32 __val = tr32(REG); \
8754 (PSTAT)->low += __val; \
8755 if ((PSTAT)->low < __val) \
8756 (PSTAT)->high += 1; \
/* tg3_periodic_fetch_stats() - fold the MAC's 32-bit TX/RX hardware
 * counters into the 64-bit accumulators in tp->hw_stats.  Skips all work
 * while the link carrier is down (the early return after the carrier
 * check is elided in this excerpt).
 */
8759 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8761 struct tg3_hw_stats *sp = tp->hw_stats;
8763 if (!netif_carrier_ok(tp->dev))
/* Transmit-side counters. */
8766 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8767 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8768 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8769 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8770 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8771 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8772 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8773 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8774 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8775 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8776 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8777 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8778 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
/* Receive-side counters. */
8780 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8781 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8782 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8783 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8784 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8785 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8786 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8787 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8788 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8789 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8790 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8791 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8792 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8793 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8795 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
/* Non-5717 parts read the discard counter directly; the other branch
 * (else marker elided in this excerpt) derives a 0/1 discard increment
 * from the HOSTCC mbuf low-water attention bit and clears that bit. */
8796 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
8797 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8799 u32 val = tr32(HOSTCC_FLOW_ATTN);
8800 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8802 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8803 sp->rx_discards.low += val;
8804 if (sp->rx_discards.low < val)
8805 sp->rx_discards.high += 1;
8807 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8809 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
/* Driver heartbeat timer, re-armed at the end of every run (expires
 * jiffies + tp->timer_offset).  Runs under tp->lock and handles:
 *  - the lost-interrupt race when the chip is in non-tagged-status mode,
 *  - once-per-second stats refresh and link polling,
 *  - the ASF firmware "driver alive" heartbeat every two seconds. */
8812 static void tg3_timer(unsigned long __opaque)
8814 	struct tg3 *tp = (struct tg3 *) __opaque;
8819 	spin_lock(&tp->lock);
8821 	if (!tg3_flag(tp, TAGGED_STATUS)) {
8822 		/* All of this garbage is because when using non-tagged
8823 		 * IRQ status the mailbox/status_block protocol the chip
8824 		 * uses with the cpu is race prone.
8826 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8827 			tw32(GRC_LOCAL_CTRL,
8828 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8830 			tw32(HOSTCC_MODE, tp->coalesce_mode |
8831 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		/* Write DMA stalled: schedule a full chip reset from process
		 * context; must drop the lock before scheduling the work. */
8834 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8835 			tg3_flag_set(tp, RESTART_TIMER);
8836 			spin_unlock(&tp->lock);
8837 			schedule_work(&tp->reset_task);
8842 	/* This part only runs once per second. */
8843 	if (!--tp->timer_counter) {
8844 		if (tg3_flag(tp, 5705_PLUS))
8845 			tg3_periodic_fetch_stats(tp);
		/* Delayed EEE low-power-idle enable countdown. */
8847 		if (tp->setlpicnt && !--tp->setlpicnt) {
8848 			u32 val = tr32(TG3_CPMU_EEE_MODE);
8849 			tw32(TG3_CPMU_EEE_MODE,
8850 			     val | TG3_CPMU_EEEMD_LPI_ENABLE);
8853 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
8857 			mac_stat = tr32(MAC_STATUS);
8860 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8861 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8863 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8867 				tg3_setup_phy(tp, 0);
8868 		} else if (tg3_flag(tp, POLL_SERDES)) {
8869 			u32 mac_stat = tr32(MAC_STATUS);
8872 			if (netif_carrier_ok(tp->dev) &&
8873 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8876 			if (!netif_carrier_ok(tp->dev) &&
8877 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
8878 					 MAC_STATUS_SIGNAL_DET))) {
8882 				if (!tp->serdes_counter) {
8885 					     ~MAC_MODE_PORT_MODE_MASK));
8887 					tw32_f(MAC_MODE, tp->mac_mode);
8890 				tg3_setup_phy(tp, 0);
8892 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8893 			   tg3_flag(tp, 5780_CLASS)) {
8894 			tg3_serdes_parallel_detect(tp);
8897 		tp->timer_counter = tp->timer_multiplier;
8900 	/* Heartbeat is only sent once every 2 seconds.
8902 	 * The heartbeat is to tell the ASF firmware that the host
8903 	 * driver is still alive. In the event that the OS crashes,
8904 	 * ASF needs to reset the hardware to free up the FIFO space
8905 	 * that may be filled with rx packets destined for the host.
8906 	 * If the FIFO is full, ASF will no longer function properly.
8908 	 * Unintended resets have been reported on real time kernels
8909 	 * where the timer doesn't run on time. Netpoll will also have
8912 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8913 	 * to check the ring condition when the heartbeat is expiring
8914 	 * before doing the reset. This will prevent most unintended
8917 	if (!--tp->asf_counter) {
8918 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
8919 			tg3_wait_for_event_ack(tp);
8921 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8922 				      FWCMD_NICDRV_ALIVE3);
8923 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8924 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8925 				      TG3_FW_UPDATE_TIMEOUT_SEC);
8927 			tg3_generate_fw_event(tp);
8929 		tp->asf_counter = tp->asf_multiplier;
8932 	spin_unlock(&tp->lock);
8935 	tp->timer.expires = jiffies + tp->timer_offset;
8936 	add_timer(&tp->timer);
/* Request the interrupt line (legacy INTx or MSI/MSI-X vector) for ring
 * @irq_num.  With multiple vectors the per-ring name "<dev>-<n>" is built
 * in tnapi->irq_lbl; with a single vector the bare device name is used.
 * Returns the request_irq() result (0 on success, -errno on failure). */
8939 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8942 	unsigned long flags;
8944 	struct tg3_napi *tnapi = &tp->napi[irq_num];
8946 	if (tp->irq_cnt == 1)
8947 		name = tp->dev->name;
8949 		name = &tnapi->irq_lbl[0];
8950 		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8951 		name[IFNAMSIZ-1] = 0;
	/* MSI/MSI-X vectors are exclusive to this device; legacy INTx must
	 * be shared and uses the tagged-status handler when available. */
8954 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
8956 		if (tg3_flag(tp, 1SHOT_MSI))
8961 		if (tg3_flag(tp, TAGGED_STATUS))
8962 			fn = tg3_interrupt_tagged;
8963 		flags = IRQF_SHARED;
8966 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
/* Verify that the chip can actually deliver an interrupt on vector 0:
 * temporarily installs tg3_test_isr, forces a coalescing "now" event and
 * polls the interrupt mailbox for up to 5 iterations.  The production
 * handler is re-installed via tg3_request_irq() before returning. */
8969 static int tg3_test_interrupt(struct tg3 *tp)
8971 	struct tg3_napi *tnapi = &tp->napi[0];
8972 	struct net_device *dev = tp->dev;
8973 	int err, i, intr_ok = 0;
8976 	if (!netif_running(dev))
8979 	tg3_disable_ints(tp);
8981 	free_irq(tnapi->irq_vec, tnapi);
8984 	 * Turn off MSI one shot mode.  Otherwise this test has no
8985 	 * observable way to know whether the interrupt was delivered.
8987 	if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
8988 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8989 		tw32(MSGINT_MODE, val);
	/* NOTE(review): IRQF_SAMPLE_RANDOM is deprecated (removed in later
	 * kernels); harmless here but worth dropping on an update. */
8992 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
8993 			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8997 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8998 	tg3_enable_ints(tp);
9000 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9003 	for (i = 0; i < 5; i++) {
9004 		u32 int_mbox, misc_host_ctrl;
9006 		int_mbox = tr32_mailbox(tnapi->int_mbox);
9007 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
		/* Either a non-zero mailbox or a masked PCI INT means the
		 * interrupt made it through. */
9009 		if ((int_mbox != 0) ||
9010 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9018 	tg3_disable_ints(tp);
9020 	free_irq(tnapi->irq_vec, tnapi);
9022 	err = tg3_request_irq(tp, 0);
9028 	/* Reenable MSI one shot mode. */
9029 	if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9030 		val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9031 		tw32(MSGINT_MODE, val);
9039 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9040  * successfully restored
9042 static int tg3_test_msi(struct tg3 *tp)
9047 	if (!tg3_flag(tp, USING_MSI))
	/* Mask SERR while testing: a failed MSI cycle can terminate with a
	 * Master Abort, which would otherwise be escalated. */
9050 	/* Turn off SERR reporting in case MSI terminates with Master
9053 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9054 	pci_write_config_word(tp->pdev, PCI_COMMAND,
9055 			      pci_cmd & ~PCI_COMMAND_SERR);
9057 	err = tg3_test_interrupt(tp);
9059 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9064 	/* other failures */
9068 	/* MSI test failed, go back to INTx mode */
9069 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9070 		    "to INTx mode. Please report this failure to the PCI "
9071 		    "maintainer and include system chipset information\n");
9073 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9075 	pci_disable_msi(tp->pdev);
9077 	tg3_flag_clear(tp, USING_MSI);
9078 	tp->napi[0].irq_vec = tp->pdev->irq;
9080 	err = tg3_request_irq(tp, 0);
9084 	/* Need to reset the chip because the MSI cycle may have terminated
9085 	 * with Master Abort.
9087 	tg3_full_lock(tp, 1);
9089 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9090 	err = tg3_init_hw(tp, 1);
9092 	tg3_full_unlock(tp);
	/* If even the INTx re-init failed, drop the irq for good. */
9095 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
/* Load the firmware blob named in tp->fw_needed via request_firmware().
 * Validates the length field in the blob header (word 2, full length
 * including BSS) against the actual file size, releasing the blob on a
 * mismatch.  On success tp->fw_needed is cleared so the load is one-shot. */
9100 static int tg3_request_firmware(struct tg3 *tp)
9102 	const __be32 *fw_data;
9104 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9105 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9110 	fw_data = (void *)tp->fw->data;
9112 	/* Firmware blob starts with version numbers, followed by
9113 	 * start address and _full_ length including BSS sections
9114 	 * (which must be longer than the actual data, of course
9117 	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
9118 	if (tp->fw_len < (tp->fw->size - 12)) {
9119 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9120 			   tp->fw_len, tp->fw_needed);
9121 		release_firmware(tp->fw);
9126 	/* We no longer need firmware; we have it. */
9127 	tp->fw_needed = NULL;
/* Try to switch the device to MSI-X.  Requests one vector per online CPU
 * plus one (vector 0 handles link/misc interrupts only), capped at
 * tp->irq_max.  Returns true when MSI-X is usable; false means the caller
 * should fall back to plain MSI.  Also sets ENABLE_RSS/ENABLE_TSS when
 * more than one vector was granted. */
9131 static bool tg3_enable_msix(struct tg3 *tp)
9133 	int i, rc, cpus = num_online_cpus();
	/* VLA on the stack: bounded by tp->irq_max, which is small. */
9134 	struct msix_entry msix_ent[tp->irq_max];
9137 		/* Just fallback to the simpler MSI mode. */
9141 	 * We want as many rx rings enabled as there are cpus.
9142 	 * The first MSIX vector only deals with link interrupts, etc,
9143 	 * so we add one to the number of vectors we are requesting.
9145 	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9147 	for (i = 0; i < tp->irq_max; i++) {
9148 		msix_ent[i].entry  = i;
9149 		msix_ent[i].vector = 0;
	/* pci_enable_msix() > 0 means "only this many vectors available";
	 * retry with the reduced count. */
9152 	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9155 	} else if (rc != 0) {
9156 		if (pci_enable_msix(tp->pdev, msix_ent, rc))
9158 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9163 	for (i = 0; i < tp->irq_max; i++)
9164 		tp->napi[i].irq_vec = msix_ent[i].vector;
9166 	netif_set_real_num_tx_queues(tp->dev, 1);
9167 	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9168 	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9169 		pci_disable_msix(tp->pdev);
9173 	if (tp->irq_cnt > 1) {
9174 		tg3_flag_set(tp, ENABLE_RSS);
9176 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9177 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9178 			tg3_flag_set(tp, ENABLE_TSS);
9179 			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
/* Choose the interrupt mechanism for the device: MSI-X if supported and
 * tg3_enable_msix() succeeds, else MSI, else legacy INTx.  MSI modes
 * require tagged status; without it we refuse MSI entirely.  Also
 * programs MSGINT_MODE and fixes up vector/queue counts for the
 * single-vector case. */
9186 static void tg3_ints_init(struct tg3 *tp)
9188 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9189 	    !tg3_flag(tp, TAGGED_STATUS)) {
9190 		/* All MSI supporting chips should support tagged
9191 		 * status.  Assert that this is the case.
9193 		netdev_warn(tp->dev,
9194 			    "MSI without TAGGED_STATUS? Not using MSI\n");
9198 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9199 		tg3_flag_set(tp, USING_MSIX);
9200 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9201 		tg3_flag_set(tp, USING_MSI);
9203 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9204 		u32 msi_mode = tr32(MSGINT_MODE);
9205 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9206 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9207 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	/* INTx or single-vector MSI: one vector, one rx/tx queue. */
9210 	if (!tg3_flag(tp, USING_MSIX)) {
9212 		tp->napi[0].irq_vec = tp->pdev->irq;
9213 		netif_set_real_num_tx_queues(tp->dev, 1);
9214 		netif_set_real_num_rx_queues(tp->dev, 1);
/* Undo tg3_ints_init(): release MSI/MSI-X resources and clear every
 * interrupt-mode and multiqueue flag so a later re-init starts clean. */
9218 static void tg3_ints_fini(struct tg3 *tp)
9220 	if (tg3_flag(tp, USING_MSIX))
9221 		pci_disable_msix(tp->pdev);
9222 	else if (tg3_flag(tp, USING_MSI))
9223 		pci_disable_msi(tp->pdev);
9224 	tg3_flag_clear(tp, USING_MSI);
9225 	tg3_flag_clear(tp, USING_MSIX);
9226 	tg3_flag_clear(tp, ENABLE_RSS);
9227 	tg3_flag_clear(tp, ENABLE_TSS);
/* net_device_ops .ndo_open: bring the interface up.  Sequence: optional
 * firmware load, power-up, interrupt setup (which determines how many
 * NAPI contexts to allocate), ring/DMA allocation, per-vector IRQ
 * request, hardware init, timer setup, MSI self-test, then enable
 * interrupts and start the tx queues. */
9230 static int tg3_open(struct net_device *dev)
9232 	struct tg3 *tp = netdev_priv(dev);
9235 	if (tp->fw_needed) {
9236 		err = tg3_request_firmware(tp);
		/* 5701 A0 needs the firmware; without it TSO must be off. */
9237 		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9241 			netdev_warn(tp->dev, "TSO capability disabled\n");
9242 			tg3_flag_clear(tp, TSO_CAPABLE);
9243 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
9244 			netdev_notice(tp->dev, "TSO capability restored\n");
9245 			tg3_flag_set(tp, TSO_CAPABLE);
9249 	netif_carrier_off(tp->dev);
9251 	err = tg3_power_up(tp);
9255 	tg3_full_lock(tp, 0);
9257 	tg3_disable_ints(tp);
9258 	tg3_flag_clear(tp, INIT_COMPLETE);
9260 	tg3_full_unlock(tp);
9263 	 * Setup interrupts first so we know how
9264 	 * many NAPI resources to allocate
9268 	/* The placement of this call is tied
9269 	 * to the setup and use of Host TX descriptors.
9271 	err = tg3_alloc_consistent(tp);
9277 	tg3_napi_enable(tp);
9279 	for (i = 0; i < tp->irq_cnt; i++) {
9280 		struct tg3_napi *tnapi = &tp->napi[i];
9281 		err = tg3_request_irq(tp, i);
		/* NOTE(review): this unwind loop decrements i but never
		 * re-points tnapi at &tp->napi[i], so it keeps freeing the
		 * vector of the ring that just FAILED instead of the ones
		 * already requested.  Looks like a real bug — confirm
		 * against the upstream fix before changing. */
9283 			for (i--; i >= 0; i--)
9284 				free_irq(tnapi->irq_vec, tnapi);
9292 	tg3_full_lock(tp, 0);
9294 	err = tg3_init_hw(tp, 1);
9296 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	/* Tagged status allows a slow 1 Hz timer; non-tagged needs 10 Hz
	 * to service the interrupt-race workaround in tg3_timer(). */
9299 		if (tg3_flag(tp, TAGGED_STATUS))
9300 			tp->timer_offset = HZ;
9302 			tp->timer_offset = HZ / 10;
9304 		BUG_ON(tp->timer_offset > HZ);
9305 		tp->timer_counter = tp->timer_multiplier =
9306 			(HZ / tp->timer_offset);
9307 		tp->asf_counter = tp->asf_multiplier =
9308 			((HZ / tp->timer_offset) * 2);
9310 		init_timer(&tp->timer);
9311 		tp->timer.expires = jiffies + tp->timer_offset;
9312 		tp->timer.data = (unsigned long) tp;
9313 		tp->timer.function = tg3_timer;
9316 	tg3_full_unlock(tp);
9321 	if (tg3_flag(tp, USING_MSI)) {
9322 		err = tg3_test_msi(tp);
9325 			tg3_full_lock(tp, 0);
9326 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9328 			tg3_full_unlock(tp);
9333 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9334 			u32 val = tr32(PCIE_TRANSACTION_CFG);
9336 			tw32(PCIE_TRANSACTION_CFG,
9337 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
9343 	tg3_full_lock(tp, 0);
9345 	add_timer(&tp->timer);
9346 	tg3_flag_set(tp, INIT_COMPLETE);
9347 	tg3_enable_ints(tp);
9349 	tg3_full_unlock(tp);
9351 	netif_tx_start_all_queues(dev);
9354 	 * Reset loopback feature if it was turned on while the device was down
9355 	 * make sure that it's installed properly now.
9357 	if (dev->features & NETIF_F_LOOPBACK)
9358 		tg3_set_loopback(dev, dev->features);
	/* Error unwind: free IRQs in reverse order, stop NAPI, release
	 * rings. */
9363 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
9364 		struct tg3_napi *tnapi = &tp->napi[i];
9365 		free_irq(tnapi->irq_vec, tnapi);
9369 	tg3_napi_disable(tp);
9371 	tg3_free_consistent(tp);
9378 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9379 struct rtnl_link_stats64 *);
9380 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
/* net_device_ops .ndo_stop: quiesce NAPI and the reset worker, stop the
 * queues and timer, halt the chip under the full lock, free every IRQ,
 * snapshot the hardware counters into *_prev (so they survive the ring
 * teardown), then release the DMA rings. */
9382 static int tg3_close(struct net_device *dev)
9385 	struct tg3 *tp = netdev_priv(dev);
9387 	tg3_napi_disable(tp);
9388 	cancel_work_sync(&tp->reset_task);
9390 	netif_tx_stop_all_queues(dev);
9392 	del_timer_sync(&tp->timer);
9396 	tg3_full_lock(tp, 1);
9398 	tg3_disable_ints(tp);
9400 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9402 	tg3_flag_clear(tp, INIT_COMPLETE);
9404 	tg3_full_unlock(tp);
9406 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
9407 		struct tg3_napi *tnapi = &tp->napi[i];
9408 		free_irq(tnapi->irq_vec, tnapi);
	/* Accumulate final counters before the status blocks go away. */
9413 	tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9415 	memcpy(&tp->estats_prev, tg3_get_estats(tp),
9416 	       sizeof(tp->estats_prev));
9420 	tg3_free_consistent(tp);
9424 	netif_carrier_off(tp->dev);
9429 static inline u64 get_stat64(tg3_stat64_t *val)
9431 return ((u64)val->high << 32) | ((u64)val->low);
/* Return the cumulative rx CRC error count.  On 5700/5701 copper parts
 * the count lives in a self-clearing PHY test register, so it is read
 * and accumulated into tp->phy_crc_errors under tp->lock; all other
 * chips report it via the rx_fcs_errors hardware counter. */
9434 static u64 calc_crc_errors(struct tg3 *tp)
9436 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
9438 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9439 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9440 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9443 		spin_lock_bh(&tp->lock);
9444 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9445 			tg3_writephy(tp, MII_TG3_TEST1,
9446 				     val | MII_TG3_TEST1_CRC_EN);
9447 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9450 		spin_unlock_bh(&tp->lock);
9452 		tp->phy_crc_errors += val;
9454 		return tp->phy_crc_errors;
9457 	return get_stat64(&hw_stats->rx_fcs_errors);
/* Fold one hardware counter into the running ethtool statistics:
 * new value = snapshot taken at last close + current hardware count. */
9460 #define ESTAT_ADD(member) \
9461 	estats->member = old_estats->member + \
9462 			 get_stat64(&hw_stats->member)
/* Refresh tp->estats by adding the live hardware counters (tp->hw_stats)
 * to the snapshot saved at the previous close (tp->estats_prev), one
 * ESTAT_ADD() per counter, and return a pointer to the result. */
9464 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9466 	struct tg3_ethtool_stats *estats = &tp->estats;
9467 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9468 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
	/* Receive-side counters. */
9473 	ESTAT_ADD(rx_octets);
9474 	ESTAT_ADD(rx_fragments);
9475 	ESTAT_ADD(rx_ucast_packets);
9476 	ESTAT_ADD(rx_mcast_packets);
9477 	ESTAT_ADD(rx_bcast_packets);
9478 	ESTAT_ADD(rx_fcs_errors);
9479 	ESTAT_ADD(rx_align_errors);
9480 	ESTAT_ADD(rx_xon_pause_rcvd);
9481 	ESTAT_ADD(rx_xoff_pause_rcvd);
9482 	ESTAT_ADD(rx_mac_ctrl_rcvd);
9483 	ESTAT_ADD(rx_xoff_entered);
9484 	ESTAT_ADD(rx_frame_too_long_errors);
9485 	ESTAT_ADD(rx_jabbers);
9486 	ESTAT_ADD(rx_undersize_packets);
9487 	ESTAT_ADD(rx_in_length_errors);
9488 	ESTAT_ADD(rx_out_length_errors);
9489 	ESTAT_ADD(rx_64_or_less_octet_packets);
9490 	ESTAT_ADD(rx_65_to_127_octet_packets);
9491 	ESTAT_ADD(rx_128_to_255_octet_packets);
9492 	ESTAT_ADD(rx_256_to_511_octet_packets);
9493 	ESTAT_ADD(rx_512_to_1023_octet_packets);
9494 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
9495 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
9496 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
9497 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
9498 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
	/* Transmit-side counters. */
9500 	ESTAT_ADD(tx_octets);
9501 	ESTAT_ADD(tx_collisions);
9502 	ESTAT_ADD(tx_xon_sent);
9503 	ESTAT_ADD(tx_xoff_sent);
9504 	ESTAT_ADD(tx_flow_control);
9505 	ESTAT_ADD(tx_mac_errors);
9506 	ESTAT_ADD(tx_single_collisions);
9507 	ESTAT_ADD(tx_mult_collisions);
9508 	ESTAT_ADD(tx_deferred);
9509 	ESTAT_ADD(tx_excessive_collisions);
9510 	ESTAT_ADD(tx_late_collisions);
9511 	ESTAT_ADD(tx_collide_2times);
9512 	ESTAT_ADD(tx_collide_3times);
9513 	ESTAT_ADD(tx_collide_4times);
9514 	ESTAT_ADD(tx_collide_5times);
9515 	ESTAT_ADD(tx_collide_6times);
9516 	ESTAT_ADD(tx_collide_7times);
9517 	ESTAT_ADD(tx_collide_8times);
9518 	ESTAT_ADD(tx_collide_9times);
9519 	ESTAT_ADD(tx_collide_10times);
9520 	ESTAT_ADD(tx_collide_11times);
9521 	ESTAT_ADD(tx_collide_12times);
9522 	ESTAT_ADD(tx_collide_13times);
9523 	ESTAT_ADD(tx_collide_14times);
9524 	ESTAT_ADD(tx_collide_15times);
9525 	ESTAT_ADD(tx_ucast_packets);
9526 	ESTAT_ADD(tx_mcast_packets);
9527 	ESTAT_ADD(tx_bcast_packets);
9528 	ESTAT_ADD(tx_carrier_sense_errors);
9529 	ESTAT_ADD(tx_discards);
9530 	ESTAT_ADD(tx_errors);
	/* Internal DMA / receive-list-placement counters. */
9532 	ESTAT_ADD(dma_writeq_full);
9533 	ESTAT_ADD(dma_write_prioq_full);
9534 	ESTAT_ADD(rxbds_empty);
9535 	ESTAT_ADD(rx_discards);
9536 	ESTAT_ADD(rx_errors);
9537 	ESTAT_ADD(rx_threshold_hit);
9539 	ESTAT_ADD(dma_readq_full);
9540 	ESTAT_ADD(dma_read_prioq_full);
9541 	ESTAT_ADD(tx_comp_queue_full);
9543 	ESTAT_ADD(ring_set_send_prod_index);
9544 	ESTAT_ADD(ring_status_update);
9545 	ESTAT_ADD(nic_irqs);
9546 	ESTAT_ADD(nic_avoided_irqs);
9547 	ESTAT_ADD(nic_tx_threshold_hit);
/* .ndo_get_stats64: fill *stats by adding live hardware counters to the
 * snapshot saved at the last close (tp->net_stats_prev), mapping the
 * tg3 counter names onto the generic rtnl_link_stats64 fields. */
9552 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9553 						 struct rtnl_link_stats64 *stats)
9555 	struct tg3 *tp = netdev_priv(dev);
9556 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9557 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
9562 	stats->rx_packets = old_stats->rx_packets +
9563 		get_stat64(&hw_stats->rx_ucast_packets) +
9564 		get_stat64(&hw_stats->rx_mcast_packets) +
9565 		get_stat64(&hw_stats->rx_bcast_packets);
9567 	stats->tx_packets = old_stats->tx_packets +
9568 		get_stat64(&hw_stats->tx_ucast_packets) +
9569 		get_stat64(&hw_stats->tx_mcast_packets) +
9570 		get_stat64(&hw_stats->tx_bcast_packets);
9572 	stats->rx_bytes = old_stats->rx_bytes +
9573 		get_stat64(&hw_stats->rx_octets);
9574 	stats->tx_bytes = old_stats->tx_bytes +
9575 		get_stat64(&hw_stats->tx_octets);
9577 	stats->rx_errors = old_stats->rx_errors +
9578 		get_stat64(&hw_stats->rx_errors);
9579 	stats->tx_errors = old_stats->tx_errors +
9580 		get_stat64(&hw_stats->tx_errors) +
9581 		get_stat64(&hw_stats->tx_mac_errors) +
9582 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
9583 		get_stat64(&hw_stats->tx_discards);
9585 	stats->multicast = old_stats->multicast +
9586 		get_stat64(&hw_stats->rx_mcast_packets);
9587 	stats->collisions = old_stats->collisions +
9588 		get_stat64(&hw_stats->tx_collisions);
9590 	stats->rx_length_errors = old_stats->rx_length_errors +
9591 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
9592 		get_stat64(&hw_stats->rx_undersize_packets);
9594 	stats->rx_over_errors = old_stats->rx_over_errors +
9595 		get_stat64(&hw_stats->rxbds_empty);
9596 	stats->rx_frame_errors = old_stats->rx_frame_errors +
9597 		get_stat64(&hw_stats->rx_align_errors);
9598 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9599 		get_stat64(&hw_stats->tx_discards);
9600 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9601 		get_stat64(&hw_stats->tx_carrier_sense_errors);
	/* CRC errors may come from a PHY register on 5700/5701; see
	 * calc_crc_errors(). */
9603 	stats->rx_crc_errors = old_stats->rx_crc_errors +
9604 		calc_crc_errors(tp);
9606 	stats->rx_missed_errors = old_stats->rx_missed_errors +
9607 		get_stat64(&hw_stats->rx_discards);
	/* rx_dropped is maintained by the driver, not the hardware. */
9609 	stats->rx_dropped = tp->rx_dropped;
/* Bit-at-a-time CRC over @buf[0..len) — presumably the Ethernet CRC-32
 * used to pick a bit in the 128-bit multicast hash filter (see the
 * caller __tg3_set_rx_mode); the inner loop walks the 8 bits of each
 * byte.  TODO(review): core of the loop is not visible here — confirm
 * polynomial against the canonical implementation. */
9614 static inline u32 calc_crc(unsigned char *buf, int len)
9622 	for (j = 0; j < len; j++) {
9625 		for (k = 0; k < 8; k++) {
9638 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9640 /* accept or reject all multicast frames */
9641 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9642 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9643 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9644 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/* Recompute the receive filter from dev->flags and the multicast list:
 * promiscuous / all-multi / none / hash-filtered, plus the VLAN-tag-keep
 * bit.  Caller must hold the appropriate tg3 locks; the MAC_RX_MODE
 * register is only rewritten when the computed mode actually changed. */
9647 static void __tg3_set_rx_mode(struct net_device *dev)
9649 	struct tg3 *tp = netdev_priv(dev);
9652 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9653 				  RX_MODE_KEEP_VLAN_TAG);
9655 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9656 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9659 	if (!tg3_flag(tp, ENABLE_ASF))
9660 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9663 	if (dev->flags & IFF_PROMISC) {
9664 		/* Promiscuous mode. */
9665 		rx_mode |= RX_MODE_PROMISC;
9666 	} else if (dev->flags & IFF_ALLMULTI) {
9667 		/* Accept all multicast. */
9668 		tg3_set_multi(tp, 1);
9669 	} else if (netdev_mc_empty(dev)) {
9670 		/* Reject all multicast. */
9671 		tg3_set_multi(tp, 0);
9673 		/* Accept one or more multicast(s). */
9674 		struct netdev_hw_addr *ha;
9675 		u32 mc_filter[4] = { 0, };
		/* Hash each address with calc_crc(); the derived bit index
		 * selects one of 128 filter bits (4 regs x 32 bits). */
9680 		netdev_for_each_mc_addr(ha, dev) {
9681 			crc = calc_crc(ha->addr, ETH_ALEN);
9683 			regidx = (bit & 0x60) >> 5;
9685 			mc_filter[regidx] |= (1 << bit);
9688 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9689 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9690 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9691 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9694 	if (rx_mode != tp->rx_mode) {
9695 		tp->rx_mode = rx_mode;
9696 		tw32_f(MAC_RX_MODE, rx_mode);
/* .ndo_set_rx_mode entry point: take the full lock and delegate to
 * __tg3_set_rx_mode().  A stopped interface is left untouched — the
 * filter is reprogrammed from scratch on the next open. */
9701 static void tg3_set_rx_mode(struct net_device *dev)
9703 	struct tg3 *tp = netdev_priv(dev);
9705 	if (!netif_running(dev))
9708 	tg3_full_lock(tp, 0);
9709 	__tg3_set_rx_mode(dev);
9710 	tg3_full_unlock(tp);
/* ethtool get_regs_len: the register dump has a fixed size. */
9713 static int tg3_get_regs_len(struct net_device *dev)
9715 	return TG3_REG_BLK_SIZE;
/* ethtool get_regs: zero the caller's buffer, then dump the legacy
 * register block into it under the full lock.  Low-power state is
 * checked first — register reads are not safe while powered down. */
9718 static void tg3_get_regs(struct net_device *dev,
9719 			 struct ethtool_regs *regs, void *_p)
9721 	struct tg3 *tp = netdev_priv(dev);
9725 	memset(_p, 0, TG3_REG_BLK_SIZE);
9727 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9730 	tg3_full_lock(tp, 0);
9732 	tg3_dump_legacy_regs(tp, (u32 *)_p);
9734 	tg3_full_unlock(tp);
9737 static int tg3_get_eeprom_len(struct net_device *dev)
9739 struct tg3 *tp = netdev_priv(dev);
9741 return tp->nvram_size;
/* ethtool get_eeprom: read eeprom->len bytes from NVRAM starting at
 * eeprom->offset.  NVRAM is word-addressed, so the transfer is split
 * into an unaligned head, whole 4-byte words, and an unaligned tail. */
9744 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9746 	struct tg3 *tp = netdev_priv(dev);
9749 	u32 i, offset, len, b_offset, b_count;
9752 	if (tg3_flag(tp, NO_NVRAM))
9755 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9758 	offset = eeprom->offset;
9762 	eeprom->magic = TG3_EEPROM_MAGIC;
9765 	/* adjustments to start on required 4 byte boundary */
9766 	b_offset = offset & 3;
9767 	b_count = 4 - b_offset;
9768 	if (b_count > len) {
9769 		/* i.e. offset=1 len=2 */
9772 	ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9775 	memcpy(data, ((char *)&val) + b_offset, b_count);
9778 	eeprom->len += b_count;
9781 	/* read bytes up to the last 4 byte boundary */
9782 	pd = &data[eeprom->len];
9783 	for (i = 0; i < (len - (len & 3)); i += 4) {
9784 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
9789 		memcpy(pd + i, &val, 4);
9794 	/* read last bytes not ending on 4 byte boundary */
9795 	pd = &data[eeprom->len];
9797 	b_offset = offset + len - b_count;
9798 	ret = tg3_nvram_read_be32(tp, b_offset, &val);
9801 	memcpy(pd, &val, b_count);
9802 	eeprom->len += b_count;
9807 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
/* ethtool set_eeprom: write eeprom->len bytes to NVRAM at eeprom->offset.
 * Since NVRAM writes are word-granular, a ragged head/tail forces a
 * read-modify-write: the surrounding words are fetched, merged into a
 * kmalloc'd bounce buffer, and the whole aligned span is written back. */
9809 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9811 	struct tg3 *tp = netdev_priv(dev);
9813 	u32 offset, len, b_offset, odd_len;
9817 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9820 	if (tg3_flag(tp, NO_NVRAM) ||
9821 	    eeprom->magic != TG3_EEPROM_MAGIC)
9824 	offset = eeprom->offset;
9827 	if ((b_offset = (offset & 3))) {
9828 		/* adjustments to start on required 4 byte boundary */
9829 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9840 		/* adjustments to end on required 4 byte boundary */
9842 		len = (len + 3) & ~3;
9843 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9849 	if (b_offset || odd_len) {
9850 		buf = kmalloc(len, GFP_KERNEL);
		/* Patch the preserved head/tail words around the user data. */
9854 			memcpy(buf, &start, 4);
9856 			memcpy(buf+len-4, &end, 4);
9857 		memcpy(buf + b_offset, data, eeprom->len);
9860 	ret = tg3_nvram_write_block(tp, offset, len, buf);
/* ethtool get_settings: delegate to phylib when USE_PHYLIB, otherwise
 * report supported modes from the phy_flags (10/100-only, serdes/fibre)
 * and the currently negotiated speed/duplex from link_config. */
9868 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9870 	struct tg3 *tp = netdev_priv(dev);
9872 	if (tg3_flag(tp, USE_PHYLIB)) {
9873 		struct phy_device *phydev;
9874 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9876 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9877 		return phy_ethtool_gset(phydev, cmd);
9880 	cmd->supported = (SUPPORTED_Autoneg);
9882 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9883 		cmd->supported |= (SUPPORTED_1000baseT_Half |
9884 				   SUPPORTED_1000baseT_Full);
9886 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9887 		cmd->supported |= (SUPPORTED_100baseT_Half |
9888 				   SUPPORTED_100baseT_Full |
9889 				   SUPPORTED_10baseT_Half |
9890 				   SUPPORTED_10baseT_Full |
9892 		cmd->port = PORT_TP;
9894 		cmd->supported |= SUPPORTED_FIBRE;
9895 		cmd->port = PORT_FIBRE;
9898 	cmd->advertising = tp->link_config.advertising;
	/* Speed/duplex are only meaningful while the interface is up. */
9899 	if (netif_running(dev)) {
9900 		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
9901 		cmd->duplex = tp->link_config.active_duplex;
9903 		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
9904 		cmd->duplex = DUPLEX_INVALID;
9906 	cmd->phy_address = tp->phy_addr;
9907 	cmd->transceiver = XCVR_INTERNAL;
9908 	cmd->autoneg = tp->link_config.autoneg;
/* ethtool set_settings: validate the requested autoneg/speed/duplex
 * against what this PHY supports (masking the advertisement down to the
 * legal set), record it in link_config (and the orig_* fields used for
 * power-management restore), then kick tg3_setup_phy() if running.
 * Delegates entirely to phylib when USE_PHYLIB is set. */
9914 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9916 	struct tg3 *tp = netdev_priv(dev);
9917 	u32 speed = ethtool_cmd_speed(cmd);
9919 	if (tg3_flag(tp, USE_PHYLIB)) {
9920 		struct phy_device *phydev;
9921 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9923 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9924 		return phy_ethtool_sset(phydev, cmd);
9927 	if (cmd->autoneg != AUTONEG_ENABLE &&
9928 	    cmd->autoneg != AUTONEG_DISABLE)
9931 	if (cmd->autoneg == AUTONEG_DISABLE &&
9932 	    cmd->duplex != DUPLEX_FULL &&
9933 	    cmd->duplex != DUPLEX_HALF)
9936 	if (cmd->autoneg == AUTONEG_ENABLE) {
9937 		u32 mask = ADVERTISED_Autoneg |
9939 			   ADVERTISED_Asym_Pause;
9941 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9942 			mask |= ADVERTISED_1000baseT_Half |
9943 				ADVERTISED_1000baseT_Full;
9945 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9946 			mask |= ADVERTISED_100baseT_Half |
9947 				ADVERTISED_100baseT_Full |
9948 				ADVERTISED_10baseT_Half |
9949 				ADVERTISED_10baseT_Full |
9952 			mask |= ADVERTISED_FIBRE;
		/* Reject anything outside the supported set... */
9954 		if (cmd->advertising & ~mask)
		/* ...then clamp to the speed/duplex bits only. */
9957 		mask &= (ADVERTISED_1000baseT_Half |
9958 			 ADVERTISED_1000baseT_Full |
9959 			 ADVERTISED_100baseT_Half |
9960 			 ADVERTISED_100baseT_Full |
9961 			 ADVERTISED_10baseT_Half |
9962 			 ADVERTISED_10baseT_Full);
9964 		cmd->advertising &= mask;
		/* Forced mode: serdes links only allow 1000/full. */
9966 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
9967 			if (speed != SPEED_1000)
9970 			if (cmd->duplex != DUPLEX_FULL)
9973 			if (speed != SPEED_100 &&
9979 	tg3_full_lock(tp, 0);
9981 	tp->link_config.autoneg = cmd->autoneg;
9982 	if (cmd->autoneg == AUTONEG_ENABLE) {
9983 		tp->link_config.advertising = (cmd->advertising |
9984 					      ADVERTISED_Autoneg);
9985 		tp->link_config.speed = SPEED_INVALID;
9986 		tp->link_config.duplex = DUPLEX_INVALID;
9988 		tp->link_config.advertising = 0;
9989 		tp->link_config.speed = speed;
9990 		tp->link_config.duplex = cmd->duplex;
	/* orig_* mirror the user's request across suspend/resume. */
9993 	tp->link_config.orig_speed = tp->link_config.speed;
9994 	tp->link_config.orig_duplex = tp->link_config.duplex;
9995 	tp->link_config.orig_autoneg = tp->link_config.autoneg;
9997 	if (netif_running(dev))
9998 		tg3_setup_phy(tp, 1);
10000 	tg3_full_unlock(tp);
10005 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10007 struct tg3 *tp = netdev_priv(dev);
10009 strcpy(info->driver, DRV_MODULE_NAME);
10010 strcpy(info->version, DRV_MODULE_VERSION);
10011 strcpy(info->fw_version, tp->fw_ver);
10012 strcpy(info->bus_info, pci_name(tp->pdev));
10015 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10017 struct tg3 *tp = netdev_priv(dev);
10019 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10020 wol->supported = WAKE_MAGIC;
10022 wol->supported = 0;
10024 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10025 wol->wolopts = WAKE_MAGIC;
10026 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool set_wol: accept only WAKE_MAGIC (or nothing), propagate the
 * choice to the device-model wakeup state, and mirror it into the
 * WOL_ENABLE flag under tp->lock. */
10029 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10031 	struct tg3 *tp = netdev_priv(dev);
10032 	struct device *dp = &tp->pdev->dev;
10034 	if (wol->wolopts & ~WAKE_MAGIC)
10036 	if ((wol->wolopts & WAKE_MAGIC) &&
10037 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10040 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10042 	spin_lock_bh(&tp->lock);
10043 	if (device_may_wakeup(dp))
10044 		tg3_flag_set(tp, WOL_ENABLE);
10046 		tg3_flag_clear(tp, WOL_ENABLE);
10047 	spin_unlock_bh(&tp->lock);
10052 static u32 tg3_get_msglevel(struct net_device *dev)
10054 struct tg3 *tp = netdev_priv(dev);
10055 return tp->msg_enable;
10058 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10060 struct tg3 *tp = netdev_priv(dev);
10061 tp->msg_enable = value;
/* ethtool nway_reset: restart autonegotiation.  Via phylib when
 * USE_PHYLIB; otherwise by setting BMCR_ANRESTART directly, but only if
 * autoneg is enabled or parallel detection is in use. */
10064 static int tg3_nway_reset(struct net_device *dev)
10066 	struct tg3 *tp = netdev_priv(dev);
10069 	if (!netif_running(dev))
10072 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10075 	if (tg3_flag(tp, USE_PHYLIB)) {
10076 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10078 		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10082 		spin_lock_bh(&tp->lock);
		/* Deliberate double read: the first flushes any latched
		 * value so the second returns current BMCR contents. */
10084 		tg3_readphy(tp, MII_BMCR, &bmcr);
10085 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10086 		    ((bmcr & BMCR_ANENABLE) ||
10087 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10088 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10092 		spin_unlock_bh(&tp->lock);
10098 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10100 struct tg3 *tp = netdev_priv(dev);
10102 ering->rx_max_pending = tp->rx_std_ring_mask;
10103 ering->rx_mini_max_pending = 0;
10104 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10105 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10107 ering->rx_jumbo_max_pending = 0;
10109 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10111 ering->rx_pending = tp->rx_pending;
10112 ering->rx_mini_pending = 0;
10113 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10114 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10116 ering->rx_jumbo_pending = 0;
10118 ering->tx_pending = tp->napi[0].tx_pending;
/* ethtool set_ringparam: validate the requested ring sizes (tx must be
 * large enough for a maximally fragmented skb; tripled on TSO_BUG
 * chips), stop traffic if running, apply the sizes, then restart the
 * hardware with the new rings. */
10121 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10123 	struct tg3 *tp = netdev_priv(dev);
10124 	int i, irq_sync = 0, err = 0;
10126 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10127 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10128 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10129 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
10130 	    (tg3_flag(tp, TSO_BUG) &&
10131 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10134 	if (netif_running(dev)) {
10136 		tg3_netif_stop(tp);
10140 	tg3_full_lock(tp, irq_sync);
10142 	tp->rx_pending = ering->rx_pending;
	/* Some chips cap the standard rx ring at 64 entries. */
10144 	if (tg3_flag(tp, MAX_RXPEND_64) &&
10145 	    tp->rx_pending > 63)
10146 		tp->rx_pending = 63;
10147 	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10149 	for (i = 0; i < tp->irq_max; i++)
10150 		tp->napi[i].tx_pending = ering->tx_pending;
10152 	if (netif_running(dev)) {
10153 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10154 		err = tg3_restart_hw(tp, 1);
10156 			tg3_netif_start(tp);
10159 	tg3_full_unlock(tp);
10161 	if (irq_sync && !err)
10167 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10169 struct tg3 *tp = netdev_priv(dev);
10171 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10173 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10174 epause->rx_pause = 1;
10176 epause->rx_pause = 0;
10178 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10179 epause->tx_pause = 1;
10181 epause->tx_pause = 0;
/* ethtool set_pauseparam: apply new flow-control settings.  The phylib
 * path translates rx/tx pause into Pause/Asym_Pause advertisement bits
 * and renegotiates (or forces flow control directly when autoneg is
 * off); the legacy path stops traffic, updates link_config.flowctrl and
 * restarts the hardware. */
10184 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10186 	struct tg3 *tp = netdev_priv(dev);
10189 	if (tg3_flag(tp, USE_PHYLIB)) {
10191 		struct phy_device *phydev;
10193 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		/* Asymmetric pause requires PHY support for Asym_Pause. */
10195 		if (!(phydev->supported & SUPPORTED_Pause) ||
10196 		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10197 		     (epause->rx_pause != epause->tx_pause)))
10200 		tp->link_config.flowctrl = 0;
10201 		if (epause->rx_pause) {
10202 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
10204 			if (epause->tx_pause) {
10205 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
10206 				newadv = ADVERTISED_Pause;
10208 				newadv = ADVERTISED_Pause |
10209 					 ADVERTISED_Asym_Pause;
10210 		} else if (epause->tx_pause) {
10211 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
10212 			newadv = ADVERTISED_Asym_Pause;
10216 		if (epause->autoneg)
10217 			tg3_flag_set(tp, PAUSE_AUTONEG);
10219 			tg3_flag_clear(tp, PAUSE_AUTONEG);
10221 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10222 			u32 oldadv = phydev->advertising &
10223 				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10224 			if (oldadv != newadv) {
10225 				phydev->advertising &=
10226 					~(ADVERTISED_Pause |
10227 					  ADVERTISED_Asym_Pause);
10228 				phydev->advertising |= newadv;
10229 				if (phydev->autoneg) {
10231 					 * Always renegotiate the link to
10232 					 * inform our link partner of our
10233 					 * flow control settings, even if the
10234 					 * flow control is forced.  Let
10235 					 * tg3_adjust_link() do the final
10236 					 * flow control setup.
10238 					return phy_start_aneg(phydev);
10242 			if (!epause->autoneg)
10243 				tg3_setup_flow_control(tp, 0, 0);
10245 			tp->link_config.orig_advertising &=
10246 					~(ADVERTISED_Pause |
10247 					  ADVERTISED_Asym_Pause);
10248 			tp->link_config.orig_advertising |= newadv;
	/* Legacy (non-phylib) path below. */
10253 		if (netif_running(dev)) {
10254 			tg3_netif_stop(tp);
10258 		tg3_full_lock(tp, irq_sync);
10260 		if (epause->autoneg)
10261 			tg3_flag_set(tp, PAUSE_AUTONEG);
10263 			tg3_flag_clear(tp, PAUSE_AUTONEG);
10264 		if (epause->rx_pause)
10265 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
10267 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10268 		if (epause->tx_pause)
10269 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
10271 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10273 		if (netif_running(dev)) {
10274 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10275 			err = tg3_restart_hw(tp, 1);
10277 				tg3_netif_start(tp);
10280 		tg3_full_unlock(tp);
/* ethtool .get_sset_count: number of entries in the requested string set
 * (self-test names vs. statistics names); -EOPNOTSUPP for other sets.
 */
10286 static int tg3_get_sset_count(struct net_device *dev, int sset)
10290 return TG3_NUM_TEST;
10292 return TG3_NUM_STATS;
10294 return -EOPNOTSUPP;
10298 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10300 switch (stringset) {
10302 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
10305 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
10308 WARN_ON(1); /* we need a WARN() */
/* ethtool .set_phys_id: blink the port LEDs so an administrator can
 * physically identify this NIC.  Returning 1 for ETHTOOL_ID_ACTIVE asks
 * the ethtool core to call back with ON/OFF once per second.
 */
10313 static int tg3_set_phys_id(struct net_device *dev,
10314 enum ethtool_phys_id_state state)
10316 struct tg3 *tp = netdev_priv(dev);
10318 if (!netif_running(tp->dev))
10322 case ETHTOOL_ID_ACTIVE:
10323 return 1; /* cycle on/off once per second */
10325 case ETHTOOL_ID_ON:
/* Override normal LED control and force every speed/traffic LED on. */
10326 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10327 LED_CTRL_1000MBPS_ON |
10328 LED_CTRL_100MBPS_ON |
10329 LED_CTRL_10MBPS_ON |
10330 LED_CTRL_TRAFFIC_OVERRIDE |
10331 LED_CTRL_TRAFFIC_BLINK |
10332 LED_CTRL_TRAFFIC_LED)
10335 case ETHTOOL_ID_OFF:
/* Override but drive nothing: all LEDs off. */
10336 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10337 LED_CTRL_TRAFFIC_OVERRIDE)
10340 case ETHTOOL_ID_INACTIVE:
/* Restore the saved, hardware-controlled LED configuration. */
10341 tw32(MAC_LED_CTRL, tp->led_ctrl)
/* ethtool .get_ethtool_stats: refresh the estats block from hardware
 * counters (via tg3_get_estats) and copy it out to userspace's buffer.
 */
10348 static void tg3_get_ethtool_stats(struct net_device *dev,
10349 struct ethtool_stats *estats, u64 *tmp_stats)
10351 struct tg3 *tp = netdev_priv(dev);
10352 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
/* Read the Vital Product Data block into a freshly kmalloc'd buffer.
 * For TG3_EEPROM_MAGIC devices the VPD location may be pointed at by an
 * extended-VPD directory entry in NVRAM; otherwise a fixed offset/length
 * (TG3_NVM_VPD_OFF/LEN) is used.  Non-EEPROM devices fall back to the
 * PCI VPD capability (pci_read_vpd).  Caller owns (and must kfree) the
 * returned buffer; returns NULL-ish on failure paths (elided here).
 */
10355 static __be32 * tg3_vpd_readblock(struct tg3 *tp)
10359 u32 offset = 0, len = 0;
10362 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10365 if (magic == TG3_EEPROM_MAGIC) {
/* Scan the NVRAM directory for an extended-VPD entry. */
10366 for (offset = TG3_NVM_DIR_START;
10367 offset < TG3_NVM_DIR_END;
10368 offset += TG3_NVM_DIRENT_SIZE) {
10369 if (tg3_nvram_read(tp, offset, &val))
10372 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10373 TG3_NVM_DIRTYPE_EXTVPD)
10377 if (offset != TG3_NVM_DIR_END) {
10378 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10379 if (tg3_nvram_read(tp, offset + 4, &offset))
10382 offset = tg3_nvram_logical_addr(tp, offset);
/* No directory entry found: use the default VPD window. */
10386 if (!offset || !len) {
10387 offset = TG3_NVM_VPD_OFF;
10388 len = TG3_NVM_VPD_LEN;
10391 buf = kmalloc(len, GFP_KERNEL);
10395 if (magic == TG3_EEPROM_MAGIC) {
10396 for (i = 0; i < len; i += 4) {
10397 /* The data is in little-endian format in NVRAM.
10398 * Use the big-endian read routines to preserve
10399 * the byte order as it exists in NVRAM.
10401 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
/* Non-EEPROM device: pull the VPD through PCI config space.
 * Up to 3 attempts; -ETIMEDOUT/-EINTR abort the loop.
 */
10407 unsigned int pos = 0;
10409 ptr = (u8 *)&buf[0];
10410 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10411 cnt = pci_read_vpd(tp->pdev, pos,
10413 if (cnt == -ETIMEDOUT || cnt == -EINTR)
/* Byte counts of the NVRAM region to checksum in tg3_test_nvram(), one per
 * recognized image layout (legacy EEPROM, selfboot format-1 revisions,
 * hardware selfboot) plus the data-byte count of the HW selfboot image.
 */
10429 #define NVRAM_TEST_SIZE 0x100
10430 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10431 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10432 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10433 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10434 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
/* Self-test: validate the NVRAM contents.  Chooses the image size from the
 * magic number, reads the image, then verifies whichever integrity scheme
 * that layout uses: 8-bit checksum (selfboot FW), per-byte parity bits
 * (selfboot HW), or CRC over bootstrap + manufacturing blocks plus the
 * VPD checksum keyword (legacy EEPROM images).
 */
10436 static int tg3_test_nvram(struct tg3 *tp)
10440 int i, j, k, err = 0, size;
10442 if (tg3_flag(tp, NO_NVRAM))
10445 if (tg3_nvram_read(tp, 0, &magic) != 0)
/* Pick the number of bytes to validate based on the image format. */
10448 if (magic == TG3_EEPROM_MAGIC)
10449 size = NVRAM_TEST_SIZE;
10450 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10451 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10452 TG3_EEPROM_SB_FORMAT_1) {
10453 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10454 case TG3_EEPROM_SB_REVISION_0:
10455 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10457 case TG3_EEPROM_SB_REVISION_2:
10458 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10460 case TG3_EEPROM_SB_REVISION_3:
10461 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10468 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10469 size = NVRAM_SELFBOOT_HW_SIZE;
10473 buf = kmalloc(size, GFP_KERNEL);
/* Read the whole image, preserving NVRAM byte order. */
10478 for (i = 0, j = 0; i < size; i += 4, j++) {
10479 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10486 /* Selfboot format */
10487 magic = be32_to_cpu(buf[0]);
10488 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10489 TG3_EEPROM_MAGIC_FW) {
10490 u8 *buf8 = (u8 *) buf, csum8 = 0;
10492 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10493 TG3_EEPROM_SB_REVISION_2) {
10494 /* For rev 2, the csum doesn't include the MBA. */
10495 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10497 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10500 for (i = 0; i < size; i++)
10513 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10514 TG3_EEPROM_MAGIC_HW) {
10515 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10516 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10517 u8 *buf8 = (u8 *) buf;
10519 /* Separate the parity bits and the data bytes. */
10520 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10521 if ((i == 0) || (i == 8)) {
10525 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10526 parity[k++] = buf8[i] & msk;
10528 } else if (i == 16) {
10532 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10533 parity[k++] = buf8[i] & msk;
10536 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10537 parity[k++] = buf8[i] & msk;
10540 data[j++] = buf8[i];
/* Odd-parity check: each data byte's popcount must disagree with its
 * stored parity bit.
 */
10544 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10545 u8 hw8 = hweight8(data[i]);
10547 if ((hw8 & 0x1) && parity[i])
10549 else if (!(hw8 & 0x1) && !parity[i])
10558 /* Bootstrap checksum at offset 0x10 */
10559 csum = calc_crc((unsigned char *) buf, 0x10);
10560 if (csum != le32_to_cpu(buf[0x10/4]))
10563 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10564 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10565 if (csum != le32_to_cpu(buf[0xfc/4]))
/* Finally verify the VPD "RV" checksum keyword (sums to zero). */
10570 buf = tg3_vpd_readblock(tp);
10574 i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10575 PCI_VPD_LRDT_RO_DATA);
10577 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10581 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10584 i += PCI_VPD_LRDT_TAG_SIZE;
10585 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10586 PCI_VPD_RO_KEYWORD_CHKSUM);
10590 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10592 for (i = 0; i <= j; i++)
10593 csum8 += ((u8 *)buf)[i];
/* Seconds tg3_test_link() waits for carrier: serdes links come up faster
 * than copper autonegotiation, hence the shorter timeout.
 */
10607 #define TG3_SERDES_TIMEOUT_SEC 2
10608 #define TG3_COPPER_TIMEOUT_SEC 6
/* Self-test: poll once per second (interruptibly) for netif carrier,
 * up to a PHY-type-dependent timeout.  Requires the interface to be up.
 */
10610 static int tg3_test_link(struct tg3 *tp)
10614 if (!netif_running(tp->dev))
10617 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10618 max = TG3_SERDES_TIMEOUT_SEC;
10620 max = TG3_COPPER_TIMEOUT_SEC;
10622 for (i = 0; i < max; i++) {
10623 if (netif_carrier_ok(tp->dev))
10626 if (msleep_interruptible(1000))
10633 /* Only test the commonly used registers */
/* Self-test: for each table entry applicable to this ASIC, write all-zeros
 * and then all-ones through write_mask, verifying read-only bits keep
 * their value and read/write bits take the written pattern.  The original
 * register content is saved and restored around each probe.
 */
10634 static int tg3_test_registers(struct tg3 *tp)
10636 int i, is_5705, is_5750;
10637 u32 offset, read_mask, write_mask, val, save_val, read_val;
/* Per-entry applicability flags: restrict a row to (or exclude it from)
 * the 5705/5750/5788 chip families.
 */
10641 #define TG3_FL_5705 0x1
10642 #define TG3_FL_NOT_5705 0x2
10643 #define TG3_FL_NOT_5788 0x4
10644 #define TG3_FL_NOT_5750 0x8
/* Table rows: { offset, flags, read_mask (read-only bits), write_mask }. */
10648 /* MAC Control Registers */
10649 { MAC_MODE, TG3_FL_NOT_5705,
10650 0x00000000, 0x00ef6f8c },
10651 { MAC_MODE, TG3_FL_5705,
10652 0x00000000, 0x01ef6b8c },
10653 { MAC_STATUS, TG3_FL_NOT_5705,
10654 0x03800107, 0x00000000 },
10655 { MAC_STATUS, TG3_FL_5705,
10656 0x03800100, 0x00000000 },
10657 { MAC_ADDR_0_HIGH, 0x0000,
10658 0x00000000, 0x0000ffff },
10659 { MAC_ADDR_0_LOW, 0x0000,
10660 0x00000000, 0xffffffff },
10661 { MAC_RX_MTU_SIZE, 0x0000,
10662 0x00000000, 0x0000ffff },
10663 { MAC_TX_MODE, 0x0000,
10664 0x00000000, 0x00000070 },
10665 { MAC_TX_LENGTHS, 0x0000,
10666 0x00000000, 0x00003fff },
10667 { MAC_RX_MODE, TG3_FL_NOT_5705,
10668 0x00000000, 0x000007fc },
10669 { MAC_RX_MODE, TG3_FL_5705,
10670 0x00000000, 0x000007dc },
10671 { MAC_HASH_REG_0, 0x0000,
10672 0x00000000, 0xffffffff },
10673 { MAC_HASH_REG_1, 0x0000,
10674 0x00000000, 0xffffffff },
10675 { MAC_HASH_REG_2, 0x0000,
10676 0x00000000, 0xffffffff },
10677 { MAC_HASH_REG_3, 0x0000,
10678 0x00000000, 0xffffffff },
10680 /* Receive Data and Receive BD Initiator Control Registers. */
10681 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10682 0x00000000, 0xffffffff },
10683 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10684 0x00000000, 0xffffffff },
10685 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10686 0x00000000, 0x00000003 },
10687 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10688 0x00000000, 0xffffffff },
10689 { RCVDBDI_STD_BD+0, 0x0000,
10690 0x00000000, 0xffffffff },
10691 { RCVDBDI_STD_BD+4, 0x0000,
10692 0x00000000, 0xffffffff },
10693 { RCVDBDI_STD_BD+8, 0x0000,
10694 0x00000000, 0xffff0002 },
10695 { RCVDBDI_STD_BD+0xc, 0x0000,
10696 0x00000000, 0xffffffff },
10698 /* Receive BD Initiator Control Registers. */
10699 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10700 0x00000000, 0xffffffff },
10701 { RCVBDI_STD_THRESH, TG3_FL_5705,
10702 0x00000000, 0x000003ff },
10703 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10704 0x00000000, 0xffffffff },
10706 /* Host Coalescing Control Registers. */
10707 { HOSTCC_MODE, TG3_FL_NOT_5705,
10708 0x00000000, 0x00000004 },
10709 { HOSTCC_MODE, TG3_FL_5705,
10710 0x00000000, 0x000000f6 },
10711 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10712 0x00000000, 0xffffffff },
10713 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10714 0x00000000, 0x000003ff },
10715 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10716 0x00000000, 0xffffffff },
10717 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10718 0x00000000, 0x000003ff },
10719 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10720 0x00000000, 0xffffffff },
10721 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10722 0x00000000, 0x000000ff },
10723 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10724 0x00000000, 0xffffffff },
10725 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10726 0x00000000, 0x000000ff },
10727 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10728 0x00000000, 0xffffffff },
10729 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10730 0x00000000, 0xffffffff },
10731 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10732 0x00000000, 0xffffffff },
10733 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10734 0x00000000, 0x000000ff },
10735 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10736 0x00000000, 0xffffffff },
10737 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10738 0x00000000, 0x000000ff },
10739 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10740 0x00000000, 0xffffffff },
10741 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10742 0x00000000, 0xffffffff },
10743 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10744 0x00000000, 0xffffffff },
10745 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10746 0x00000000, 0xffffffff },
10747 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10748 0x00000000, 0xffffffff },
10749 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10750 0xffffffff, 0x00000000 },
10751 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10752 0xffffffff, 0x00000000 },
10754 /* Buffer Manager Control Registers. */
10755 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10756 0x00000000, 0x007fff80 },
10757 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10758 0x00000000, 0x007fffff },
10759 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10760 0x00000000, 0x0000003f },
10761 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10762 0x00000000, 0x000001ff },
10763 { BUFMGR_MB_HIGH_WATER, 0x0000,
10764 0x00000000, 0x000001ff },
10765 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10766 0xffffffff, 0x00000000 },
10767 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10768 0xffffffff, 0x00000000 },
10770 /* Mailbox Registers */
10771 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10772 0x00000000, 0x000001ff },
10773 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10774 0x00000000, 0x000001ff },
10775 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10776 0x00000000, 0x000007ff },
10777 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10778 0x00000000, 0x000001ff },
/* Sentinel: offset 0xffff terminates the table. */
10780 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10783 is_5705 = is_5750 = 0;
10784 if (tg3_flag(tp, 5705_PLUS)) {
10786 if (tg3_flag(tp, 5750_PLUS))
10790 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
/* Skip rows whose applicability flags exclude this chip. */
10791 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10794 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10797 if (tg3_flag(tp, IS_5788) &&
10798 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10801 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10804 offset = (u32) reg_tbl[i].offset;
10805 read_mask = reg_tbl[i].read_mask;
10806 write_mask = reg_tbl[i].write_mask;
10808 /* Save the original register content */
10809 save_val = tr32(offset);
10811 /* Determine the read-only value. */
10812 read_val = save_val & read_mask;
10814 /* Write zero to the register, then make sure the read-only bits
10815 * are not changed and the read/write bits are all zeros.
10819 val = tr32(offset);
10821 /* Test the read-only and read/write bits. */
10822 if (((val & read_mask) != read_val) || (val & write_mask))
10825 /* Write ones to all the bits defined by RdMask and WrMask, then
10826 * make sure the read-only bits are not changed and the
10827 * read/write bits are all ones.
10829 tw32(offset, read_mask | write_mask);
10831 val = tr32(offset);
10833 /* Test the read-only bits. */
10834 if ((val & read_mask) != read_val)
10837 /* Test the read/write bits. */
10838 if ((val & write_mask) != write_mask)
10841 tw32(offset, save_val);
/* Failure path: log (if enabled), restore the register, return error. */
10847 if (netif_msg_hw(tp))
10848 netdev_err(tp->dev,
10849 "Register test failed at offset %x\n", offset);
10850 tw32(offset, save_val);
10854 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10856 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10860 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10861 for (j = 0; j < len; j += 4) {
10864 tg3_write_mem(tp, offset + j, test_pattern[i]);
10865 tg3_read_mem(tp, offset + j, &val);
10866 if (val != test_pattern[i])
/* Self-test: run tg3_do_mem_test() over every on-chip memory region in the
 * table that matches this ASIC generation.  Each table is a list of
 * { offset, length } pairs terminated by offset 0xffffffff.
 */
10873 static int tg3_test_memory(struct tg3 *tp)
10875 static struct mem_entry {
10878 } mem_tbl_570x[] = {
10879 { 0x00000000, 0x00b50},
10880 { 0x00002000, 0x1c000},
10881 { 0xffffffff, 0x00000}
10882 }, mem_tbl_5705[] = {
10883 { 0x00000100, 0x0000c},
10884 { 0x00000200, 0x00008},
10885 { 0x00004000, 0x00800},
10886 { 0x00006000, 0x01000},
10887 { 0x00008000, 0x02000},
10888 { 0x00010000, 0x0e000},
10889 { 0xffffffff, 0x00000}
10890 }, mem_tbl_5755[] = {
10891 { 0x00000200, 0x00008},
10892 { 0x00004000, 0x00800},
10893 { 0x00006000, 0x00800},
10894 { 0x00008000, 0x02000},
10895 { 0x00010000, 0x0c000},
10896 { 0xffffffff, 0x00000}
10897 }, mem_tbl_5906[] = {
10898 { 0x00000200, 0x00008},
10899 { 0x00004000, 0x00400},
10900 { 0x00006000, 0x00400},
10901 { 0x00008000, 0x01000},
10902 { 0x00010000, 0x01000},
10903 { 0xffffffff, 0x00000}
10904 }, mem_tbl_5717[] = {
10905 { 0x00000200, 0x00008},
10906 { 0x00010000, 0x0a000},
10907 { 0x00020000, 0x13c00},
10908 { 0xffffffff, 0x00000}
10909 }, mem_tbl_57765[] = {
10910 { 0x00000200, 0x00008},
10911 { 0x00004000, 0x00800},
10912 { 0x00006000, 0x09800},
10913 { 0x00010000, 0x0a000},
10914 { 0xffffffff, 0x00000}
10916 struct mem_entry *mem_tbl;
/* Select the region table for this chip, newest families first. */
10920 if (tg3_flag(tp, 5717_PLUS))
10921 mem_tbl = mem_tbl_5717;
10922 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10923 mem_tbl = mem_tbl_57765;
10924 else if (tg3_flag(tp, 5755_PLUS))
10925 mem_tbl = mem_tbl_5755;
10926 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10927 mem_tbl = mem_tbl_5906;
10928 else if (tg3_flag(tp, 5705_PLUS))
10929 mem_tbl = mem_tbl_5705;
10931 mem_tbl = mem_tbl_570x;
10933 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10934 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
/* Loopback test modes and TSO test parameters.  tg3_tso_header is a
 * prebuilt IPv4 + TCP (with options) header template used as the payload
 * prefix for the TSO loopback test; lengths below match its layout.
 * NOTE(review): exact field meanings of the raw bytes are taken on trust
 * from the template comment structure — verify against RFC 791/793 layouts
 * if modifying.
 */
10942 #define TG3_MAC_LOOPBACK 0
10943 #define TG3_PHY_LOOPBACK 1
10944 #define TG3_TSO_LOOPBACK 2
10946 #define TG3_TSO_MSS 500
10948 #define TG3_TSO_IP_HDR_LEN 20
10949 #define TG3_TSO_TCP_HDR_LEN 20
10950 #define TG3_TSO_TCP_OPT_LEN 12
10952 static const u8 tg3_tso_header[] = {
10954 0x45, 0x00, 0x00, 0x00,
10955 0x00, 0x00, 0x40, 0x00,
10956 0x40, 0x06, 0x00, 0x00,
10957 0x0a, 0x00, 0x00, 0x01,
10958 0x0a, 0x00, 0x00, 0x02,
10959 0x0d, 0x00, 0xe0, 0x00,
10960 0x00, 0x00, 0x01, 0x00,
10961 0x00, 0x00, 0x02, 0x00,
10962 0x80, 0x10, 0x10, 0x00,
10963 0x14, 0x09, 0x00, 0x00,
10964 0x01, 0x01, 0x08, 0x0a,
10965 0x11, 0x11, 0x11, 0x11,
10966 0x11, 0x11, 0x11, 0x11,
/* Run one loopback iteration: configure MAC/PHY loopback (or TSO mode),
 * build and transmit a single test frame of size pktsz, poll for its
 * arrival on the receive ring, and verify descriptor status and payload
 * byte-for-byte.  Returns 0 on success; nonzero error paths are elided.
 */
10969 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
10971 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10972 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
10973 struct sk_buff *skb, *rx_skb;
10976 int num_pkts, tx_len, rx_len, i, err;
10977 struct tg3_rx_buffer_desc *desc;
10978 struct tg3_napi *tnapi, *rnapi;
10979 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
/* With multiple vectors, RSS puts test traffic on rx queue 1 and TSS
 * transmits on queue 1.
 */
10981 tnapi = &tp->napi[0];
10982 rnapi = &tp->napi[0];
10983 if (tp->irq_cnt > 1) {
10984 if (tg3_flag(tp, ENABLE_RSS))
10985 rnapi = &tp->napi[1];
10986 if (tg3_flag(tp, ENABLE_TSS))
10987 tnapi = &tp->napi[1];
10989 coal_now = tnapi->coal_now | rnapi->coal_now;
10991 if (loopback_mode == TG3_MAC_LOOPBACK) {
10992 /* HW errata - mac loopback fails in some cases on 5780.
10993 * Normal traffic and PHY loopback are not affected by
10994 * errata. Also, the MAC loopback test is deprecated for
10995 * all newer ASIC revisions.
10997 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10998 tg3_flag(tp, CPMU_PRESENT))
11001 mac_mode = tp->mac_mode &
11002 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11003 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11004 if (!tg3_flag(tp, 5705_PLUS))
11005 mac_mode |= MAC_MODE_LINK_POLARITY;
11006 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11007 mac_mode |= MAC_MODE_PORT_MODE_MII;
11009 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11010 tw32(MAC_MODE, mac_mode);
/* PHY loopback (also used for the TSO test): force BMCR loopback at
 * the highest speed the PHY supports.
 */
11012 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11013 tg3_phy_fet_toggle_apd(tp, false);
11014 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11016 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11018 tg3_phy_toggle_automdix(tp, 0);
11020 tg3_writephy(tp, MII_BMCR, val);
11023 mac_mode = tp->mac_mode &
11024 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11025 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11026 tg3_writephy(tp, MII_TG3_FET_PTEST,
11027 MII_TG3_FET_PTEST_FRC_TX_LINK |
11028 MII_TG3_FET_PTEST_FRC_TX_LOCK);
11029 /* The write needs to be flushed for the AC131 */
11030 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11031 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11032 mac_mode |= MAC_MODE_PORT_MODE_MII;
11034 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11036 /* reset to prevent losing 1st rx packet intermittently */
11037 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11038 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11040 tw32_f(MAC_RX_MODE, tp->rx_mode);
11042 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11043 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11044 if (masked_phy_id == TG3_PHY_ID_BCM5401)
11045 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11046 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11047 mac_mode |= MAC_MODE_LINK_POLARITY;
11048 tg3_writephy(tp, MII_TG3_EXT_CTRL,
11049 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11051 tw32(MAC_MODE, mac_mode);
11053 /* Wait for link */
11054 for (i = 0; i < 100; i++) {
11055 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
/* Build the test frame: dest MAC = our own address, then either the TSO
 * header template or a plain ethertype, then a ramp byte pattern.
 */
11064 skb = netdev_alloc_skb(tp->dev, tx_len);
11068 tx_data = skb_put(skb, tx_len);
11069 memcpy(tx_data, tp->dev->dev_addr, 6);
11070 memset(tx_data + 6, 0x0, 8);
11072 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11074 if (loopback_mode == TG3_TSO_LOOPBACK) {
11075 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11077 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11078 TG3_TSO_TCP_OPT_LEN;
11080 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11081 sizeof(tg3_tso_header));
11084 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11085 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11087 /* Set the total length field in the IP header */
11088 iph->tot_len = htons((u16)(mss + hdr_len));
11090 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11091 TXD_FLAG_CPU_POST_DMA);
/* Encode the header length into mss/base_flags the way each HW TSO
 * generation expects it.
 */
11093 if (tg3_flag(tp, HW_TSO_1) ||
11094 tg3_flag(tp, HW_TSO_2) ||
11095 tg3_flag(tp, HW_TSO_3)) {
11097 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11098 th = (struct tcphdr *)&tx_data[val];
11101 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11103 if (tg3_flag(tp, HW_TSO_3)) {
11104 mss |= (hdr_len & 0xc) << 12;
11105 if (hdr_len & 0x10)
11106 base_flags |= 0x00000010;
11107 base_flags |= (hdr_len & 0x3e0) << 5;
11108 } else if (tg3_flag(tp, HW_TSO_2))
11109 mss |= hdr_len << 9;
11110 else if (tg3_flag(tp, HW_TSO_1) ||
11111 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11112 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11114 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11117 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11120 data_off = ETH_HLEN;
11123 for (i = data_off; i < tx_len; i++)
11124 tx_data[i] = (u8) (i & 0xff);
11126 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11127 if (pci_dma_mapping_error(tp->pdev, map)) {
11128 dev_kfree_skb(skb);
11132 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11137 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
/* Post the descriptor and kick the TX mailbox. */
11139 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11140 base_flags, (mss << 1) | 1);
11144 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11145 tr32_mailbox(tnapi->prodmbox);
11149 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11150 for (i = 0; i < 35; i++) {
11151 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11156 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11157 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11158 if ((tx_idx == tnapi->tx_prod) &&
11159 (rx_idx == (rx_start_idx + num_pkts)))
11163 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11164 dev_kfree_skb(skb);
11166 if (tx_idx != tnapi->tx_prod)
11169 if (rx_idx != rx_start_idx + num_pkts)
/* Validate each received descriptor: no error bits, expected length,
 * expected ring (std vs jumbo), and — for TSO — hardware checksum ok.
 */
11173 while (rx_idx != rx_start_idx) {
11174 desc = &rnapi->rx_rcb[rx_start_idx++];
11175 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11176 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11178 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11179 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11182 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11185 if (loopback_mode != TG3_TSO_LOOPBACK) {
11186 if (rx_len != tx_len)
11189 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11190 if (opaque_key != RXD_OPAQUE_RING_STD)
11193 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11196 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11197 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11198 >> RXD_TCPCSUM_SHIFT == 0xffff) {
11202 if (opaque_key == RXD_OPAQUE_RING_STD) {
11203 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11204 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11206 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11207 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11208 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11213 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11214 PCI_DMA_FROMDEVICE);
/* Byte-compare the received payload against the transmitted ramp. */
11216 for (i = data_off; i < rx_len; i++, val++) {
11217 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11224 /* tg3_free_rings will unmap and free the rx_skb */
/* Per-packet-size failure bits for the loopback self-test; MAC-mode results
 * occupy bits [3:0] and PHY-mode results bits [7:4] via the shifts below.
 * TG3_LOOPBACK_FAILED sets every failure bit in both nibbles.
 */
11229 #define TG3_STD_LOOPBACK_FAILED 1
11230 #define TG3_JMB_LOOPBACK_FAILED 2
11231 #define TG3_TSO_LOOPBACK_FAILED 4
11233 #define TG3_MAC_LOOPBACK_SHIFT 0
11234 #define TG3_PHY_LOOPBACK_SHIFT 4
11235 #define TG3_LOOPBACK_FAILED 0x00000077
/* Self-test driver for the loopback modes: resets the hardware, then runs
 * standard/jumbo/TSO frames through MAC loopback and PHY loopback,
 * accumulating TG3_*_LOOPBACK_FAILED bits into the returned error mask.
 * Temporarily disables EEE, gphy auto-powerdown and (on CPMU parts)
 * link-based power management for the duration of the test.
 */
11237 static int tg3_test_loopback(struct tg3 *tp)
11240 u32 eee_cap, cpmuctrl = 0;
11242 if (!netif_running(tp->dev))
11243 return TG3_LOOPBACK_FAILED;
/* EEE interferes with the test; save the capability bit and restore it
 * at the end.
 */
11245 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11246 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11248 err = tg3_reset_hw(tp, 1);
11250 err = TG3_LOOPBACK_FAILED;
11254 if (tg3_flag(tp, ENABLE_RSS)) {
11257 /* Reroute all rx packets to the 1st queue */
11258 for (i = MAC_RSS_INDIR_TBL_0;
11259 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11263 /* Turn off gphy autopowerdown. */
11264 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11265 tg3_phy_toggle_apd(tp, false);
11267 if (tg3_flag(tp, CPMU_PRESENT)) {
/* Acquire the CPMU mutex before touching CPMU_CTRL. */
11271 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11273 /* Wait for up to 40 microseconds to acquire lock. */
11274 for (i = 0; i < 4; i++) {
11275 status = tr32(TG3_CPMU_MUTEX_GNT);
11276 if (status == CPMU_MUTEX_GNT_DRIVER)
11281 if (status != CPMU_MUTEX_GNT_DRIVER) {
11282 err = TG3_LOOPBACK_FAILED;
11286 /* Turn off link-based power management. */
11287 cpmuctrl = tr32(TG3_CPMU_CTRL);
11288 tw32(TG3_CPMU_CTRL,
11289 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11290 CPMU_CTRL_LINK_AWARE_MODE));
/* MAC-loopback passes: standard-size, then jumbo if enabled. */
11293 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11294 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11296 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11297 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11298 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11300 if (tg3_flag(tp, CPMU_PRESENT)) {
11301 tw32(TG3_CPMU_CTRL, cpmuctrl);
11303 /* Release the mutex */
11304 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
/* PHY-loopback passes: skipped for serdes parts and phylib setups. */
11307 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11308 !tg3_flag(tp, USE_PHYLIB)) {
11309 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11310 err |= TG3_STD_LOOPBACK_FAILED <<
11311 TG3_PHY_LOOPBACK_SHIFT;
11312 if (tg3_flag(tp, TSO_CAPABLE) &&
11313 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11314 err |= TG3_TSO_LOOPBACK_FAILED <<
11315 TG3_PHY_LOOPBACK_SHIFT;
11316 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11317 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11318 err |= TG3_JMB_LOOPBACK_FAILED <<
11319 TG3_PHY_LOOPBACK_SHIFT;
11322 /* Re-enable gphy autopowerdown. */
11323 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11324 tg3_phy_toggle_apd(tp, true);
11327 tp->phy_flags |= eee_cap;
/* ethtool .self_test entry point: runs the NVRAM and link tests always,
 * and — when ETH_TEST_FL_OFFLINE is requested — halts the device to run
 * the register, memory, loopback and interrupt tests, then restarts it.
 * Per-test results land in data[]; any failure sets ETH_TEST_FL_FAILED.
 */
11332 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11335 struct tg3 *tp = netdev_priv(dev);
11337 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11340 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11342 if (tg3_test_nvram(tp) != 0) {
11343 etest->flags |= ETH_TEST_FL_FAILED;
11346 if (tg3_test_link(tp) != 0) {
11347 etest->flags |= ETH_TEST_FL_FAILED;
11350 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11351 int err, err2 = 0, irq_sync = 0;
11353 if (netif_running(dev)) {
11355 tg3_netif_stop(tp);
11359 tg3_full_lock(tp, irq_sync);
/* Quiesce the hardware and on-chip CPUs before the offline tests. */
11361 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11362 err = tg3_nvram_lock(tp);
11363 tg3_halt_cpu(tp, RX_CPU_BASE);
11364 if (!tg3_flag(tp, 5705_PLUS))
11365 tg3_halt_cpu(tp, TX_CPU_BASE);
11367 tg3_nvram_unlock(tp);
11369 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11372 if (tg3_test_registers(tp) != 0) {
11373 etest->flags |= ETH_TEST_FL_FAILED;
11376 if (tg3_test_memory(tp) != 0) {
11377 etest->flags |= ETH_TEST_FL_FAILED;
11380 if ((data[4] = tg3_test_loopback(tp)) != 0)
11381 etest->flags |= ETH_TEST_FL_FAILED;
11383 tg3_full_unlock(tp);
/* Interrupt test runs unlocked (it needs interrupts delivered). */
11385 if (tg3_test_interrupt(tp) != 0) {
11386 etest->flags |= ETH_TEST_FL_FAILED;
11390 tg3_full_lock(tp, 0);
/* Restore normal operation after the destructive tests. */
11392 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11393 if (netif_running(dev)) {
11394 tg3_flag_set(tp, INIT_COMPLETE);
11395 err2 = tg3_restart_hw(tp, 1);
11397 tg3_netif_start(tp);
11400 tg3_full_unlock(tp);
11402 if (irq_sync && !err2)
11405 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11406 tg3_power_down(tp);
/* ndo_do_ioctl handler for MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).
 * With phylib attached, defers entirely to phy_mii_ioctl(); otherwise
 * performs the PHY register access directly under tp->lock.
 */
11410 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11412 struct mii_ioctl_data *data = if_mii(ifr);
11413 struct tg3 *tp = netdev_priv(dev);
11416 if (tg3_flag(tp, USE_PHYLIB)) {
11417 struct phy_device *phydev;
11418 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11420 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11421 return phy_mii_ioctl(phydev, ifr, cmd);
11426 data->phy_id = tp->phy_addr;
11429 case SIOCGMIIREG: {
11432 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11433 break; /* We have no PHY */
11435 if (!netif_running(dev))
/* Serialize PHY access with the rest of the driver. */
11438 spin_lock_bh(&tp->lock);
11439 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11440 spin_unlock_bh(&tp->lock);
11442 data->val_out = mii_regval;
11448 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11449 break; /* We have no PHY */
11451 if (!netif_running(dev))
11454 spin_lock_bh(&tp->lock);
11455 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11456 spin_unlock_bh(&tp->lock);
11464 return -EOPNOTSUPP;
11467 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11469 struct tg3 *tp = netdev_priv(dev);
11471 memcpy(ec, &tp->coal, sizeof(*ec));
/* ethtool .set_coalesce handler: range-check the requested interrupt
 * coalescing parameters (some limits are zero on 5705+ parts, which
 * effectively forbids those fields there), store the relevant subset into
 * tp->coal, and reprogram the hardware if the interface is up.
 */
11475 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11477 struct tg3 *tp = netdev_priv(dev);
11478 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11479 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
/* Pre-5705 parts support the irq-tick and stats-tick knobs; on newer
 * chips the limits stay 0 so any nonzero request is rejected below.
 */
11481 if (!tg3_flag(tp, 5705_PLUS)) {
11482 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11483 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11484 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11485 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11488 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11489 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11490 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11491 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11492 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11493 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11494 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11495 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11496 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11497 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11500 /* No rx interrupts will be generated if both are zero */
11501 if ((ec->rx_coalesce_usecs == 0) &&
11502 (ec->rx_max_coalesced_frames == 0))
11505 /* No tx interrupts will be generated if both are zero */
11506 if ((ec->tx_coalesce_usecs == 0) &&
11507 (ec->tx_max_coalesced_frames == 0))
11510 /* Only copy relevant parameters, ignore all others. */
11511 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11512 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11513 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11514 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11515 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11516 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11517 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11518 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11519 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11521 if (netif_running(dev)) {
11522 tg3_full_lock(tp, 0);
11523 __tg3_set_coalesce(tp, &tp->coal);
11524 tg3_full_unlock(tp);
/* ethtool operations table wiring the handlers above into the core. */
11529 static const struct ethtool_ops tg3_ethtool_ops = {
11530 .get_settings = tg3_get_settings,
11531 .set_settings = tg3_set_settings,
11532 .get_drvinfo = tg3_get_drvinfo,
11533 .get_regs_len = tg3_get_regs_len,
11534 .get_regs = tg3_get_regs,
11535 .get_wol = tg3_get_wol,
11536 .set_wol = tg3_set_wol,
11537 .get_msglevel = tg3_get_msglevel,
11538 .set_msglevel = tg3_set_msglevel,
11539 .nway_reset = tg3_nway_reset,
11540 .get_link = ethtool_op_get_link,
11541 .get_eeprom_len = tg3_get_eeprom_len,
11542 .get_eeprom = tg3_get_eeprom,
11543 .set_eeprom = tg3_set_eeprom,
11544 .get_ringparam = tg3_get_ringparam,
11545 .set_ringparam = tg3_set_ringparam,
11546 .get_pauseparam = tg3_get_pauseparam,
11547 .set_pauseparam = tg3_set_pauseparam,
11548 .self_test = tg3_self_test,
11549 .get_strings = tg3_get_strings,
11550 .set_phys_id = tg3_set_phys_id,
11551 .get_ethtool_stats = tg3_get_ethtool_stats,
11552 .get_coalesce = tg3_get_coalesce,
11553 .set_coalesce = tg3_set_coalesce,
11554 .get_sset_count = tg3_get_sset_count,
/* Probe the EEPROM size: starting from the default EEPROM_CHIP_SIZE, read
 * at increasing offsets until the magic signature reappears, which marks
 * the address wrap-around and therefore the true chip size.
 */
11557 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11559 u32 cursize, val, magic;
11561 tp->nvram_size = EEPROM_CHIP_SIZE;
11563 if (tg3_nvram_read(tp, 0, &magic) != 0)
/* Only probe images whose magic we recognize (legacy, FW or HW selfboot). */
11566 if ((magic != TG3_EEPROM_MAGIC) &&
11567 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11568 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11572 * Size the chip by reading offsets at increasing powers of two.
11573 * When we encounter our validation signature, we know the addressing
11574 * has wrapped around, and thus have our chip size.
11578 while (cursize < tp->nvram_size) {
11579 if (tg3_nvram_read(tp, cursize, &val) != 0)
11588 tp->nvram_size = cursize;
/* Determine total NVRAM size.  Selfboot (non-TG3_EEPROM_MAGIC) parts
 * are delegated to tg3_get_eeprom_size(); otherwise the size is read
 * from the 16-bit field at offset 0xf2 (in KB), falling back to a
 * 512KB default.
 * NOTE(review): some interior lines (returns, braces) are elided in
 * this excerpt.
 */
11591 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
/* Chips flagged NO_NVRAM or with an unreadable word 0 keep defaults. */
11595 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11598 /* Selfboot format */
11599 if (val != TG3_EEPROM_MAGIC) {
11600 tg3_get_eeprom_size(tp);
11604 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11606 /* This is confusing. We want to operate on the
11607 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11608 * call will read from NVRAM and byteswap the data
11609 * according to the byteswapping settings for all
11610 * other register accesses. This ensures the data we
11611 * want will always reside in the lower 16-bits.
11612 * However, the data in NVRAM is in LE format, which
11613 * means the data from the NVRAM read will always be
11614 * opposite the endianness of the CPU. The 16-bit
11615 * byteswap then brings the data to CPU endianness.
11617 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
/* No usable size field: assume the common 512KB part. */
11621 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
/* Generic (pre-5752) NVRAM probing: decode NVRAM_CFG1 to identify the
 * flash/EEPROM vendor and set tp->nvram_jedecnum, tp->nvram_pagesize
 * and the NVRAM_BUFFERED/FLASH flags accordingly.
 * NOTE(review): break statements and closing braces of the switch are
 * elided in this excerpt.
 */
11624 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11628 nvcfg1 = tr32(NVRAM_CFG1);
11629 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11630 tg3_flag_set(tp, FLASH);
/* No flash interface: disable compat bypass so EEPROM access works. */
11632 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11633 tw32(NVRAM_CFG1, nvcfg1);
/* Only 5750 and the 5780 class encode a vendor field here. */
11636 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11637 tg3_flag(tp, 5780_CLASS)) {
11638 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11639 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11640 tp->nvram_jedecnum = JEDEC_ATMEL;
11641 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11642 tg3_flag_set(tp, NVRAM_BUFFERED);
11644 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11645 tp->nvram_jedecnum = JEDEC_ATMEL;
11646 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11648 case FLASH_VENDOR_ATMEL_EEPROM:
11649 tp->nvram_jedecnum = JEDEC_ATMEL;
11650 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11651 tg3_flag_set(tp, NVRAM_BUFFERED);
11653 case FLASH_VENDOR_ST:
11654 tp->nvram_jedecnum = JEDEC_ST;
11655 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11656 tg3_flag_set(tp, NVRAM_BUFFERED);
11658 case FLASH_VENDOR_SAIFUN:
11659 tp->nvram_jedecnum = JEDEC_SAIFUN;
11660 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11662 case FLASH_VENDOR_SST_SMALL:
11663 case FLASH_VENDOR_SST_LARGE:
11664 tp->nvram_jedecnum = JEDEC_SST;
11665 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
/* Fallback for chips without a vendor field: buffered Atmel. */
11669 tp->nvram_jedecnum = JEDEC_ATMEL;
11670 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11671 tg3_flag_set(tp, NVRAM_BUFFERED);
/* Decode the 5752-style page-size field of NVRAM_CFG1 into a byte
 * count in tp->nvram_pagesize.  264/528 are Atmel DataFlash page
 * sizes; the rest are plain powers of two.
 * NOTE(review): break statements of the switch are elided in this
 * excerpt.
 */
11675 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11677 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11678 case FLASH_5752PAGE_SIZE_256:
11679 tp->nvram_pagesize = 256;
11681 case FLASH_5752PAGE_SIZE_512:
11682 tp->nvram_pagesize = 512;
11684 case FLASH_5752PAGE_SIZE_1K:
11685 tp->nvram_pagesize = 1024;
11687 case FLASH_5752PAGE_SIZE_2K:
11688 tp->nvram_pagesize = 2048;
11690 case FLASH_5752PAGE_SIZE_4K:
11691 tp->nvram_pagesize = 4096;
11693 case FLASH_5752PAGE_SIZE_264:
11694 tp->nvram_pagesize = 264;
11696 case FLASH_5752PAGE_SIZE_528:
11697 tp->nvram_pagesize = 528;
/* 5752 NVRAM probing: decode NVRAM_CFG1, honor the TPM protection bit,
 * and set vendor/buffering/flash flags.  Flash parts get their page
 * size from tg3_nvram_get_pagesize(); EEPROM parts use the whole chip
 * size as the "page" and have compat bypass disabled.
 * NOTE(review): break statements and some braces are elided in this
 * excerpt.
 */
11702 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11706 nvcfg1 = tr32(NVRAM_CFG1);
11708 /* NVRAM protection for TPM */
11709 if (nvcfg1 & (1 << 27))
11710 tg3_flag_set(tp, PROTECTED_NVRAM)
11712 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11713 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11714 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11715 tp->nvram_jedecnum = JEDEC_ATMEL;
11716 tg3_flag_set(tp, NVRAM_BUFFERED);
11718 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11719 tp->nvram_jedecnum = JEDEC_ATMEL;
11720 tg3_flag_set(tp, NVRAM_BUFFERED);
11721 tg3_flag_set(tp, FLASH);
11723 case FLASH_5752VENDOR_ST_M45PE10:
11724 case FLASH_5752VENDOR_ST_M45PE20:
11725 case FLASH_5752VENDOR_ST_M45PE40:
11726 tp->nvram_jedecnum = JEDEC_ST;
11727 tg3_flag_set(tp, NVRAM_BUFFERED);
11728 tg3_flag_set(tp, FLASH);
11732 if (tg3_flag(tp, FLASH)) {
11733 tg3_nvram_get_pagesize(tp, nvcfg1);
11735 /* For eeprom, set pagesize to maximum eeprom size */
11736 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11738 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11739 tw32(NVRAM_CFG1, nvcfg1);
/* 5755 NVRAM probing: like the 5752 path, but each recognized part
 * also gets an explicit tp->nvram_size, which shrinks when the TPM
 * protection bit (bit 27) reserves part of the device.
 * NOTE(review): break statements, else keywords and some braces are
 * elided in this excerpt.
 */
11743 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11745 u32 nvcfg1, protect = 0;
11747 nvcfg1 = tr32(NVRAM_CFG1);
11749 /* NVRAM protection for TPM */
11750 if (nvcfg1 & (1 << 27)) {
11751 tg3_flag_set(tp, PROTECTED_NVRAM);
11755 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11757 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11758 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11759 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11760 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11761 tp->nvram_jedecnum = JEDEC_ATMEL;
11762 tg3_flag_set(tp, NVRAM_BUFFERED);
11763 tg3_flag_set(tp, FLASH);
11764 tp->nvram_pagesize = 264;
/* Size per specific Atmel part; protected devices lose the TPM area. */
11765 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11766 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11767 tp->nvram_size = (protect ? 0x3e200 :
11768 TG3_NVRAM_SIZE_512KB);
11769 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11770 tp->nvram_size = (protect ? 0x1f200 :
11771 TG3_NVRAM_SIZE_256KB);
11773 tp->nvram_size = (protect ? 0x1f200 :
11774 TG3_NVRAM_SIZE_128KB);
11776 case FLASH_5752VENDOR_ST_M45PE10:
11777 case FLASH_5752VENDOR_ST_M45PE20:
11778 case FLASH_5752VENDOR_ST_M45PE40:
11779 tp->nvram_jedecnum = JEDEC_ST;
11780 tg3_flag_set(tp, NVRAM_BUFFERED);
11781 tg3_flag_set(tp, FLASH);
11782 tp->nvram_pagesize = 256;
/* Size per specific ST part, again reduced when protected. */
11783 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11784 tp->nvram_size = (protect ?
11785 TG3_NVRAM_SIZE_64KB :
11786 TG3_NVRAM_SIZE_128KB);
11787 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11788 tp->nvram_size = (protect ?
11789 TG3_NVRAM_SIZE_64KB :
11790 TG3_NVRAM_SIZE_256KB);
11792 tp->nvram_size = (protect ?
11793 TG3_NVRAM_SIZE_128KB :
11794 TG3_NVRAM_SIZE_512KB);
/* 5787/5784/5785 NVRAM probing: decode NVRAM_CFG1 into vendor,
 * buffering and page size.  EEPROM parts also get compat bypass
 * disabled; flash parts use fixed 264/256-byte pages.
 * NOTE(review): break statements and closing braces are elided in
 * this excerpt.
 */
11799 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11803 nvcfg1 = tr32(NVRAM_CFG1);
11805 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11806 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11807 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11808 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11809 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11810 tp->nvram_jedecnum = JEDEC_ATMEL;
11811 tg3_flag_set(tp, NVRAM_BUFFERED);
11812 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11814 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11815 tw32(NVRAM_CFG1, nvcfg1);
11817 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11818 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11819 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11820 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11821 tp->nvram_jedecnum = JEDEC_ATMEL;
11822 tg3_flag_set(tp, NVRAM_BUFFERED);
11823 tg3_flag_set(tp, FLASH);
11824 tp->nvram_pagesize = 264;
11826 case FLASH_5752VENDOR_ST_M45PE10:
11827 case FLASH_5752VENDOR_ST_M45PE20:
11828 case FLASH_5752VENDOR_ST_M45PE40:
11829 tp->nvram_jedecnum = JEDEC_ST;
11830 tg3_flag_set(tp, NVRAM_BUFFERED);
11831 tg3_flag_set(tp, FLASH);
11832 tp->nvram_pagesize = 256;
/* 5761 NVRAM probing.  Decodes the vendor field, honors the TPM
 * protection bit, then (for unprotected parts) looks up the device
 * size from a second switch on the same vendor code; protected parts
 * take their size from the NVRAM_ADDR_LOCKOUT register instead.
 * NOTE(review): break statements, the second "switch" keyword and
 * some braces are elided in this excerpt.
 */
11837 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11839 u32 nvcfg1, protect = 0;
11841 nvcfg1 = tr32(NVRAM_CFG1);
11843 /* NVRAM protection for TPM */
11844 if (nvcfg1 & (1 << 27)) {
11845 tg3_flag_set(tp, PROTECTED_NVRAM);
11849 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11851 case FLASH_5761VENDOR_ATMEL_ADB021D:
11852 case FLASH_5761VENDOR_ATMEL_ADB041D:
11853 case FLASH_5761VENDOR_ATMEL_ADB081D:
11854 case FLASH_5761VENDOR_ATMEL_ADB161D:
11855 case FLASH_5761VENDOR_ATMEL_MDB021D:
11856 case FLASH_5761VENDOR_ATMEL_MDB041D:
11857 case FLASH_5761VENDOR_ATMEL_MDB081D:
11858 case FLASH_5761VENDOR_ATMEL_MDB161D:
11859 tp->nvram_jedecnum = JEDEC_ATMEL;
11860 tg3_flag_set(tp, NVRAM_BUFFERED);
11861 tg3_flag_set(tp, FLASH);
11862 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11863 tp->nvram_pagesize = 256;
11865 case FLASH_5761VENDOR_ST_A_M45PE20:
11866 case FLASH_5761VENDOR_ST_A_M45PE40:
11867 case FLASH_5761VENDOR_ST_A_M45PE80:
11868 case FLASH_5761VENDOR_ST_A_M45PE16:
11869 case FLASH_5761VENDOR_ST_M_M45PE20:
11870 case FLASH_5761VENDOR_ST_M_M45PE40:
11871 case FLASH_5761VENDOR_ST_M_M45PE80:
11872 case FLASH_5761VENDOR_ST_M_M45PE16:
11873 tp->nvram_jedecnum = JEDEC_ST;
11874 tg3_flag_set(tp, NVRAM_BUFFERED);
11875 tg3_flag_set(tp, FLASH);
11876 tp->nvram_pagesize = 256;
/* Protected device: size comes from the address-lockout register. */
11881 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
/* Unprotected device: size keyed off the specific part number. */
11884 case FLASH_5761VENDOR_ATMEL_ADB161D:
11885 case FLASH_5761VENDOR_ATMEL_MDB161D:
11886 case FLASH_5761VENDOR_ST_A_M45PE16:
11887 case FLASH_5761VENDOR_ST_M_M45PE16:
11888 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11890 case FLASH_5761VENDOR_ATMEL_ADB081D:
11891 case FLASH_5761VENDOR_ATMEL_MDB081D:
11892 case FLASH_5761VENDOR_ST_A_M45PE80:
11893 case FLASH_5761VENDOR_ST_M_M45PE80:
11894 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11896 case FLASH_5761VENDOR_ATMEL_ADB041D:
11897 case FLASH_5761VENDOR_ATMEL_MDB041D:
11898 case FLASH_5761VENDOR_ST_A_M45PE40:
11899 case FLASH_5761VENDOR_ST_M_M45PE40:
11900 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11902 case FLASH_5761VENDOR_ATMEL_ADB021D:
11903 case FLASH_5761VENDOR_ATMEL_MDB021D:
11904 case FLASH_5761VENDOR_ST_A_M45PE20:
11905 case FLASH_5761VENDOR_ST_M_M45PE20:
11906 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
/* 5906 NVRAM probing: this chip always has a buffered Atmel EEPROM,
 * so no register decoding is needed — just set the fixed parameters.
 */
11912 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11914 tp->nvram_jedecnum = JEDEC_ATMEL;
11915 tg3_flag_set(tp, NVRAM_BUFFERED);
11916 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
/* 57780/57765 NVRAM probing.  Decodes NVRAM_CFG1 into vendor family
 * (Atmel EEPROM, Atmel AT45DB flash, ST M45PE flash), then a nested
 * switch picks the device size; unrecognized codes set NO_NVRAM.
 * Finally the page size is decoded and, for non-DataFlash page sizes,
 * address translation is disabled.
 * NOTE(review): break statements, default labels and some braces are
 * elided in this excerpt.
 */
11919 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11923 nvcfg1 = tr32(NVRAM_CFG1);
11925 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11926 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11927 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11928 tp->nvram_jedecnum = JEDEC_ATMEL;
11929 tg3_flag_set(tp, NVRAM_BUFFERED);
11930 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11932 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11933 tw32(NVRAM_CFG1, nvcfg1);
11935 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11936 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11937 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11938 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11939 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11940 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11941 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11942 tp->nvram_jedecnum = JEDEC_ATMEL;
11943 tg3_flag_set(tp, NVRAM_BUFFERED);
11944 tg3_flag_set(tp, FLASH);
/* Atmel family: size keyed off the exact AT45DB part. */
11946 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11947 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11948 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11949 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11950 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11952 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11953 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11954 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11956 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11957 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11958 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11962 case FLASH_5752VENDOR_ST_M45PE10:
11963 case FLASH_5752VENDOR_ST_M45PE20:
11964 case FLASH_5752VENDOR_ST_M45PE40:
11965 tp->nvram_jedecnum = JEDEC_ST;
11966 tg3_flag_set(tp, NVRAM_BUFFERED);
11967 tg3_flag_set(tp, FLASH);
/* ST family: size keyed off the exact M45PE part. */
11969 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11970 case FLASH_5752VENDOR_ST_M45PE10:
11971 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11973 case FLASH_5752VENDOR_ST_M45PE20:
11974 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11976 case FLASH_5752VENDOR_ST_M45PE40:
11977 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
/* Unrecognized vendor code: no usable NVRAM. */
11982 tg3_flag_set(tp, NO_NVRAM);
11986 tg3_nvram_get_pagesize(tp, nvcfg1);
11987 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11988 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/* 5717/5719 NVRAM probing.  Same structure as the 57780 path: decode
 * vendor family, then a nested switch picks the device size (some
 * parts defer to tg3_nvram_get_size()); unrecognized codes set
 * NO_NVRAM.  Page size is decoded last, with address translation
 * disabled for non-DataFlash page sizes.
 * NOTE(review): break statements, default labels and some braces are
 * elided in this excerpt.
 */
11992 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11996 nvcfg1 = tr32(NVRAM_CFG1);
11998 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11999 case FLASH_5717VENDOR_ATMEL_EEPROM:
12000 case FLASH_5717VENDOR_MICRO_EEPROM:
12001 tp->nvram_jedecnum = JEDEC_ATMEL;
12002 tg3_flag_set(tp, NVRAM_BUFFERED);
12003 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12005 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12006 tw32(NVRAM_CFG1, nvcfg1);
12008 case FLASH_5717VENDOR_ATMEL_MDB011D:
12009 case FLASH_5717VENDOR_ATMEL_ADB011B:
12010 case FLASH_5717VENDOR_ATMEL_ADB011D:
12011 case FLASH_5717VENDOR_ATMEL_MDB021D:
12012 case FLASH_5717VENDOR_ATMEL_ADB021B:
12013 case FLASH_5717VENDOR_ATMEL_ADB021D:
12014 case FLASH_5717VENDOR_ATMEL_45USPT:
12015 tp->nvram_jedecnum = JEDEC_ATMEL;
12016 tg3_flag_set(tp, NVRAM_BUFFERED);
12017 tg3_flag_set(tp, FLASH);
12019 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12020 case FLASH_5717VENDOR_ATMEL_MDB021D:
12021 /* Detect size with tg3_nvram_get_size() */
12023 case FLASH_5717VENDOR_ATMEL_ADB021B:
12024 case FLASH_5717VENDOR_ATMEL_ADB021D:
12025 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12028 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12032 case FLASH_5717VENDOR_ST_M_M25PE10:
12033 case FLASH_5717VENDOR_ST_A_M25PE10:
12034 case FLASH_5717VENDOR_ST_M_M45PE10:
12035 case FLASH_5717VENDOR_ST_A_M45PE10:
12036 case FLASH_5717VENDOR_ST_M_M25PE20:
12037 case FLASH_5717VENDOR_ST_A_M25PE20:
12038 case FLASH_5717VENDOR_ST_M_M45PE20:
12039 case FLASH_5717VENDOR_ST_A_M45PE20:
12040 case FLASH_5717VENDOR_ST_25USPT:
12041 case FLASH_5717VENDOR_ST_45USPT:
12042 tp->nvram_jedecnum = JEDEC_ST;
12043 tg3_flag_set(tp, NVRAM_BUFFERED);
12044 tg3_flag_set(tp, FLASH);
12046 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12047 case FLASH_5717VENDOR_ST_M_M25PE20:
12048 case FLASH_5717VENDOR_ST_M_M45PE20:
12049 /* Detect size with tg3_nvram_get_size() */
12051 case FLASH_5717VENDOR_ST_A_M25PE20:
12052 case FLASH_5717VENDOR_ST_A_M45PE20:
12053 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12056 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
/* Unrecognized vendor code: no usable NVRAM. */
12061 tg3_flag_set(tp, NO_NVRAM);
12065 tg3_nvram_get_pagesize(tp, nvcfg1);
12066 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12067 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/* 5720 NVRAM probing.  The vendor/pin-strap field of NVRAM_CFG1 is
 * cached in nvmpinstrp and decoded twice: an outer switch selects the
 * part family (Atmel EEPROM, Atmel flash, ST flash) and a nested
 * switch maps the exact part to a device size; unrecognized codes set
 * NO_NVRAM.  Page size is decoded last, with address translation
 * disabled for non-DataFlash page sizes.
 * NOTE(review): break statements, default labels and some braces are
 * elided in this excerpt.
 */
12070 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12072 u32 nvcfg1, nvmpinstrp;
12074 nvcfg1 = tr32(NVRAM_CFG1);
12075 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12077 switch (nvmpinstrp) {
12078 case FLASH_5720_EEPROM_HD:
12079 case FLASH_5720_EEPROM_LD:
12080 tp->nvram_jedecnum = JEDEC_ATMEL;
12081 tg3_flag_set(tp, NVRAM_BUFFERED);
12083 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12084 tw32(NVRAM_CFG1, nvcfg1);
/* High-density vs low-density EEPROM strap picks the page size. */
12085 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12086 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12088 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12090 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12091 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12092 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12093 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12094 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12095 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12096 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12097 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12098 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12099 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12100 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12101 case FLASH_5720VENDOR_ATMEL_45USPT:
12102 tp->nvram_jedecnum = JEDEC_ATMEL;
12103 tg3_flag_set(tp, NVRAM_BUFFERED);
12104 tg3_flag_set(tp, FLASH);
/* Atmel flash: size keyed off the exact DataFlash part. */
12106 switch (nvmpinstrp) {
12107 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12108 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12109 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12110 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12112 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12113 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12114 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12115 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12117 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12118 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12119 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12122 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12126 case FLASH_5720VENDOR_M_ST_M25PE10:
12127 case FLASH_5720VENDOR_M_ST_M45PE10:
12128 case FLASH_5720VENDOR_A_ST_M25PE10:
12129 case FLASH_5720VENDOR_A_ST_M45PE10:
12130 case FLASH_5720VENDOR_M_ST_M25PE20:
12131 case FLASH_5720VENDOR_M_ST_M45PE20:
12132 case FLASH_5720VENDOR_A_ST_M25PE20:
12133 case FLASH_5720VENDOR_A_ST_M45PE20:
12134 case FLASH_5720VENDOR_M_ST_M25PE40:
12135 case FLASH_5720VENDOR_M_ST_M45PE40:
12136 case FLASH_5720VENDOR_A_ST_M25PE40:
12137 case FLASH_5720VENDOR_A_ST_M45PE40:
12138 case FLASH_5720VENDOR_M_ST_M25PE80:
12139 case FLASH_5720VENDOR_M_ST_M45PE80:
12140 case FLASH_5720VENDOR_A_ST_M25PE80:
12141 case FLASH_5720VENDOR_A_ST_M45PE80:
12142 case FLASH_5720VENDOR_ST_25USPT:
12143 case FLASH_5720VENDOR_ST_45USPT:
12144 tp->nvram_jedecnum = JEDEC_ST;
12145 tg3_flag_set(tp, NVRAM_BUFFERED);
12146 tg3_flag_set(tp, FLASH);
/* ST flash: size keyed off the exact M25PE/M45PE part. */
12148 switch (nvmpinstrp) {
12149 case FLASH_5720VENDOR_M_ST_M25PE20:
12150 case FLASH_5720VENDOR_M_ST_M45PE20:
12151 case FLASH_5720VENDOR_A_ST_M25PE20:
12152 case FLASH_5720VENDOR_A_ST_M45PE20:
12153 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12155 case FLASH_5720VENDOR_M_ST_M25PE40:
12156 case FLASH_5720VENDOR_M_ST_M45PE40:
12157 case FLASH_5720VENDOR_A_ST_M25PE40:
12158 case FLASH_5720VENDOR_A_ST_M45PE40:
12159 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12161 case FLASH_5720VENDOR_M_ST_M25PE80:
12162 case FLASH_5720VENDOR_M_ST_M45PE80:
12163 case FLASH_5720VENDOR_A_ST_M25PE80:
12164 case FLASH_5720VENDOR_A_ST_M45PE80:
12165 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12168 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
/* Unrecognized strap value: no usable NVRAM. */
12173 tg3_flag_set(tp, NO_NVRAM);
12177 tg3_nvram_get_pagesize(tp, nvcfg1);
12178 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12179 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12182 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Top-level NVRAM initialization: reset the SEEPROM FSM, enable
 * access, then dispatch to the per-ASIC *_nvram_info() prober and
 * finally determine the device size.  5700/5701 have no NVRAM
 * interface and fall back to the legacy SEEPROM sizing path.
 * NOTE(review): several interior lines (else branches, braces, udelay
 * calls) are elided in this excerpt.
 */
12184 static void __devinit tg3_nvram_init(struct tg3 *tp)
/* Reset the EEPROM state machine and program the clock period. */
12185 tw32_f(GRC_EEPROM_ADDR,
12186 (EEPROM_ADDR_FSM_RESET |
12187 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12188 EEPROM_ADDR_CLKPERD_SHIFT)));
12192 /* Enable seeprom accesses. */
12193 tw32_f(GRC_LOCAL_CTRL,
12194 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12197 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12198 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12199 tg3_flag_set(tp, NVRAM);
12201 if (tg3_nvram_lock(tp)) {
12202 netdev_warn(tp->dev,
12203 "Cannot get nvram lock, %s failed\n",
12207 tg3_enable_nvram_access(tp);
/* Probers set a nonzero size when they can determine it directly. */
12209 tp->nvram_size = 0;
12211 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12212 tg3_get_5752_nvram_info(tp);
12213 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12214 tg3_get_5755_nvram_info(tp);
12215 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12216 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12217 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12218 tg3_get_5787_nvram_info(tp);
12219 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12220 tg3_get_5761_nvram_info(tp);
12221 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12222 tg3_get_5906_nvram_info(tp);
12223 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12224 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12225 tg3_get_57780_nvram_info(tp);
12226 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12227 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12228 tg3_get_5717_nvram_info(tp);
12229 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12230 tg3_get_5720_nvram_info(tp);
/* Default prober for all remaining NVRAM-capable chips. */
12232 tg3_get_nvram_info(tp);
/* Prober could not size the part directly; probe by wrap-around. */
12234 if (tp->nvram_size == 0)
12235 tg3_get_nvram_size(tp);
12237 tg3_disable_nvram_access(tp);
12238 tg3_nvram_unlock(tp);
/* 5700/5701: no NVRAM interface — use the legacy SEEPROM path. */
12241 tg3_flag_clear(tp, NVRAM);
12242 tg3_flag_clear(tp, NVRAM_BUFFERED);
12244 tg3_get_eeprom_size(tp);
/* Write a block to a SEEPROM via the GRC_EEPROM_ADDR/DATA registers,
 * one 32-bit word at a time.  For each word: load the data register,
 * clear any pending COMPLETE status, issue the write, then poll up to
 * 1000 times for EEPROM_ADDR_COMPLETE.
 * NOTE(review): loop-local declarations, the addr computation, delays
 * and return statements are elided in this excerpt.
 */
12248 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12249 u32 offset, u32 len, u8 *buf)
12254 for (i = 0; i < len; i += 4) {
12260 memcpy(&data, buf + i, 4);
12263 * The SEEPROM interface expects the data to always be opposite
12264 * the native endian format. We accomplish this by reversing
12265 * all the operations that would have been performed on the
12266 * data from a call to tg3_nvram_read_be32().
12268 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
/* Ack any stale completion before starting this word's write. */
12270 val = tr32(GRC_EEPROM_ADDR);
12271 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12273 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12275 tw32(GRC_EEPROM_ADDR, val |
12276 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12277 (addr & EEPROM_ADDR_ADDR_MASK) |
12278 EEPROM_ADDR_START |
12279 EEPROM_ADDR_WRITE);
/* Bounded poll for write completion. */
12281 for (j = 0; j < 1000; j++) {
12282 val = tr32(GRC_EEPROM_ADDR);
12284 if (val & EEPROM_ADDR_COMPLETE)
/* Timed out without COMPLETE: the write failed. */
12288 if (!(val & EEPROM_ADDR_COMPLETE)) {
12297 /* offset and length are dword aligned */
/* Write to unbuffered flash using read-modify-write of whole pages:
 * for each page touched, read the page into a kmalloc'd bounce
 * buffer, merge in the caller's data, issue write-enable, erase the
 * page, then program it back word by word (FIRST on the first word,
 * LAST on the final word), finishing with a write-disable.
 * NOTE(review): declarations, size computation, loop framing, error
 * paths and kfree are elided in this excerpt.
 */
12298 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12302 u32 pagesize = tp->nvram_pagesize;
12303 u32 pagemask = pagesize - 1;
/* Bounce buffer for one flash page. */
12307 tmp = kmalloc(pagesize, GFP_KERNEL);
12313 u32 phy_addr, page_off, size;
/* Start of the page containing this offset. */
12315 phy_addr = offset & ~pagemask;
/* Read the whole page so unmodified words are preserved. */
12317 for (j = 0; j < pagesize; j += 4) {
12318 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12319 (__be32 *) (tmp + j));
12326 page_off = offset & pagemask;
/* Merge caller data into the page image. */
12333 memcpy(tmp + page_off, buf, size);
12335 offset = offset + (pagesize - page_off);
12337 tg3_enable_nvram_access(tp);
12340 * Before we can erase the flash page, we need
12341 * to issue a special "write enable" command.
12343 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12345 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12348 /* Erase the target page */
12349 tw32(NVRAM_ADDR, phy_addr);
12351 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12352 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12354 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12357 /* Issue another write enable to start the write. */
12358 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12360 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
/* Program the merged page image back, one dword per command. */
12363 for (j = 0; j < pagesize; j += 4) {
12366 data = *((__be32 *) (tmp + j));
12368 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12370 tw32(NVRAM_ADDR, phy_addr + j);
12372 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12376 nvram_cmd |= NVRAM_CMD_FIRST;
12377 else if (j == (pagesize - 4))
12378 nvram_cmd |= NVRAM_CMD_LAST;
12380 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
/* Re-assert write-protect when done. */
12387 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12388 tg3_nvram_exec_cmd(tp, nvram_cmd);
12395 /* offset and length are dword aligned */
/* Write to buffered flash / EEPROM one dword per command.  The
 * controller buffers a page internally, so no erase cycle is needed:
 * FIRST is set at the start of each page (and of the transfer), LAST
 * at the end of each page and of the transfer.  Older ST parts need
 * an explicit WREN before each FIRST command.
 * NOTE(review): declarations, ret initialization and the final return
 * are elided in this excerpt.
 */
12396 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12401 for (i = 0; i < len; i += 4, offset += 4) {
12402 u32 page_off, phy_addr, nvram_cmd;
12405 memcpy(&data, buf + i, 4);
12406 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12408 page_off = offset % tp->nvram_pagesize;
/* Translate the linear offset for parts with address translation. */
12410 phy_addr = tg3_nvram_phys_addr(tp, offset);
12412 tw32(NVRAM_ADDR, phy_addr);
12414 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12416 if (page_off == 0 || i == 0)
12417 nvram_cmd |= NVRAM_CMD_FIRST;
12418 if (page_off == (tp->nvram_pagesize - 4))
12419 nvram_cmd |= NVRAM_CMD_LAST;
12421 if (i == (len - 4))
12422 nvram_cmd |= NVRAM_CMD_LAST;
/* Pre-5755 ST flash wants an explicit write-enable per page. */
12424 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12425 !tg3_flag(tp, 5755_PLUS) &&
12426 (tp->nvram_jedecnum == JEDEC_ST) &&
12427 (nvram_cmd & NVRAM_CMD_FIRST)) {
12429 if ((ret = tg3_nvram_exec_cmd(tp,
12430 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12435 if (!tg3_flag(tp, FLASH)) {
12436 /* We always do complete word writes to eeprom. */
12437 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12440 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12446 /* offset and length are dword aligned */
/* Top-level NVRAM write entry point.  Temporarily drops the external
 * write-protect GPIO if EEPROM_WRITE_PROT is set, routes the write to
 * the SEEPROM path or (under the NVRAM lock, with write enabled in
 * GRC_MODE) to the buffered/unbuffered flash path, then restores all
 * protection state.
 * NOTE(review): declarations, some else branches and the final return
 * are elided in this excerpt.
 */
12447 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
/* Deassert the write-protect GPIO for the duration of the write. */
12451 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12452 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12453 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12457 if (!tg3_flag(tp, NVRAM)) {
12458 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12462 ret = tg3_nvram_lock(tp);
12466 tg3_enable_nvram_access(tp);
12467 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12468 tw32(NVRAM_WRITE1, 0x406);
/* Enable NVRAM writes in GRC_MODE for the duration of the block. */
12470 grc_mode = tr32(GRC_MODE);
12471 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12473 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12474 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12477 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12481 grc_mode = tr32(GRC_MODE);
12482 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12484 tg3_disable_nvram_access(tp);
12485 tg3_nvram_unlock(tp);
/* Restore the write-protect GPIO state. */
12488 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12489 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
/* One PCI subsystem ID -> expected PHY ID mapping entry.
 * NOTE(review): the phy_id member and closing brace are elided in
 * this excerpt.
 */
12496 struct subsys_tbl_ent {
12497 u16 subsys_vendor, subsys_devid;
/* Known OEM boards, keyed by PCI subsystem vendor/device ID, mapping
 * each to the PHY it carries.  A phy_id of 0 marks fiber boards
 * without a known copper PHY.  Searched by tg3_lookup_by_subsys().
 * NOTE(review): the closing "};" of this initializer is elided in
 * this excerpt.
 */
12501 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12502 /* Broadcom boards. */
12503 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12504 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12505 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12506 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12507 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12508 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12509 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12510 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12511 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12512 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12513 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12514 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12515 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12516 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12517 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12518 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12519 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12520 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12521 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12522 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12523 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12524 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
/* 3com boards. */
12527 { TG3PCI_SUBVENDOR_ID_3COM,
12528 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12529 { TG3PCI_SUBVENDOR_ID_3COM,
12530 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12531 { TG3PCI_SUBVENDOR_ID_3COM,
12532 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12533 { TG3PCI_SUBVENDOR_ID_3COM,
12534 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12535 { TG3PCI_SUBVENDOR_ID_3COM,
12536 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
/* DELL boards. */
12539 { TG3PCI_SUBVENDOR_ID_DELL,
12540 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12541 { TG3PCI_SUBVENDOR_ID_DELL,
12542 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12543 { TG3PCI_SUBVENDOR_ID_DELL,
12544 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12545 { TG3PCI_SUBVENDOR_ID_DELL,
12546 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12548 /* Compaq boards. */
12549 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12550 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12551 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12552 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12553 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12554 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12555 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12556 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12557 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12558 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
/* IBM boards. */
12561 { TG3PCI_SUBVENDOR_ID_IBM,
12562 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
/* Linear search of subsys_id_to_phy_id[] for an entry matching this
 * device's PCI subsystem vendor and device IDs; returns a pointer to
 * the matching entry.
 * NOTE(review): the no-match return (presumably NULL) is elided in
 * this excerpt.
 */
12565 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12569 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12570 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12571 tp->pdev->subsystem_vendor) &&
12572 (subsys_id_to_phy_id[i].subsys_devid ==
12573 tp->pdev->subsystem_device))
12574 return &subsys_id_to_phy_id[i];
12579 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12584 /* On some early chips the SRAM cannot be accessed in D3hot state,
12585 * so need make sure we're in D0.
12587 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12588 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12589 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12592 /* Make sure register accesses (indirect or otherwise)
12593 * will function correctly.
12595 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12596 tp->misc_host_ctrl);
12598 /* The memory arbiter has to be enabled in order for SRAM accesses
12599 * to succeed. Normally on powerup the tg3 chip firmware will make
12600 * sure it is enabled, but other entities such as system netboot
12601 * code might disable it.
12603 val = tr32(MEMARB_MODE);
12604 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12606 tp->phy_id = TG3_PHY_ID_INVALID;
12607 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12609 /* Assume an onboard device and WOL capable by default. */
12610 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12611 tg3_flag_set(tp, WOL_CAP);
12613 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12614 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12615 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12616 tg3_flag_set(tp, IS_NIC);
12618 val = tr32(VCPU_CFGSHDW);
12619 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12620 tg3_flag_set(tp, ASPM_WORKAROUND);
12621 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12622 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12623 tg3_flag_set(tp, WOL_ENABLE);
12624 device_set_wakeup_enable(&tp->pdev->dev, true);
12629 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12630 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12631 u32 nic_cfg, led_cfg;
12632 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12633 int eeprom_phy_serdes = 0;
12635 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12636 tp->nic_sram_data_cfg = nic_cfg;
12638 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12639 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12640 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12641 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12642 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12643 (ver > 0) && (ver < 0x100))
12644 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12646 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12647 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12649 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12650 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12651 eeprom_phy_serdes = 1;
12653 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12654 if (nic_phy_id != 0) {
12655 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12656 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12658 eeprom_phy_id = (id1 >> 16) << 10;
12659 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12660 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12664 tp->phy_id = eeprom_phy_id;
12665 if (eeprom_phy_serdes) {
12666 if (!tg3_flag(tp, 5705_PLUS))
12667 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12669 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12672 if (tg3_flag(tp, 5750_PLUS))
12673 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12674 SHASTA_EXT_LED_MODE_MASK);
12676 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12680 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12681 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12684 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12685 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12688 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12689 tp->led_ctrl = LED_CTRL_MODE_MAC;
12691 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12692 * read on some older 5700/5701 bootcode.
12694 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12696 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12698 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12702 case SHASTA_EXT_LED_SHARED:
12703 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12704 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12705 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12706 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12707 LED_CTRL_MODE_PHY_2);
12710 case SHASTA_EXT_LED_MAC:
12711 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12714 case SHASTA_EXT_LED_COMBO:
12715 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12716 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12717 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12718 LED_CTRL_MODE_PHY_2);
12723 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12724 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12725 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12726 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12728 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12729 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12731 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12732 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12733 if ((tp->pdev->subsystem_vendor ==
12734 PCI_VENDOR_ID_ARIMA) &&
12735 (tp->pdev->subsystem_device == 0x205a ||
12736 tp->pdev->subsystem_device == 0x2063))
12737 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12739 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12740 tg3_flag_set(tp, IS_NIC);
12743 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12744 tg3_flag_set(tp, ENABLE_ASF);
12745 if (tg3_flag(tp, 5750_PLUS))
12746 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12749 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12750 tg3_flag(tp, 5750_PLUS))
12751 tg3_flag_set(tp, ENABLE_APE);
12753 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12754 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12755 tg3_flag_clear(tp, WOL_CAP);
12757 if (tg3_flag(tp, WOL_CAP) &&
12758 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12759 tg3_flag_set(tp, WOL_ENABLE);
12760 device_set_wakeup_enable(&tp->pdev->dev, true);
12763 if (cfg2 & (1 << 17))
12764 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12766 /* serdes signal pre-emphasis in register 0x590 set by */
12767 /* bootcode if bit 18 is set */
12768 if (cfg2 & (1 << 18))
12769 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12771 if ((tg3_flag(tp, 57765_PLUS) ||
12772 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12773 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12774 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12775 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12777 if (tg3_flag(tp, PCI_EXPRESS) &&
12778 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12779 !tg3_flag(tp, 57765_PLUS)) {
12782 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12783 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12784 tg3_flag_set(tp, ASPM_WORKAROUND);
12787 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12788 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12789 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12790 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12791 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12792 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12795 if (tg3_flag(tp, WOL_CAP))
12796 device_set_wakeup_enable(&tp->pdev->dev,
12797 tg3_flag(tp, WOL_ENABLE));
12799 device_set_wakeup_capable(&tp->pdev->dev, false);
/* Issue a command to the chip's OTP (one-time-programmable memory)
 * controller and poll OTP_STATUS until the CMD_DONE bit asserts.
 * Returns 0 on success, -EBUSY if the command never completes within
 * the polling window.  NOTE(review): several lines (locals, loop body
 * delay, braces) are elided from this excerpt.
 */
12802 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
/* Write the command twice: first with the START trigger bit, then bare. */
12807 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12808 tw32(OTP_CTRL, cmd);
12810 /* Wait for up to 1 ms for command to execute. */
12811 for (i = 0; i < 100; i++) {
12812 val = tr32(OTP_STATUS);
12813 if (val & OTP_STATUS_CMD_DONE)
/* Re-check CMD_DONE after the loop: distinguishes success from timeout. */
12818 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12821 /* Read the gphy configuration from the OTP region of the chip. The gphy
12822 * configuration is a 32-bit value that straddles the alignment boundary.
12823 * We do two 32-bit reads and then shift and merge the results.
/* Returns the merged 32-bit gphy config, presumably 0 on failure —
 * the early-return error paths are elided from this excerpt.
 */
12825 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12827 u32 bhalf_otp, thalf_otp;
/* Route OTP accesses through the GRC register interface. */
12829 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12831 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
/* First read: the "top half" word at MAGIC1. */
12834 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12836 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12839 thalf_otp = tr32(OTP_READ_DATA);
/* Second read: the "bottom half" word at MAGIC2. */
12841 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12843 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12846 bhalf_otp = tr32(OTP_READ_DATA);
/* Merge: low 16 bits of the top word become the high half of the result. */
12848 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
/* Initialize tp->link_config to its autoneg-everything default:
 * advertise every speed/duplex the PHY type supports, and mark all
 * current/original speed and duplex fields as invalid (no link yet).
 */
12851 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12853 u32 adv = ADVERTISED_Autoneg |
/* Gigabit modes only if the PHY is not restricted to 10/100. */
12856 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12857 adv |= ADVERTISED_1000baseT_Half |
12858 ADVERTISED_1000baseT_Full;
/* Copper-only modes for non-serdes PHYs; serdes gets FIBRE instead. */
12860 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12861 adv |= ADVERTISED_100baseT_Half |
12862 ADVERTISED_100baseT_Full |
12863 ADVERTISED_10baseT_Half |
12864 ADVERTISED_10baseT_Full |
12867 adv |= ADVERTISED_FIBRE;
12869 tp->link_config.advertising = adv;
12870 tp->link_config.speed = SPEED_INVALID;
12871 tp->link_config.duplex = DUPLEX_INVALID;
12872 tp->link_config.autoneg = AUTONEG_ENABLE;
12873 tp->link_config.active_speed = SPEED_INVALID;
12874 tp->link_config.active_duplex = DUPLEX_INVALID;
12875 tp->link_config.orig_speed = SPEED_INVALID;
12876 tp->link_config.orig_duplex = DUPLEX_INVALID;
12877 tp->link_config.orig_autoneg = AUTONEG_INVALID;
/* Probe and identify the PHY attached to this NIC, set up the default
 * link configuration, and (when safe) reset the PHY and program its
 * advertisement registers.  PHY ID discovery order: hardware MII
 * registers -> value already read from EEPROM -> hard-coded subsystem
 * table.  NOTE(review): many lines (braces, else arms, error paths)
 * are elided from this excerpt.
 */
12880 static int __devinit tg3_phy_probe(struct tg3 *tp)
12882 u32 hw_phy_id_1, hw_phy_id_2;
12883 u32 hw_phy_id, hw_phy_id_masked;
12886 /* flow control autonegotiation is default behavior */
12887 tg3_flag_set(tp, PAUSE_AUTONEG);
12888 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
/* When phylib drives the PHY, delegate the whole probe to it. */
12890 if (tg3_flag(tp, USE_PHYLIB))
12891 return tg3_phy_init(tp);
12893 /* Reading the PHY ID register can conflict with ASF
12894 * firmware access to the PHY hardware.
12897 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
12898 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12900 /* Now read the physical PHY_ID from the chip and verify
12901 * that it is sane. If it doesn't look good, we fall back
12902 * to either the hard-coded table based PHY_ID and failing
12903 * that the value found in the eeprom area.
12905 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12906 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
/* Pack PHYSID1/PHYSID2 into tg3's internal phy_id layout. */
12908 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12909 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12910 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
12912 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
/* Hardware-read ID looks sane: trust it. */
12915 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12916 tp->phy_id = hw_phy_id;
12917 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12918 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12920 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12922 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12923 /* Do nothing, phy ID already set up in
12924 * tg3_get_eeprom_hw_cfg().
12927 struct subsys_tbl_ent *p;
12929 /* No eeprom signature? Try the hardcoded
12930 * subsys device table.
12932 p = tg3_lookup_by_subsys(tp);
12936 tp->phy_id = p->phy_id;
12938 tp->phy_id == TG3_PHY_ID_BCM8002)
12939 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
/* 5718 (non-A0) and 57765 (non-A0) copper PHYs support EEE. */
12943 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12944 ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
12945 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
12946 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12947 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12948 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12950 tg3_phy_init_link_config(tp);
/* Only touch the PHY directly when no management firmware owns it. */
12952 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12953 !tg3_flag(tp, ENABLE_APE) &&
12954 !tg3_flag(tp, ENABLE_ASF)) {
12955 u32 bmsr, adv_reg, tg3_ctrl, mask;
/* BMSR latches link-down; read twice so the second read is current. */
12957 tg3_readphy(tp, MII_BMSR, &bmsr);
12958 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12959 (bmsr & BMSR_LSTATUS))
12960 goto skip_phy_reset;
12962 err = tg3_phy_reset(tp);
12966 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
12967 ADVERTISE_100HALF | ADVERTISE_100FULL |
12968 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
12970 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
12971 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
12972 MII_TG3_CTRL_ADV_1000_FULL);
/* 5701 A0/B0 erratum: force master mode for gigabit. */
12973 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12974 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
12975 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
12976 MII_TG3_CTRL_ENABLE_AS_MASTER);
12979 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12980 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12981 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
/* If not already advertising everything, rewrite the advert regs
 * and restart autonegotiation.
 */
12982 if (!tg3_copper_is_advertising_all(tp, mask)) {
12983 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12985 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12986 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12988 tg3_writephy(tp, MII_BMCR,
12989 BMCR_ANENABLE | BMCR_ANRESTART);
12991 tg3_phy_set_wirespeed(tp);
12993 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12994 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12995 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
/* BCM5401 needs its DSP coefficients loaded (retried below). */
12999 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13000 err = tg3_init_5401phy_dsp(tp);
13004 err = tg3_init_5401phy_dsp(tp);
/* Parse the PCI Vital Product Data block: extract the board part number
 * into tp->board_part_number and, on Dell boards (MFR ID "1028"), the
 * vendor firmware version into tp->fw_ver.  Falls back to per-ASIC
 * hard-coded part-number strings when no usable VPD is found.
 * NOTE(review): error-path lines (goto targets, free of vpd_data,
 * closing braces) are elided from this excerpt.
 */
13010 static void __devinit tg3_read_vpd(struct tg3 *tp)
13013 unsigned int block_end, rosize, len;
13016 vpd_data = (u8 *)tg3_vpd_readblock(tp);
/* Locate the read-only VPD section (LRDT tag). */
13020 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13021 PCI_VPD_LRDT_RO_DATA);
13023 goto out_not_found;
13025 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13026 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13027 i += PCI_VPD_LRDT_TAG_SIZE;
/* Bounds-check the RO section against the buffer we actually read. */
13029 if (block_end > TG3_NVM_VPD_LEN)
13030 goto out_not_found;
/* Dell-specific: MFR ID keyword must equal "1028" to trust VENDOR0. */
13032 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13033 PCI_VPD_RO_KEYWORD_MFR_ID);
13035 len = pci_vpd_info_field_size(&vpd_data[j]);
13037 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13038 if (j + len > block_end || len != 4 ||
13039 memcmp(&vpd_data[j], "1028", 4))
13042 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13043 PCI_VPD_RO_KEYWORD_VENDOR0)
13047 len = pci_vpd_info_field_size(&vpd_data[j]);
13049 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13050 if (j + len > block_end)
/* Copy vendor fw version, then append " bc " before the bootcode ver. */
13053 memcpy(tp->fw_ver, &vpd_data[j], len);
13054 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
/* Board part number keyword (PN). */
13058 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13059 PCI_VPD_RO_KEYWORD_PARTNO);
13061 goto out_not_found;
13063 len = pci_vpd_info_field_size(&vpd_data[i]);
13065 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13066 if (len > TG3_BPN_SIZE ||
13067 (len + i) > TG3_NVM_VPD_LEN)
13068 goto out_not_found;
13070 memcpy(tp->board_part_number, &vpd_data[i], len);
/* Fallback: synthesize a part number from the known PCI device ID. */
13074 if (tp->board_part_number[0])
13078 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13079 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13080 strcpy(tp->board_part_number, "BCM5717");
13081 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13082 strcpy(tp->board_part_number, "BCM5718");
13085 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13086 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13087 strcpy(tp->board_part_number, "BCM57780");
13088 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13089 strcpy(tp->board_part_number, "BCM57760");
13090 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13091 strcpy(tp->board_part_number, "BCM57790");
13092 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13093 strcpy(tp->board_part_number, "BCM57788");
13096 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13097 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13098 strcpy(tp->board_part_number, "BCM57761");
13099 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13100 strcpy(tp->board_part_number, "BCM57765");
13101 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13102 strcpy(tp->board_part_number, "BCM57781");
13103 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13104 strcpy(tp->board_part_number, "BCM57785");
13105 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13106 strcpy(tp->board_part_number, "BCM57791");
13107 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13108 strcpy(tp->board_part_number, "BCM57795");
13111 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13112 strcpy(tp->board_part_number, "BCM95906");
13115 strcpy(tp->board_part_number, "none");
/* Validate a firmware image header in NVRAM at @offset: the first word
 * must carry the 0x0c000000 signature in its top 6 bits, and the
 * following word must also be readable.  NOTE(review): the tail of the
 * function (final check and return) is elided from this excerpt;
 * presumably returns non-zero for a valid image — confirm in full source.
 */
13119 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13123 if (tg3_nvram_read(tp, offset, &val) ||
13124 (val & 0xfc000000) != 0x0c000000 ||
13125 tg3_nvram_read(tp, offset + 4, &val) ||
/* Append the bootcode version to tp->fw_ver.  New-format images carry
 * a 16-byte ASCII version string inside the image; older images store
 * a packed major/minor word at TG3_NVM_PTREV_BCVER which is formatted
 * as "vM.mm".  NOTE(review): locals, braces and some error returns are
 * elided from this excerpt.
 */
13132 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13134 u32 val, offset, start, ver_offset;
13136 bool newver = false;
/* NVRAM dir word 0xc = image offset, word 0x4 = image start address. */
13138 if (tg3_nvram_read(tp, 0xc, &offset) ||
13139 tg3_nvram_read(tp, 0x4, &start))
13142 offset = tg3_nvram_logical_addr(tp, offset);
13144 if (tg3_nvram_read(tp, offset, &val))
/* 0x0c000000 signature marks the new-format image header. */
13147 if ((val & 0xfc000000) == 0x0c000000) {
13148 if (tg3_nvram_read(tp, offset + 4, &val))
13155 dst_off = strlen(tp->fw_ver);
/* Need room for the 16-byte embedded version string. */
13158 if (TG3_VER_SIZE - dst_off < 16 ||
13159 tg3_nvram_read(tp, offset + 8, &ver_offset))
13162 offset = offset + ver_offset - start;
13163 for (i = 0; i < 16; i += 4) {
13165 if (tg3_nvram_read_be32(tp, offset + i, &v))
13168 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
/* Old format: packed major/minor version word. */
13173 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13176 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13177 TG3_NVM_BCVER_MAJSFT;
13178 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13179 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13180 "v%d.%02d", major, minor);
/* Read the hardware selfboot major/minor version from NVRAM config
 * word 1 and format it into tp->fw_ver as "sb vM.mm".
 */
13184 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13186 u32 val, major, minor;
13188 /* Use native endian representation */
13189 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13192 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13193 TG3_NVM_HWSB_CFG1_MAJSFT;
13194 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13195 TG3_NVM_HWSB_CFG1_MINSFT;
/* NOTE(review): literal 32 here instead of TG3_VER_SIZE used by the
 * sibling version helpers — verify they agree.
 */
13197 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
/* Decode the selfboot (format-1) firmware version from NVRAM and append
 * it to tp->fw_ver as "sb vM.mm" plus an optional build letter.
 * @val: the NVRAM magic word already read by the caller, which encodes
 * the selfboot format and revision.  NOTE(review): switch break
 * statements and early-return lines are elided from this excerpt.
 */
13200 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13202 u32 offset, major, minor, build;
13204 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13206 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
/* Each format-1 revision keeps its version word at a different offset. */
13209 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13210 case TG3_EEPROM_SB_REVISION_0:
13211 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13213 case TG3_EEPROM_SB_REVISION_2:
13214 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13216 case TG3_EEPROM_SB_REVISION_3:
13217 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13219 case TG3_EEPROM_SB_REVISION_4:
13220 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13222 case TG3_EEPROM_SB_REVISION_5:
13223 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13225 case TG3_EEPROM_SB_REVISION_6:
13226 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13232 if (tg3_nvram_read(tp, offset, &val))
/* Unpack build / major / minor fields from the EDH word. */
13235 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13236 TG3_EEPROM_SB_EDH_BLD_SHFT;
13237 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13238 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13239 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
/* Sanity limits: 2-digit minor, build letter must fit 'a'..'z'. */
13241 if (minor > 99 || build > 26)
13244 offset = strlen(tp->fw_ver);
13245 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13246 " v%d.%02d", major, minor);
/* Nonzero build is rendered as a trailing letter ('a' = build 1). */
13249 offset = strlen(tp->fw_ver);
13250 if (offset < TG3_VER_SIZE - 1)
13251 tp->fw_ver[offset] = 'a' + build - 1;
/* Locate the ASF/management firmware image via the NVRAM directory and
 * append its 16-byte version string to tp->fw_ver (preceded by ", ").
 * NOTE(review): locals, braces and some early returns are elided from
 * this excerpt.
 */
13255 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13257 u32 val, offset, start;
/* Scan the NVRAM directory for an entry of type ASFINI. */
13260 for (offset = TG3_NVM_DIR_START;
13261 offset < TG3_NVM_DIR_END;
13262 offset += TG3_NVM_DIRENT_SIZE) {
13263 if (tg3_nvram_read(tp, offset, &val))
13266 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13270 if (offset == TG3_NVM_DIR_END)
/* Pre-5705 parts use a fixed image start; later parts read it from
 * the directory entry preceding this one.
 */
13273 if (!tg3_flag(tp, 5705_PLUS))
13274 start = 0x08000000;
13275 else if (tg3_nvram_read(tp, offset - 4, &start))
13278 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13279 !tg3_fw_img_is_valid(tp, offset) ||
13280 tg3_nvram_read(tp, offset + 8, &val))
13283 offset += val - start;
13285 vlen = strlen(tp->fw_ver);
13287 tp->fw_ver[vlen++] = ',';
13288 tp->fw_ver[vlen++] = ' ';
/* Copy up to 16 bytes of version text, truncating at TG3_VER_SIZE. */
13290 for (i = 0; i < 4; i++) {
13292 if (tg3_nvram_read_be32(tp, offset, &v))
13295 offset += sizeof(v);
13297 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13298 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13302 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
/* Append the APE (DASH/NCSI management processor) firmware version to
 * tp->fw_ver, but only when both APE and ASF are enabled and the APE
 * firmware reports itself ready.  Also sets the APE_HAS_NCSI flag when
 * the firmware advertises the NCSI feature.  NOTE(review): the fwtype
 * string selection lines are elided from this excerpt.
 */
13307 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13313 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
/* Verify the APE shared-memory segment signature before trusting it. */
13316 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13317 if (apedata != APE_SEG_SIG_MAGIC)
13320 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13321 if (!(apedata & APE_FW_STATUS_READY))
13324 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13326 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13327 tg3_flag_set(tp, APE_HAS_NCSI);
13333 vlen = strlen(tp->fw_ver);
/* Version word packs major/minor/revision/build into bit fields. */
13335 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13337 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13338 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13339 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13340 (apedata & APE_FW_VERSION_BLDMSK));
/* Top-level firmware version assembly: dispatch on the NVRAM magic word
 * to the bootcode / selfboot / hardware-selfboot readers, then append
 * the management firmware version when ASF owns the NIC.  Leaves the
 * result NUL-terminated in tp->fw_ver.  NOTE(review): early returns
 * and intermediate labels are elided from this excerpt.
 */
13343 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13346 bool vpd_vers = false;
/* tg3_read_vpd() may have already populated fw_ver; keep it. */
13348 if (tp->fw_ver[0] != 0)
13351 if (tg3_flag(tp, NO_NVRAM)) {
13352 strcat(tp->fw_ver, "sb");
13356 if (tg3_nvram_read(tp, 0, &val))
/* The magic word identifies which firmware layout is in NVRAM. */
13359 if (val == TG3_EEPROM_MAGIC)
13360 tg3_read_bc_ver(tp);
13361 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13362 tg3_read_sb_ver(tp, val);
13363 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13364 tg3_read_hwsb_ver(tp);
13368 if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
13371 tg3_read_mgmtfw_ver(tp);
/* Guarantee NUL termination regardless of which path wrote fw_ver. */
13374 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13377 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
/* Return the RX return-ring entry count appropriate for this chip
 * family: 5717-class large-ring parts, jumbo-capable non-5780-class
 * parts, or the default 5705 size.
 */
13379 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13381 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13382 return TG3_RX_RET_MAX_SIZE_5717;
13383 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13384 return TG3_RX_RET_MAX_SIZE_5700;
13386 return TG3_RX_RET_MAX_SIZE_5705;
/* Host bridges known to reorder posted writes to the mailbox registers;
 * matched with pci_dev_present() to enable the MBOX_WRITE_REORDER
 * read-back workaround.  NOTE(review): the terminating entry of this
 * table is elided from this excerpt.
 */
13389 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13390 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13391 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13392 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13396 static int __devinit tg3_get_invariants(struct tg3 *tp)
13399 u32 pci_state_reg, grc_misc_cfg;
13404 /* Force memory write invalidate off. If we leave it on,
13405 * then on 5700_BX chips we have to enable a workaround.
13406 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13407 * to match the cacheline size. The Broadcom driver have this
13408 * workaround but turns MWI off all the times so never uses
13409 * it. This seems to suggest that the workaround is insufficient.
13411 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13412 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13413 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13415 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13416 * has the register indirect write enable bit set before
13417 * we try to access any of the MMIO registers. It is also
13418 * critical that the PCI-X hw workaround situation is decided
13419 * before that as well.
13421 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13424 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13425 MISC_HOST_CTRL_CHIPREV_SHIFT);
13426 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13427 u32 prod_id_asic_rev;
13429 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13430 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13431 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13432 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13433 pci_read_config_dword(tp->pdev,
13434 TG3PCI_GEN2_PRODID_ASICREV,
13435 &prod_id_asic_rev);
13436 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13437 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13438 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13439 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13440 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13441 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13442 pci_read_config_dword(tp->pdev,
13443 TG3PCI_GEN15_PRODID_ASICREV,
13444 &prod_id_asic_rev);
13446 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13447 &prod_id_asic_rev);
13449 tp->pci_chip_rev_id = prod_id_asic_rev;
13452 /* Wrong chip ID in 5752 A0. This code can be removed later
13453 * as A0 is not in production.
13455 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13456 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13458 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13459 * we need to disable memory and use config. cycles
13460 * only to access all registers. The 5702/03 chips
13461 * can mistakenly decode the special cycles from the
13462 * ICH chipsets as memory write cycles, causing corruption
13463 * of register and memory space. Only certain ICH bridges
13464 * will drive special cycles with non-zero data during the
13465 * address phase which can fall within the 5703's address
13466 * range. This is not an ICH bug as the PCI spec allows
13467 * non-zero address during special cycles. However, only
13468 * these ICH bridges are known to drive non-zero addresses
13469 * during special cycles.
13471 * Since special cycles do not cross PCI bridges, we only
13472 * enable this workaround if the 5703 is on the secondary
13473 * bus of these ICH bridges.
13475 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13476 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13477 static struct tg3_dev_id {
13481 } ich_chipsets[] = {
13482 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13484 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13486 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13488 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13492 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13493 struct pci_dev *bridge = NULL;
13495 while (pci_id->vendor != 0) {
13496 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13502 if (pci_id->rev != PCI_ANY_ID) {
13503 if (bridge->revision > pci_id->rev)
13506 if (bridge->subordinate &&
13507 (bridge->subordinate->number ==
13508 tp->pdev->bus->number)) {
13509 tg3_flag_set(tp, ICH_WORKAROUND);
13510 pci_dev_put(bridge);
13516 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
13517 static struct tg3_dev_id {
13520 } bridge_chipsets[] = {
13521 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13522 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13525 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13526 struct pci_dev *bridge = NULL;
13528 while (pci_id->vendor != 0) {
13529 bridge = pci_get_device(pci_id->vendor,
13536 if (bridge->subordinate &&
13537 (bridge->subordinate->number <=
13538 tp->pdev->bus->number) &&
13539 (bridge->subordinate->subordinate >=
13540 tp->pdev->bus->number)) {
13541 tg3_flag_set(tp, 5701_DMA_BUG);
13542 pci_dev_put(bridge);
13548 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13549 * DMA addresses > 40-bit. This bridge may have other additional
13550 * 57xx devices behind it in some 4-port NIC designs for example.
13551 * Any tg3 device found behind the bridge will also need the 40-bit
13554 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13555 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13556 tg3_flag_set(tp, 5780_CLASS);
13557 tg3_flag_set(tp, 40BIT_DMA_BUG);
13558 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13560 struct pci_dev *bridge = NULL;
13563 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13564 PCI_DEVICE_ID_SERVERWORKS_EPB,
13566 if (bridge && bridge->subordinate &&
13567 (bridge->subordinate->number <=
13568 tp->pdev->bus->number) &&
13569 (bridge->subordinate->subordinate >=
13570 tp->pdev->bus->number)) {
13571 tg3_flag_set(tp, 40BIT_DMA_BUG);
13572 pci_dev_put(bridge);
13578 /* Initialize misc host control in PCI block. */
13579 tp->misc_host_ctrl |= (misc_ctrl_reg &
13580 MISC_HOST_CTRL_CHIPREV);
13581 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13582 tp->misc_host_ctrl);
13584 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13585 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13586 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13587 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13588 tp->pdev_peer = tg3_find_peer(tp);
13590 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13591 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13592 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13593 tg3_flag_set(tp, 5717_PLUS);
13595 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13596 tg3_flag(tp, 5717_PLUS))
13597 tg3_flag_set(tp, 57765_PLUS);
13599 /* Intentionally exclude ASIC_REV_5906 */
13600 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13601 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13602 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13603 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13604 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13605 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13606 tg3_flag(tp, 57765_PLUS))
13607 tg3_flag_set(tp, 5755_PLUS);
13609 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13610 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13611 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13612 tg3_flag(tp, 5755_PLUS) ||
13613 tg3_flag(tp, 5780_CLASS))
13614 tg3_flag_set(tp, 5750_PLUS);
13616 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13617 tg3_flag(tp, 5750_PLUS))
13618 tg3_flag_set(tp, 5705_PLUS);
13620 /* 5700 B0 chips do not support checksumming correctly due
13621 * to hardware bugs.
13623 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
13624 u32 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
13626 if (tg3_flag(tp, 5755_PLUS))
13627 features |= NETIF_F_IPV6_CSUM;
13628 tp->dev->features |= features;
13629 tp->dev->hw_features |= features;
13630 tp->dev->vlan_features |= features;
13633 /* Determine TSO capabilities */
13634 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13635 ; /* Do nothing. HW bug. */
13636 else if (tg3_flag(tp, 57765_PLUS))
13637 tg3_flag_set(tp, HW_TSO_3);
13638 else if (tg3_flag(tp, 5755_PLUS) ||
13639 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13640 tg3_flag_set(tp, HW_TSO_2);
13641 else if (tg3_flag(tp, 5750_PLUS)) {
13642 tg3_flag_set(tp, HW_TSO_1);
13643 tg3_flag_set(tp, TSO_BUG);
13644 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13645 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13646 tg3_flag_clear(tp, TSO_BUG);
13647 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13648 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13649 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13650 tg3_flag_set(tp, TSO_BUG);
13651 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13652 tp->fw_needed = FIRMWARE_TG3TSO5;
13654 tp->fw_needed = FIRMWARE_TG3TSO;
13659 if (tg3_flag(tp, 5750_PLUS)) {
13660 tg3_flag_set(tp, SUPPORT_MSI);
13661 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13662 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13663 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13664 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13665 tp->pdev_peer == tp->pdev))
13666 tg3_flag_clear(tp, SUPPORT_MSI);
13668 if (tg3_flag(tp, 5755_PLUS) ||
13669 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13670 tg3_flag_set(tp, 1SHOT_MSI);
13673 if (tg3_flag(tp, 57765_PLUS)) {
13674 tg3_flag_set(tp, SUPPORT_MSIX);
13675 tp->irq_max = TG3_IRQ_MAX_VECS;
13679 /* All chips can get confused if TX buffers
13680 * straddle the 4GB address boundary.
13682 tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
13684 if (tg3_flag(tp, 5755_PLUS))
13685 tg3_flag_set(tp, SHORT_DMA_BUG);
13687 tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
13689 if (tg3_flag(tp, 5717_PLUS))
13690 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13692 if (tg3_flag(tp, 57765_PLUS) &&
13693 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13694 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13696 if (!tg3_flag(tp, 5705_PLUS) ||
13697 tg3_flag(tp, 5780_CLASS) ||
13698 tg3_flag(tp, USE_JUMBO_BDFLAG))
13699 tg3_flag_set(tp, JUMBO_CAPABLE);
13701 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13704 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13705 if (tp->pcie_cap != 0) {
13708 tg3_flag_set(tp, PCI_EXPRESS);
13710 tp->pcie_readrq = 4096;
13711 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13712 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13713 tp->pcie_readrq = 2048;
13715 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13717 pci_read_config_word(tp->pdev,
13718 tp->pcie_cap + PCI_EXP_LNKCTL,
13720 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13721 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13722 tg3_flag_clear(tp, HW_TSO_2);
13723 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13724 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13725 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13726 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13727 tg3_flag_set(tp, CLKREQ_BUG);
13728 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13729 tg3_flag_set(tp, L1PLLPD_EN);
13731 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13732 tg3_flag_set(tp, PCI_EXPRESS);
13733 } else if (!tg3_flag(tp, 5705_PLUS) ||
13734 tg3_flag(tp, 5780_CLASS)) {
13735 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13736 if (!tp->pcix_cap) {
13737 dev_err(&tp->pdev->dev,
13738 "Cannot find PCI-X capability, aborting\n");
13742 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13743 tg3_flag_set(tp, PCIX_MODE);
13746 /* If we have an AMD 762 or VIA K8T800 chipset, write
13747 * reordering to the mailbox registers done by the host
13748 * controller can cause major troubles. We read back from
13749 * every mailbox register write to force the writes to be
13750 * posted to the chip in order.
13752 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13753 !tg3_flag(tp, PCI_EXPRESS))
13754 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13756 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13757 &tp->pci_cacheline_sz);
13758 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13759 &tp->pci_lat_timer);
13760 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13761 tp->pci_lat_timer < 64) {
13762 tp->pci_lat_timer = 64;
13763 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13764 tp->pci_lat_timer);
13767 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13768 /* 5700 BX chips need to have their TX producer index
13769 * mailboxes written twice to workaround a bug.
13771 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13773 /* If we are in PCI-X mode, enable register write workaround.
13775 * The workaround is to use indirect register accesses
13776 * for all chip writes not to mailbox registers.
13778 if (tg3_flag(tp, PCIX_MODE)) {
13781 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13783 /* The chip can have it's power management PCI config
13784 * space registers clobbered due to this bug.
13785 * So explicitly force the chip into D0 here.
13787 pci_read_config_dword(tp->pdev,
13788 tp->pm_cap + PCI_PM_CTRL,
13790 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13791 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13792 pci_write_config_dword(tp->pdev,
13793 tp->pm_cap + PCI_PM_CTRL,
13796 /* Also, force SERR#/PERR# in PCI command. */
13797 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13798 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13799 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13803 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13804 tg3_flag_set(tp, PCI_HIGH_SPEED);
13805 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13806 tg3_flag_set(tp, PCI_32BIT);
13808 /* Chip-specific fixup from Broadcom driver */
13809 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13810 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13811 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13812 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13815 /* Default fast path register access methods */
13816 tp->read32 = tg3_read32;
13817 tp->write32 = tg3_write32;
13818 tp->read32_mbox = tg3_read32;
13819 tp->write32_mbox = tg3_write32;
13820 tp->write32_tx_mbox = tg3_write32;
13821 tp->write32_rx_mbox = tg3_write32;
13823 /* Various workaround register access methods */
13824 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13825 tp->write32 = tg3_write_indirect_reg32;
13826 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13827 (tg3_flag(tp, PCI_EXPRESS) &&
13828 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13830 * Back to back register writes can cause problems on these
13831 * chips, the workaround is to read back all reg writes
13832 * except those to mailbox regs.
13834 * See tg3_write_indirect_reg32().
13836 tp->write32 = tg3_write_flush_reg32;
13839 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13840 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13841 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13842 tp->write32_rx_mbox = tg3_write_flush_reg32;
13845 if (tg3_flag(tp, ICH_WORKAROUND)) {
13846 tp->read32 = tg3_read_indirect_reg32;
13847 tp->write32 = tg3_write_indirect_reg32;
13848 tp->read32_mbox = tg3_read_indirect_mbox;
13849 tp->write32_mbox = tg3_write_indirect_mbox;
13850 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13851 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13856 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13857 pci_cmd &= ~PCI_COMMAND_MEMORY;
13858 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13860 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13861 tp->read32_mbox = tg3_read32_mbox_5906;
13862 tp->write32_mbox = tg3_write32_mbox_5906;
13863 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13864 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13867 if (tp->write32 == tg3_write_indirect_reg32 ||
13868 (tg3_flag(tp, PCIX_MODE) &&
13869 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13870 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13871 tg3_flag_set(tp, SRAM_USE_CONFIG);
13873 /* Get eeprom hw config before calling tg3_set_power_state().
13874 * In particular, the TG3_FLAG_IS_NIC flag must be
13875 * determined before calling tg3_set_power_state() so that
13876 * we know whether or not to switch out of Vaux power.
13877 * When the flag is set, it means that GPIO1 is used for eeprom
13878 * write protect and also implies that it is a LOM where GPIOs
13879 * are not used to switch power.
13881 tg3_get_eeprom_hw_cfg(tp);
13883 if (tg3_flag(tp, ENABLE_APE)) {
13884 /* Allow reads and writes to the
13885 * APE register and memory space.
13887 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13888 PCISTATE_ALLOW_APE_SHMEM_WR |
13889 PCISTATE_ALLOW_APE_PSPACE_WR;
13890 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13894 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13895 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13896 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13897 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13898 tg3_flag(tp, 57765_PLUS))
13899 tg3_flag_set(tp, CPMU_PRESENT);
13901 /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13902 * GPIO1 driven high will bring 5700's external PHY out of reset.
13903 * It is also used as eeprom write protect on LOMs.
13905 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13906 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13907 tg3_flag(tp, EEPROM_WRITE_PROT))
13908 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13909 GRC_LCLCTRL_GPIO_OUTPUT1);
13910 /* Unused GPIO3 must be driven as output on 5752 because there
13911 * are no pull-up resistors on unused GPIO pins.
13913 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13914 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13916 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13917 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13918 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13919 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13921 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13922 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13923 /* Turn off the debug UART. */
13924 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13925 if (tg3_flag(tp, IS_NIC))
13926 /* Keep VMain power. */
13927 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13928 GRC_LCLCTRL_GPIO_OUTPUT0;
13931 /* Force the chip into D0. */
13932 err = tg3_power_up(tp);
13934 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13938 /* Derive initial jumbo mode from MTU assigned in
13939 * ether_setup() via the alloc_etherdev() call
13941 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
13942 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13944 /* Determine WakeOnLan speed to use. */
13945 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13946 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13947 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13948 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13949 tg3_flag_clear(tp, WOL_SPEED_100MB);
13951 tg3_flag_set(tp, WOL_SPEED_100MB);
13954 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13955 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13957 /* A few boards don't want Ethernet@WireSpeed phy feature */
13958 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13959 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
13960 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13961 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13962 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13963 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13964 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13966 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13967 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13968 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13969 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13970 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13972 if (tg3_flag(tp, 5705_PLUS) &&
13973 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13974 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13975 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13976 !tg3_flag(tp, 57765_PLUS)) {
13977 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13978 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13979 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13980 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13981 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13982 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13983 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
13984 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13985 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
13987 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
13990 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13991 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13992 tp->phy_otp = tg3_read_otp_phycfg(tp);
13993 if (tp->phy_otp == 0)
13994 tp->phy_otp = TG3_OTP_DEFAULT;
13997 if (tg3_flag(tp, CPMU_PRESENT))
13998 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14000 tp->mi_mode = MAC_MI_MODE_BASE;
14002 tp->coalesce_mode = 0;
14003 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14004 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14005 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14007 /* Set these bits to enable statistics workaround. */
14008 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14009 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14010 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14011 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14012 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14015 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14016 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14017 tg3_flag_set(tp, USE_PHYLIB);
14019 err = tg3_mdio_init(tp);
14023 /* Initialize data/descriptor byte/word swapping. */
14024 val = tr32(GRC_MODE);
14025 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14026 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14027 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14028 GRC_MODE_B2HRX_ENABLE |
14029 GRC_MODE_HTX2B_ENABLE |
14030 GRC_MODE_HOST_STACKUP);
14032 val &= GRC_MODE_HOST_STACKUP;
14034 tw32(GRC_MODE, val | tp->grc_mode);
14036 tg3_switch_clocks(tp);
14038 /* Clear this out for sanity. */
14039 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14041 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14043 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14044 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14045 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14047 if (chiprevid == CHIPREV_ID_5701_A0 ||
14048 chiprevid == CHIPREV_ID_5701_B0 ||
14049 chiprevid == CHIPREV_ID_5701_B2 ||
14050 chiprevid == CHIPREV_ID_5701_B5) {
14051 void __iomem *sram_base;
14053 /* Write some dummy words into the SRAM status block
14054 * area, see if it reads back correctly. If the return
14055 * value is bad, force enable the PCIX workaround.
14057 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14059 writel(0x00000000, sram_base);
14060 writel(0x00000000, sram_base + 4);
14061 writel(0xffffffff, sram_base + 4);
14062 if (readl(sram_base) != 0x00000000)
14063 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14068 tg3_nvram_init(tp);
14070 grc_misc_cfg = tr32(GRC_MISC_CFG);
14071 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14073 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14074 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14075 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14076 tg3_flag_set(tp, IS_5788);
14078 if (!tg3_flag(tp, IS_5788) &&
14079 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
14080 tg3_flag_set(tp, TAGGED_STATUS);
14081 if (tg3_flag(tp, TAGGED_STATUS)) {
14082 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14083 HOSTCC_MODE_CLRTICK_TXBD);
14085 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14086 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14087 tp->misc_host_ctrl);
14090 /* Preserve the APE MAC_MODE bits */
14091 if (tg3_flag(tp, ENABLE_APE))
14092 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14094 tp->mac_mode = TG3_DEF_MAC_MODE;
14096 /* these are limited to 10/100 only */
14097 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14098 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14099 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14100 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14101 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14102 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14103 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14104 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14105 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14106 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14107 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14108 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14109 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14110 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14111 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14112 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14114 err = tg3_phy_probe(tp);
14116 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14117 /* ... but do not return immediately ... */
14122 tg3_read_fw_ver(tp);
14124 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14125 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14127 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14128 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14130 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14133 /* 5700 {AX,BX} chips have a broken status block link
14134 * change bit implementation, so we must use the
14135 * status register in those cases.
14137 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14138 tg3_flag_set(tp, USE_LINKCHG_REG);
14140 tg3_flag_clear(tp, USE_LINKCHG_REG);
14142 /* The led_ctrl is set during tg3_phy_probe, here we might
14143 * have to force the link status polling mechanism based
14144 * upon subsystem IDs.
14146 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14147 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14148 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14149 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14150 tg3_flag_set(tp, USE_LINKCHG_REG);
14153 /* For all SERDES we poll the MAC status register. */
14154 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14155 tg3_flag_set(tp, POLL_SERDES);
14157 tg3_flag_clear(tp, POLL_SERDES);
14159 tp->rx_offset = NET_IP_ALIGN;
14160 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14161 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14162 tg3_flag(tp, PCIX_MODE)) {
14164 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14165 tp->rx_copy_thresh = ~(u16)0;
14169 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14170 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14171 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14173 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14175 /* Increment the rx prod index on the rx std ring by at most
14176 * 8 for these chips to workaround hw errata.
14178 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14179 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14180 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14181 tp->rx_std_max_post = 8;
14183 if (tg3_flag(tp, ASPM_WORKAROUND))
14184 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14185 PCIE_PWR_MGMT_L1_THRESH_MSK;
14190 #ifdef CONFIG_SPARC
/* SPARC only: fetch the MAC address from the OpenFirmware device tree.
 * Looks up the "local-mac-address" property on the PCI device's OF node
 * and, when present and exactly 6 bytes long, installs it as both the
 * active and the permanent address of the net_device.
 * NOTE(review): the success/failure return statements are elided in this
 * view; presumably 0 on success so the caller can fall back otherwise.
 */
14191 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14193 struct net_device *dev = tp->dev;
14194 struct pci_dev *pdev = tp->pdev;
14195 struct device_node *dp = pci_device_to_OF_node(pdev);
14196 const unsigned char *addr;
/* len is filled in by of_get_property() with the property size. */
14199 addr = of_get_property(dp, "local-mac-address", &len);
14200 if (addr && len == 6) {
14201 memcpy(dev->dev_addr, addr, 6);
14202 memcpy(dev->perm_addr, dev->dev_addr, 6);
/* SPARC fallback: copy the system-wide Ethernet address from the
 * machine IDPROM into both the active and permanent device address.
 * Used when neither OF properties, SRAM, NVRAM nor the MAC registers
 * yielded a valid address.
 */
14208 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14210 struct net_device *dev = tp->dev;
14212 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14213 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
/* Determine the device MAC address, trying sources in priority order:
 *   1. SPARC OpenFirmware "local-mac-address" property
 *   2. the bootcode's MAC address mailbox in NIC SRAM
 *   3. NVRAM (at a chip/function dependent offset)
 *   4. the live MAC_ADDR_0 hardware registers
 *   5. SPARC IDPROM as a last resort
 * The result is also recorded as the permanent address.
 */
14218 static int __devinit tg3_get_device_address(struct tg3 *tp)
14220 struct net_device *dev = tp->dev;
14221 u32 hi, lo, mac_offset;
14224 #ifdef CONFIG_SPARC
14225 if (!tg3_get_macaddr_sparc(tp))
/* Dual-MAC parts (5704 / 5780 class): the second MAC's address lives
 * at a different NVRAM offset; DUAL_MAC_CTRL_ID tells us which port
 * we are.  The NVRAM is reset under lock before reading.
 */
14230 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
14231 tg3_flag(tp, 5780_CLASS)) {
14232 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14234 if (tg3_nvram_lock(tp))
14235 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14237 tg3_nvram_unlock(tp);
/* 5717+ parts expose up to four PCI functions; each gets its own
 * address slot in NVRAM (offsets elided in this view for func 1+).
 */
14238 } else if (tg3_flag(tp, 5717_PLUS)) {
14239 if (PCI_FUNC(tp->pdev->devfn) & 1)
14241 if (PCI_FUNC(tp->pdev->devfn) > 1)
14242 mac_offset += 0x18c;
14243 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14246 /* First try to get it from MAC address mailbox. */
14247 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
/* 0x484b is ASCII "HK", the bootcode's "address valid" signature. */
14248 if ((hi >> 16) == 0x484b) {
14249 dev->dev_addr[0] = (hi >> 8) & 0xff;
14250 dev->dev_addr[1] = (hi >> 0) & 0xff;
14252 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14253 dev->dev_addr[2] = (lo >> 24) & 0xff;
14254 dev->dev_addr[3] = (lo >> 16) & 0xff;
14255 dev->dev_addr[4] = (lo >> 8) & 0xff;
14256 dev->dev_addr[5] = (lo >> 0) & 0xff;
14258 /* Some old bootcode may report a 0 MAC address in SRAM */
14259 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14262 /* Next, try NVRAM. */
14263 if (!tg3_flag(tp, NO_NVRAM) &&
14264 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14265 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
/* The address occupies the low 2 bytes of hi and all 4 of lo,
 * already big-endian, so a byte-wise copy lands it in order.
 */
14266 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14267 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14269 /* Finally just fetch it out of the MAC control regs. */
14271 hi = tr32(MAC_ADDR_0_HIGH);
14272 lo = tr32(MAC_ADDR_0_LOW);
14274 dev->dev_addr[5] = lo & 0xff;
14275 dev->dev_addr[4] = (lo >> 8) & 0xff;
14276 dev->dev_addr[3] = (lo >> 16) & 0xff;
14277 dev->dev_addr[2] = (lo >> 24) & 0xff;
14278 dev->dev_addr[1] = hi & 0xff;
14279 dev->dev_addr[0] = (hi >> 8) & 0xff;
14283 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14284 #ifdef CONFIG_SPARC
14285 if (!tg3_get_default_macaddr_sparc(tp))
14290 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* DMA burst "goals" used by tg3_calc_dma_bndry(): restrict bursts to a
 * single cache line, or allow them to span multiple cache lines.
 */
14294 #define BOUNDARY_SINGLE_CACHELINE 1
14295 #define BOUNDARY_MULTI_CACHELINE 2
/* Fold the architecture-appropriate DMA read/write boundary bits into
 * the DMA_RW_CTRL value 'val' and return it.  The boundary chosen
 * depends on the PCI cache line size register and on the bus flavour
 * (PCI-X, PCI Express, or conventional PCI), each of which encodes the
 * boundary with a different set of bit definitions.
 */
14297 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14299 int cacheline_size;
14303 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
/* NOTE(review): the guarding condition is elided here — presumably
 * byte == 0 (register unset) falls back to 1024; confirm upstream.
 * PCI_CACHE_LINE_SIZE is in units of 32-bit words, hence * 4.
 */
14305 cacheline_size = 1024;
14307 cacheline_size = (int) byte * 4;
14309 /* On 5703 and later chips, the boundary bits have no
14312 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14313 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14314 !tg3_flag(tp, PCI_EXPRESS))
/* Pick a per-architecture default goal: big-iron PPC64/IA64/PARISC
 * hosts tolerate multi-line bursts; SPARC64/Alpha bridges prefer
 * single-cache-line bursts.
 */
14317 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14318 goal = BOUNDARY_MULTI_CACHELINE;
14320 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14321 goal = BOUNDARY_SINGLE_CACHELINE;
/* 57765+ parts only have a single cache-alignment disable bit. */
14327 if (tg3_flag(tp, 57765_PLUS)) {
14328 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14335 /* PCI controllers on most RISC systems tend to disconnect
14336 * when a device tries to burst across a cache-line boundary.
14337 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14339 * Unfortunately, for PCI-E there are only limited
14340 * write-side controls for this, and thus for reads
14341 * we will still get the disconnects. We'll also waste
14342 * these PCI cycles for both read and write for chips
14343 * other than 5700 and 5701 which do not implement the
/* PCI-X: boundary encoded with the *_PCIX bit definitions.  The
 * switch case labels (on cacheline_size) are elided in this view.
 */
14346 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14347 switch (cacheline_size) {
14352 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14353 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14354 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14356 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14357 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14362 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14363 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14367 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14368 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
/* PCI Express: only write-side boundary control exists. */
14371 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14372 switch (cacheline_size) {
14376 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14377 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14378 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14384 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14385 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
/* Conventional PCI: both read and write boundaries, scaled to the
 * cache line size (case labels elided in this view).
 */
14389 switch (cacheline_size) {
14391 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14392 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14393 DMA_RWCTRL_WRITE_BNDRY_16);
14398 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14399 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14400 DMA_RWCTRL_WRITE_BNDRY_32);
14405 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14406 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14407 DMA_RWCTRL_WRITE_BNDRY_64);
14412 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14413 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14414 DMA_RWCTRL_WRITE_BNDRY_128);
14419 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14420 DMA_RWCTRL_WRITE_BNDRY_256);
14423 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14424 DMA_RWCTRL_WRITE_BNDRY_512);
14428 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14429 DMA_RWCTRL_WRITE_BNDRY_1024);
/* Drive one DMA transfer through the chip's internal buffer-descriptor
 * machinery between host buffer 'buf' (bus address buf_dma) and NIC
 * SRAM mbuf 0x2100.  'to_device' selects direction: non-zero = host to
 * NIC (read DMA engine), zero = NIC to host (write DMA engine).  The
 * descriptor is poked into SRAM via PCI config-space window accesses,
 * queued on the appropriate FTQ, and completion is polled (~40 loops).
 * NOTE(review): the success/timeout return statements are elided here;
 * presumably 0 on completion, negative on timeout.
 */
14438 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14440 struct tg3_internal_buffer_desc test_desc;
14441 u32 sram_dma_descs;
14444 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
/* Quiesce the completion FIFOs and both DMA engines before the test. */
14446 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14447 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14448 tw32(RDMAC_STATUS, 0);
14449 tw32(WDMAC_STATUS, 0);
14451 tw32(BUFMGR_MODE, 0);
14452 tw32(FTQ_RESET, 0);
/* Build the internal descriptor: host address, NIC mbuf, length. */
14454 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14455 test_desc.addr_lo = buf_dma & 0xffffffff;
14456 test_desc.nic_mbuf = 0x00002100;
14457 test_desc.len = size;
14460 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
14461 * the *second* time the tg3 driver was getting loaded after an
14464 * Broadcom tells me:
14465 * ...the DMA engine is connected to the GRC block and a DMA
14466 * reset may affect the GRC block in some unpredictable way...
14467 * The behavior of resets to individual blocks has not been tested.
14469 * Broadcom noted the GRC reset will also reset all sub-components.
/* Direction-specific queue/completion IDs and engine enable. */
14472 test_desc.cqid_sqid = (13 << 8) | 2;
14474 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14477 test_desc.cqid_sqid = (16 << 8) | 7;
14479 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14482 test_desc.flags = 0x00000005;
/* Copy the descriptor into NIC SRAM word by word through the PCI
 * config-space memory window, then close the window.
 */
14484 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14487 val = *(((u32 *)&test_desc) + i);
14488 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14489 sram_dma_descs + (i * sizeof(u32)));
14490 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14492 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* Kick the descriptor onto the high-priority DMA FTQ. */
14495 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14497 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
/* Poll the matching completion FIFO for our descriptor address. */
14500 for (i = 0; i < 40; i++) {
14504 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14506 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14507 if ((val & 0xffff) == sram_dma_descs) {
/* Size of the coherent buffer DMA'd back and forth by tg3_test_dma(). */
14518 #define TEST_BUFFER_SIZE 0x2000
/* Host bridges known to expose the 5700/5701 write-DMA bug even when
 * the loopback test passes; checked via pci_dev_present() in
 * tg3_test_dma() to force the 16-byte write boundary anyway.
 */
14520 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14521 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
/* Program chip/bus specific defaults into tp->dma_rwctrl, then (on
 * 5700/5701 only) run a write+read DMA loopback through NIC SRAM at
 * 0x2100 with the maximum write burst to provoke the known write-DMA
 * corruption bug.  If corruption is observed — or a known-bad host
 * bridge is present — the write boundary is clamped to 16 bytes;
 * otherwise the boundary computed by tg3_calc_dma_bndry() is kept.
 */
14525 static int __devinit tg3_test_dma(struct tg3 *tp)
14527 dma_addr_t buf_dma;
14528 u32 *buf, saved_dma_rwctrl;
14531 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14532 &buf_dma, GFP_KERNEL);
/* Baseline PCI read/write command codes, then boundary bits. */
14538 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14539 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14541 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14543 if (tg3_flag(tp, 57765_PLUS))
14546 if (tg3_flag(tp, PCI_EXPRESS)) {
14547 /* DMA read watermark not used on PCIE */
14548 tp->dma_rwctrl |= 0x00180000;
/* Conventional PCI: watermark values differ per ASIC family. */
14549 } else if (!tg3_flag(tp, PCIX_MODE)) {
14550 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14551 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14552 tp->dma_rwctrl |= 0x003f0000;
14554 tp->dma_rwctrl |= 0x003f000f;
/* PCI-X 5703/5704: watermarks depend on the bus clock reading. */
14556 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14557 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14558 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14559 u32 read_water = 0x7;
14561 /* If the 5704 is behind the EPB bridge, we can
14562 * do the less restrictive ONE_DMA workaround for
14563 * better performance.
14565 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14566 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14567 tp->dma_rwctrl |= 0x8000;
14568 else if (ccval == 0x6 || ccval == 0x7)
14569 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14571 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14573 /* Set bit 23 to enable PCIX hw bug fix */
14575 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14576 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14578 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14579 /* 5780 always in PCIX mode */
14580 tp->dma_rwctrl |= 0x00144000;
14581 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14582 /* 5714 always in PCIX mode */
14583 tp->dma_rwctrl |= 0x00148000;
14585 tp->dma_rwctrl |= 0x001b000f;
/* 5703/5704 reuse the low nibble for other purposes — clear it. */
14589 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14590 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14591 tp->dma_rwctrl &= 0xfffffff0;
14593 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14594 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14595 /* Remove this if it causes problems for some boards. */
14596 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14598 /* On 5700/5701 chips, we need to set this bit.
14599 * Otherwise the chip will issue cacheline transactions
14600 * to streamable DMA memory with not all the byte
14601 * enables turned on. This is an error on several
14602 * RISC PCI controllers, in particular sparc64.
14604 * On 5703/5704 chips, this bit has been reassigned
14605 * a different meaning. In particular, it is used
14606 * on those chips to enable a PCI-X workaround.
14608 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14611 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14614 /* Unneeded, already done by tg3_get_invariants. */
14615 tg3_switch_clocks(tp);
/* Only 5700/5701 need the actual loopback test below. */
14618 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14619 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14622 /* It is best to perform DMA test with maximum write burst size
14623 * to expose the 5700/5701 write DMA bug.
14625 saved_dma_rwctrl = tp->dma_rwctrl;
14626 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14627 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
/* Fill the test buffer with a known pattern (pattern line elided). */
14632 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14635 /* Send the buffer to the chip. */
14636 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14638 dev_err(&tp->pdev->dev,
14639 "%s: Buffer write failed. err = %d\n",
14645 /* validate data reached card RAM correctly. */
14646 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14648 tg3_read_mem(tp, 0x2100 + (i*4), &val);
/* NOTE(review): comparison converts val with le32_to_cpu() but the
 * printk below prints the raw (unconverted) val with %d — the logged
 * value can look wrong on big-endian hosts; consider converting.
 */
14649 if (le32_to_cpu(val) != p[i]) {
14650 dev_err(&tp->pdev->dev,
14651 "%s: Buffer corrupted on device! "
14652 "(%d != %d)\n", __func__, val, i);
14653 /* ret = -ENODEV here? */
14658 /* Now read it back. */
14659 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14661 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14662 "err = %d\n", __func__, ret);
/* Verify the round-tripped data; on the first mismatch, tighten the
 * write boundary to 16 bytes and retry (retry control flow elided).
 */
14667 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14671 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14672 DMA_RWCTRL_WRITE_BNDRY_16) {
14673 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14674 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14675 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14678 dev_err(&tp->pdev->dev,
14679 "%s: Buffer corrupted on read back! "
14680 "(%d != %d)\n", __func__, p[i], i);
/* Loop ran to completion => the data round-tripped intact. */
14686 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14692 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14693 DMA_RWCTRL_WRITE_BNDRY_16) {
14694 /* DMA test passed without adjusting DMA boundary,
14695 * now look for chipsets that are known to expose the
14696 * DMA bug without failing the test.
14698 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14699 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14700 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14702 /* Safe to use the calculated DMA boundary. */
14703 tp->dma_rwctrl = saved_dma_rwctrl;
14706 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14710 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
/* Populate tp->bufmgr_config with the buffer-manager watermark defaults
 * for this chip family.  Three tiers are distinguished: 57765-and-later,
 * 5705-and-later (with a 5906 sub-case), and the original 570x parts.
 * Jumbo-frame variants are set alongside the standard-MTU values.
 */
14715 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14717 if (tg3_flag(tp, 57765_PLUS)) {
14718 tp->bufmgr_config.mbuf_read_dma_low_water =
14719 DEFAULT_MB_RDMA_LOW_WATER_5705;
14720 tp->bufmgr_config.mbuf_mac_rx_low_water =
14721 DEFAULT_MB_MACRX_LOW_WATER_57765;
14722 tp->bufmgr_config.mbuf_high_water =
14723 DEFAULT_MB_HIGH_WATER_57765;
14725 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14726 DEFAULT_MB_RDMA_LOW_WATER_5705;
14727 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14728 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14729 tp->bufmgr_config.mbuf_high_water_jumbo =
14730 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14731 } else if (tg3_flag(tp, 5705_PLUS)) {
14732 tp->bufmgr_config.mbuf_read_dma_low_water =
14733 DEFAULT_MB_RDMA_LOW_WATER_5705;
14734 tp->bufmgr_config.mbuf_mac_rx_low_water =
14735 DEFAULT_MB_MACRX_LOW_WATER_5705;
14736 tp->bufmgr_config.mbuf_high_water =
14737 DEFAULT_MB_HIGH_WATER_5705;
/* 5906 has a smaller mbuf pool; override the RX watermarks. */
14738 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14739 tp->bufmgr_config.mbuf_mac_rx_low_water =
14740 DEFAULT_MB_MACRX_LOW_WATER_5906;
14741 tp->bufmgr_config.mbuf_high_water =
14742 DEFAULT_MB_HIGH_WATER_5906;
14745 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14746 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14747 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14748 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14749 tp->bufmgr_config.mbuf_high_water_jumbo =
14750 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
/* Legacy 5700/5701/5703/5704 defaults. */
14752 tp->bufmgr_config.mbuf_read_dma_low_water =
14753 DEFAULT_MB_RDMA_LOW_WATER;
14754 tp->bufmgr_config.mbuf_mac_rx_low_water =
14755 DEFAULT_MB_MACRX_LOW_WATER;
14756 tp->bufmgr_config.mbuf_high_water =
14757 DEFAULT_MB_HIGH_WATER;
14759 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14760 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14761 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14762 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14763 tp->bufmgr_config.mbuf_high_water_jumbo =
14764 DEFAULT_MB_HIGH_WATER_JUMBO;
/* The DMA descriptor watermarks are family-independent. */
14767 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14768 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14771 static char * __devinit tg3_phy_string(struct tg3 *tp)
14773 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14774 case TG3_PHY_ID_BCM5400: return "5400";
14775 case TG3_PHY_ID_BCM5401: return "5401";
14776 case TG3_PHY_ID_BCM5411: return "5411";
14777 case TG3_PHY_ID_BCM5701: return "5701";
14778 case TG3_PHY_ID_BCM5703: return "5703";
14779 case TG3_PHY_ID_BCM5704: return "5704";
14780 case TG3_PHY_ID_BCM5705: return "5705";
14781 case TG3_PHY_ID_BCM5750: return "5750";
14782 case TG3_PHY_ID_BCM5752: return "5752";
14783 case TG3_PHY_ID_BCM5714: return "5714";
14784 case TG3_PHY_ID_BCM5780: return "5780";
14785 case TG3_PHY_ID_BCM5755: return "5755";
14786 case TG3_PHY_ID_BCM5787: return "5787";
14787 case TG3_PHY_ID_BCM5784: return "5784";
14788 case TG3_PHY_ID_BCM5756: return "5722/5756";
14789 case TG3_PHY_ID_BCM5906: return "5906";
14790 case TG3_PHY_ID_BCM5761: return "5761";
14791 case TG3_PHY_ID_BCM5718C: return "5718C";
14792 case TG3_PHY_ID_BCM5718S: return "5718S";
14793 case TG3_PHY_ID_BCM57765: return "57765";
14794 case TG3_PHY_ID_BCM5719C: return "5719C";
14795 case TG3_PHY_ID_BCM5720C: return "5720C";
14796 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14797 case 0: return "serdes";
14798 default: return "unknown";
/* Format a human-readable bus description ("PCI Express",
 * "PCIX:133MHz", "PCI:33MHz:64-bit", ...) into the caller-provided
 * buffer 'str'.  For PCI-X, the speed is decoded from the low 5 bits
 * of TG3PCI_CLOCK_CTRL; for plain PCI, from the PCI_HIGH_SPEED and
 * PCI_32BIT flags.  NOTE(review): the return statements are elided in
 * this view; presumably the function returns 'str'.  The caller must
 * supply a buffer large enough for the longest string produced here.
 */
14802 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14804 if (tg3_flag(tp, PCI_EXPRESS)) {
14805 strcpy(str, "PCI Express");
14807 } else if (tg3_flag(tp, PCIX_MODE)) {
14808 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14810 strcpy(str, "PCIX:");
/* 5704 CIOBE boards run 133MHz regardless of the clock reading. */
14812 if ((clock_ctrl == 7) ||
14813 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14814 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14815 strcat(str, "133MHz");
14816 else if (clock_ctrl == 0)
14817 strcat(str, "33MHz");
14818 else if (clock_ctrl == 2)
14819 strcat(str, "50MHz");
14820 else if (clock_ctrl == 4)
14821 strcat(str, "66MHz");
14822 else if (clock_ctrl == 6)
14823 strcat(str, "100MHz");
/* Conventional PCI: speed and width from the PCISTATE flags. */
14825 strcpy(str, "PCI:");
14826 if (tg3_flag(tp, PCI_HIGH_SPEED))
14827 strcat(str, "66MHz");
14829 strcat(str, "33MHz");
14831 if (tg3_flag(tp, PCI_32BIT))
14832 strcat(str, ":32-bit");
14834 strcat(str, ":64-bit");
/* Locate the sibling PCI function of a dual-port device by scanning
 * the other functions in the same slot (devfn with the function bits
 * masked off).  Per the comment below, a single-port-configured 5704
 * falls back to tp->pdev itself, and the pci_get_slot() refcount is
 * intentionally not kept elevated (handling elided in this view).
 */
14838 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14840 struct pci_dev *peer;
14841 unsigned int func, devnr = tp->pdev->devfn & ~7;
14843 for (func = 0; func < 8; func++) {
14844 peer = pci_get_slot(tp->pdev->bus, devnr | func);
/* First function in the slot that is not ourselves is the peer. */
14845 if (peer && peer != tp->pdev)
14849 /* 5704 can be configured in single-port mode, set peer to
14850 * tp->pdev in that case.
14858 * We don't need to keep the refcount elevated; there's no way
14859 * to remove one half of this device without removing the other
/* Initialize tp->coal, the ethtool interrupt-coalescing defaults, to
 * the driver's "low latency" tick/frame settings.  Values are adjusted
 * when the chip uses the CLRTICK workaround mode, and the per-IRQ /
 * statistics-block fields are zeroed on 5705+ parts, which do not
 * support them.
 */
14866 static void __devinit tg3_init_coal(struct tg3 *tp)
14868 struct ethtool_coalesce *ec = &tp->coal;
14870 memset(ec, 0, sizeof(*ec));
14871 ec->cmd = ETHTOOL_GCOALESCE;
14872 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14873 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14874 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14875 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14876 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14877 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14878 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14879 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14880 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
/* CLRTICK mode needs the alternate tick constants. */
14882 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14883 HOSTCC_MODE_CLRTICK_TXBD)) {
14884 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14885 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14886 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14887 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
/* 5705+ hardware lacks the in-IRQ and stats coalescing knobs. */
14890 if (tg3_flag(tp, 5705_PLUS)) {
14891 ec->rx_coalesce_usecs_irq = 0;
14892 ec->tx_coalesce_usecs_irq = 0;
14893 ec->stats_block_coalesce_usecs = 0;
/* Net-device entry points registered with the networking core in
 * tg3_init_one().  All referenced handlers are defined elsewhere in
 * this file.  .ndo_poll_controller is only provided when netpoll
 * support is compiled in.
 */
14897 static const struct net_device_ops tg3_netdev_ops = {
14898 .ndo_open = tg3_open,
14899 .ndo_stop = tg3_close,
14900 .ndo_start_xmit = tg3_start_xmit,
14901 .ndo_get_stats64 = tg3_get_stats64,
14902 .ndo_validate_addr = eth_validate_addr,
14903 .ndo_set_multicast_list = tg3_set_rx_mode,
14904 .ndo_set_mac_address = tg3_set_mac_addr,
14905 .ndo_do_ioctl = tg3_ioctl,
14906 .ndo_tx_timeout = tg3_tx_timeout,
14907 .ndo_change_mtu = tg3_change_mtu,
14908 .ndo_fix_features = tg3_fix_features,
14909 .ndo_set_features = tg3_set_features,
14910 #ifdef CONFIG_NET_POLL_CONTROLLER
14911 .ndo_poll_controller = tg3_poll_controller,
/* tg3_init_one() - PCI probe routine for Tigon3 devices.
 * @pdev: the PCI device being probed
 * @ent:  matching entry from tg3_pci_tbl
 *
 * Enables and maps the device, allocates the net_device, reads chip
 * invariants, configures DMA masks and TSO capability, resets the chip
 * if a pre-OS driver left it running, sets up per-vector mailboxes and
 * finally registers the netdev. Returns 0 on success or a negative
 * errno, unwinding via the err_out_* labels on failure.
 *
 * NOTE(review): many intermediate lines (error checks, braces, labels)
 * are elided in this chunk; the goto targets referenced below are only
 * partially visible.
 */
14915 static int __devinit tg3_init_one(struct pci_dev *pdev,
14916 const struct pci_device_id *ent)
14918 struct net_device *dev;
14920 int i, err, pm_cap;
14921 u32 sndmbx, rcvmbx, intmbx;
14923 u64 dma_mask, persist_dma_mask;
14924 u32 hw_features = 0;
/* Print the driver version banner once, regardless of device count. */
14926 printk_once(KERN_INFO "%s\n", version);
14928 err = pci_enable_device(pdev);
14930 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14934 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14936 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14937 goto err_out_disable_pdev;
14940 pci_set_master(pdev);
14942 /* Find power-management capability. */
14943 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14945 dev_err(&pdev->dev,
14946 "Cannot find Power Management capability, aborting\n");
14948 goto err_out_free_res;
/* Multi-queue netdev: one queue per possible interrupt vector. */
14951 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14953 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14955 goto err_out_free_res;
14958 SET_NETDEV_DEV(dev, &pdev->dev);
14960 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
14962 tp = netdev_priv(dev);
14965 tp->pm_cap = pm_cap;
14966 tp->rx_mode = TG3_DEF_RX_MODE;
14967 tp->tx_mode = TG3_DEF_TX_MODE;
/* Honor the tg3_debug module parameter if set; else use the default. */
14970 tp->msg_enable = tg3_debug;
14972 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14974 /* The word/byte swap controls here control register access byte
14975 * swapping. DMA data byte swapping is controlled in the GRC_MODE
14978 tp->misc_host_ctrl =
14979 MISC_HOST_CTRL_MASK_PCI_INT |
14980 MISC_HOST_CTRL_WORD_SWAP |
14981 MISC_HOST_CTRL_INDIR_ACCESS |
14982 MISC_HOST_CTRL_PCISTATE_RW;
14984 /* The NONFRM (non-frame) byte/word swap controls take effect
14985 * on descriptor entries, anything which isn't packet data.
14987 * The StrongARM chips on the board (one for tx, one for rx)
14988 * are running in big-endian mode.
14990 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14991 GRC_MODE_WSWAP_NONFRM_DATA);
14992 #ifdef __BIG_ENDIAN
14993 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14995 spin_lock_init(&tp->lock);
14996 spin_lock_init(&tp->indirect_lock);
14997 INIT_WORK(&tp->reset_task, tg3_reset_task);
/* Map the primary register BAR; everything below needs MMIO access. */
14999 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15001 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15003 goto err_out_free_dev;
15006 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15007 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15009 dev->ethtool_ops = &tg3_ethtool_ops;
15010 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15011 dev->netdev_ops = &tg3_netdev_ops;
15012 dev->irq = pdev->irq;
15014 err = tg3_get_invariants(tp);
15016 dev_err(&pdev->dev,
15017 "Problem fetching invariants of chip, aborting\n");
15018 goto err_out_iounmap;
15021 /* The EPB bridge inside 5714, 5715, and 5780 and any
15022 * device behind the EPB cannot support DMA addresses > 40-bit.
15023 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15024 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15025 * do DMA address check in tg3_start_xmit().
15027 if (tg3_flag(tp, IS_5788))
15028 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15029 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15030 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15031 #ifdef CONFIG_HIGHMEM
15032 dma_mask = DMA_BIT_MASK(64);
15035 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15037 /* Configure DMA attributes. */
15038 if (dma_mask > DMA_BIT_MASK(32)) {
15039 err = pci_set_dma_mask(pdev, dma_mask);
15041 dev->features |= NETIF_F_HIGHDMA;
15042 err = pci_set_consistent_dma_mask(pdev,
15045 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15046 "DMA for consistent allocations\n");
15047 goto err_out_iounmap;
/* Fall back to a 32-bit mask when the wide mask was rejected. */
15051 if (err || dma_mask == DMA_BIT_MASK(32)) {
15052 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15054 dev_err(&pdev->dev,
15055 "No usable DMA configuration, aborting\n");
15056 goto err_out_iounmap;
15060 tg3_init_bufmgr_config(tp);
15062 /* Selectively allow TSO based on operating conditions */
15063 if ((tg3_flag(tp, HW_TSO_1) ||
15064 tg3_flag(tp, HW_TSO_2) ||
15065 tg3_flag(tp, HW_TSO_3)) ||
15066 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
15067 tg3_flag_set(tp, TSO_CAPABLE);
15069 tg3_flag_clear(tp, TSO_CAPABLE);
15070 tg3_flag_clear(tp, TSO_BUG);
15071 tp->fw_needed = NULL;
/* 5701 A0 silicon needs the main firmware image regardless of TSO. */
15074 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
15075 tp->fw_needed = FIRMWARE_TG3;
15077 /* TSO is on by default on chips that support hardware TSO.
15078 * Firmware TSO on older chips gives lower performance, so it
15079 * is off by default, but can be enabled using ethtool.
15081 if ((tg3_flag(tp, HW_TSO_1) ||
15082 tg3_flag(tp, HW_TSO_2) ||
15083 tg3_flag(tp, HW_TSO_3)) &&
15084 (dev->features & NETIF_F_IP_CSUM))
15085 hw_features |= NETIF_F_TSO;
15086 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15087 if (dev->features & NETIF_F_IPV6_CSUM)
15088 hw_features |= NETIF_F_TSO6;
15089 if (tg3_flag(tp, HW_TSO_3) ||
15090 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15091 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15092 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15093 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15094 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15095 hw_features |= NETIF_F_TSO_ECN;
15098 dev->hw_features |= hw_features;
15099 dev->features |= hw_features;
15100 dev->vlan_features |= hw_features;
15103 * Add loopback capability only for a subset of devices that support
15104 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15105 * loopback for the remaining devices.
15107 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15108 !tg3_flag(tp, CPMU_PRESENT))
15109 /* Add the loopback capability */
15110 dev->hw_features |= NETIF_F_LOOPBACK;
/* 5705 A1 without TSO on a slow bus: shrink the rx ring to 64. */
15112 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15113 !tg3_flag(tp, TSO_CAPABLE) &&
15114 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15115 tg3_flag_set(tp, MAX_RXPEND_64);
15116 tp->rx_pending = 63;
15119 err = tg3_get_device_address(tp);
15121 dev_err(&pdev->dev,
15122 "Could not obtain valid ethernet address, aborting\n");
15123 goto err_out_iounmap;
/* Devices with an APE (management processor) need BAR 2 mapped too. */
15126 if (tg3_flag(tp, ENABLE_APE)) {
15127 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15128 if (!tp->aperegs) {
15129 dev_err(&pdev->dev,
15130 "Cannot map APE registers, aborting\n");
15132 goto err_out_iounmap;
15135 tg3_ape_lock_init(tp);
15137 if (tg3_flag(tp, ENABLE_ASF))
15138 tg3_read_dash_ver(tp);
15142 * Reset chip in case UNDI or EFI driver did not shutdown
15143 * DMA self test will enable WDMAC and we'll see (spurious)
15144 * pending DMA on the PCI bus at that point.
15146 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15147 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15148 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15149 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15152 err = tg3_test_dma(tp);
15154 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15155 goto err_out_apeunmap;
/* Seed per-NAPI mailbox registers starting from vector 0's mailboxes. */
15158 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15159 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15160 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15161 for (i = 0; i < tp->irq_max; i++) {
15162 struct tg3_napi *tnapi = &tp->napi[i];
15165 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15167 tnapi->int_mbox = intmbx;
15173 tnapi->consmbox = rcvmbx;
15174 tnapi->prodmbox = sndmbx;
/* Vector 0 uses the generic coalesce-now bit; others are per-vector. */
15177 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15179 tnapi->coal_now = HOSTCC_MODE_NOW;
15181 if (!tg3_flag(tp, SUPPORT_MSIX))
15185 * If we support MSIX, we'll be using RSS. If we're using
15186 * RSS, the first vector only handles link interrupts and the
15187 * remaining vectors handle rx and tx interrupts. Reuse the
15188 * mailbox values for the next iteration. The values we setup
15189 * above are still useful for the single vectored mode.
15204 pci_set_drvdata(pdev, dev);
15206 err = register_netdev(dev);
15208 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15209 goto err_out_apeunmap;
15212 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15213 tp->board_part_number,
15214 tp->pci_chip_rev_id,
15215 tg3_bus_string(tp, str),
15218 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15219 struct phy_device *phydev;
15220 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15222 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15223 phydev->drv->name, dev_name(&phydev->dev));
15227 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15228 ethtype = "10/100Base-TX";
15229 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15230 ethtype = "1000Base-SX";
15232 ethtype = "10/100/1000Base-T";
15234 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15235 "(WireSpeed[%d], EEE[%d])\n",
15236 tg3_phy_string(tp), ethtype,
15237 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15238 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15241 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15242 (dev->features & NETIF_F_RXCSUM) != 0,
15243 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15244 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15245 tg3_flag(tp, ENABLE_ASF) != 0,
15246 tg3_flag(tp, TSO_CAPABLE) != 0);
15247 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15249 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15250 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
/* Snapshot config space so PCI error recovery can restore it later. */
15252 pci_save_state(pdev);
/* Error unwind path (labels partially elided in this view). */
15258 iounmap(tp->aperegs);
15259 tp->aperegs = NULL;
15272 pci_release_regions(pdev);
15274 err_out_disable_pdev:
15275 pci_disable_device(pdev);
15276 pci_set_drvdata(pdev, NULL);
/* tg3_remove_one() - PCI remove routine; mirrors tg3_init_one().
 * Releases firmware, stops the reset worker, unregisters the netdev and
 * unmaps/releases all PCI resources acquired at probe time.
 */
15280 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15282 struct net_device *dev = pci_get_drvdata(pdev);
15285 struct tg3 *tp = netdev_priv(dev);
15288 release_firmware(tp->fw);
/* Make sure no reset work is still queued before tearing down. */
15290 cancel_work_sync(&tp->reset_task);
15292 if (!tg3_flag(tp, USE_PHYLIB)) {
15297 unregister_netdev(dev);
15299 iounmap(tp->aperegs);
15300 tp->aperegs = NULL;
15307 pci_release_regions(pdev);
15308 pci_disable_device(pdev);
15309 pci_set_drvdata(pdev, NULL);
15313 #ifdef CONFIG_PM_SLEEP
/* tg3_suspend() - dev_pm_ops suspend hook (wired via SIMPLE_DEV_PM_OPS).
 * Quiesces the interface (stop NAPI/queues, kill the timer, disable
 * interrupts), halts the chip and prepares it for power-down. If
 * power-down preparation fails, the hardware is restarted so the
 * device remains usable.
 */
15314 static int tg3_suspend(struct device *device)
15316 struct pci_dev *pdev = to_pci_dev(device);
15317 struct net_device *dev = pci_get_drvdata(pdev);
15318 struct tg3 *tp = netdev_priv(dev);
/* Nothing to quiesce if the interface is down. */
15321 if (!netif_running(dev))
15324 flush_work_sync(&tp->reset_task);
15326 tg3_netif_stop(tp);
15328 del_timer_sync(&tp->timer);
15330 tg3_full_lock(tp, 1);
15331 tg3_disable_ints(tp);
15332 tg3_full_unlock(tp);
15334 netif_device_detach(dev);
15336 tg3_full_lock(tp, 0);
15337 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15338 tg3_flag_clear(tp, INIT_COMPLETE);
15339 tg3_full_unlock(tp);
15341 err = tg3_power_down_prepare(tp);
/* Recovery path: power-down prep failed, bring the hardware back up. */
15345 tg3_full_lock(tp, 0);
15347 tg3_flag_set(tp, INIT_COMPLETE);
15348 err2 = tg3_restart_hw(tp, 1);
15352 tp->timer.expires = jiffies + tp->timer_offset;
15353 add_timer(&tp->timer);
15355 netif_device_attach(dev);
15356 tg3_netif_start(tp);
15359 tg3_full_unlock(tp);
/* tg3_resume() - dev_pm_ops resume hook; inverse of tg3_suspend().
 * Re-attaches the netdev, restarts the hardware, re-arms the periodic
 * timer and restarts NAPI/queues. No-op if the interface is down.
 */
15368 static int tg3_resume(struct device *device)
15370 struct pci_dev *pdev = to_pci_dev(device);
15371 struct net_device *dev = pci_get_drvdata(pdev);
15372 struct tg3 *tp = netdev_priv(dev);
15375 if (!netif_running(dev))
15378 netif_device_attach(dev);
15380 tg3_full_lock(tp, 0);
15382 tg3_flag_set(tp, INIT_COMPLETE);
15383 err = tg3_restart_hw(tp, 1);
15387 tp->timer.expires = jiffies + tp->timer_offset;
15388 add_timer(&tp->timer);
15390 tg3_netif_start(tp);
15393 tg3_full_unlock(tp);
/* Bind suspend/resume into a dev_pm_ops; TG3_PM_OPS is plugged into
 * tg3_driver.driver.pm. The NULL definition below belongs to the
 * !CONFIG_PM_SLEEP branch (the #else line is elided in this view).
 */
15401 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15402 #define TG3_PM_OPS (&tg3_pm_ops)
15406 #define TG3_PM_OPS NULL
15408 #endif /* CONFIG_PM_SLEEP */
15411 * tg3_io_error_detected - called when PCI error is detected
15412 * @pdev: Pointer to PCI device
15413 * @state: The current pci connection state
15415 * This function is called after a PCI bus error affecting
15416 * this device has been detected.
/* Returns PCI_ERS_RESULT_NEED_RESET normally, or
 * PCI_ERS_RESULT_DISCONNECT on permanent failure.
 */
15418 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15419 pci_channel_state_t state)
15421 struct net_device *netdev = pci_get_drvdata(pdev);
15422 struct tg3 *tp = netdev_priv(netdev);
15423 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15425 netdev_info(netdev, "PCI I/O error detected\n");
15429 if (!netif_running(netdev))
15434 tg3_netif_stop(tp);
15436 del_timer_sync(&tp->timer);
15437 tg3_flag_clear(tp, RESTART_TIMER);
15439 /* Want to make sure that the reset task doesn't run */
15440 cancel_work_sync(&tp->reset_task);
15441 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15442 tg3_flag_clear(tp, RESTART_TIMER);
15444 netif_device_detach(netdev);
15446 /* Clean up software state, even if MMIO is blocked */
15447 tg3_full_lock(tp, 0);
15448 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15449 tg3_full_unlock(tp);
/* Permanent failure: tell the core to disconnect instead of reset. */
15452 if (state == pci_channel_io_perm_failure)
15453 err = PCI_ERS_RESULT_DISCONNECT;
15455 pci_disable_device(pdev);
15463 * tg3_io_slot_reset - called after the pci bus has been reset.
15464 * @pdev: Pointer to PCI device
15466 * Restart the card from scratch, as if from a cold-boot.
15467 * At this point, the card has experienced a hard reset,
15468 * followed by fixups by BIOS, and has its config space
15469 * set up identically to what it was at cold boot.
/* Returns PCI_ERS_RESULT_RECOVERED on success, else
 * PCI_ERS_RESULT_DISCONNECT.
 */
15471 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15473 struct net_device *netdev = pci_get_drvdata(pdev);
15474 struct tg3 *tp = netdev_priv(netdev);
15475 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15480 if (pci_enable_device(pdev)) {
15481 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
/* Restore the config space saved by pci_save_state() at probe time,
 * then re-save it for any future error recovery cycle. */
15485 pci_set_master(pdev);
15486 pci_restore_state(pdev);
15487 pci_save_state(pdev);
15489 if (!netif_running(netdev)) {
15490 rc = PCI_ERS_RESULT_RECOVERED;
15494 err = tg3_power_up(tp);
15496 netdev_err(netdev, "Failed to restore register access.\n");
15500 rc = PCI_ERS_RESULT_RECOVERED;
15509 * tg3_io_resume - called when traffic can start flowing again.
15510 * @pdev: Pointer to PCI device
15512 * This callback is called when the error recovery driver tells
15513 * us that its OK to resume normal operation.
15515 static void tg3_io_resume(struct pci_dev *pdev)
15517 struct net_device *netdev = pci_get_drvdata(pdev);
15518 struct tg3 *tp = netdev_priv(netdev);
15523 if (!netif_running(netdev))
/* Bring the hardware back up under the full lock, then re-attach
 * the netdev, re-arm the timer and restart NAPI/queues. */
15526 tg3_full_lock(tp, 0);
15527 tg3_flag_set(tp, INIT_COMPLETE);
15528 err = tg3_restart_hw(tp, 1);
15529 tg3_full_unlock(tp);
15531 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15535 netif_device_attach(netdev);
15537 tp->timer.expires = jiffies + tp->timer_offset;
15538 add_timer(&tp->timer);
15540 tg3_netif_start(tp);
/* PCI error-recovery callbacks (AER); referenced by tg3_driver below.
 * NOTE(review): the closing "};" is elided in this view.
 */
15548 static struct pci_error_handlers tg3_err_handler = {
15549 .error_detected = tg3_io_error_detected,
15550 .slot_reset = tg3_io_slot_reset,
15551 .resume = tg3_io_resume
/* Top-level PCI driver descriptor registered by tg3_init().
 * NOTE(review): the closing "};" is elided in this view.
 */
15554 static struct pci_driver tg3_driver = {
15555 .name = DRV_MODULE_NAME,
15556 .id_table = tg3_pci_tbl,
15557 .probe = tg3_init_one,
15558 .remove = __devexit_p(tg3_remove_one),
15559 .err_handler = &tg3_err_handler,
15560 .driver.pm = TG3_PM_OPS,
/* Module entry point: register the PCI driver with the core. */
15563 static int __init tg3_init(void)
15565 return pci_register_driver(&tg3_driver);
/* Module exit point: unregister the PCI driver. */
15568 static void __exit tg3_cleanup(void)
15570 pci_unregister_driver(&tg3_driver);
/* Hook module load/unload to the init/cleanup routines above. */
15573 module_init(tg3_init);
15574 module_exit(tg3_cleanup);