/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2010 Broadcom Corporation.
 *
 * Derived from proprietary unpublished source code,
 * Copyright (C) 2000-2003 Broadcom Corporation.
 *
 * Permission is hereby granted for the distribution of this firmware
 * data in hexadecimal or equivalent format, provided this copyright
 * notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <asm/idprom.h>

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define DRV_MODULE_NAME		"tg3"
#define TG3_MIN_NUM		112
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"July 11, 2010"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	\

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	(((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
	  !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_ext_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_RX_DMA_ALIGN		16
#define TG3_RX_HEADROOM			ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_RING_SIZE)

#define TG3_RX_JMB_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },
	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },
	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },
	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

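/* On chips that cannot be trusted with direct memory-mapped register
 * writes (see the PCIX_TARGET_HWBUG and ICH_WORKAROUND flags used in
 * _tw32_flush below), register and mailbox traffic is routed through
 * the PCI configuration window (TG3PCI_REG_BASE_ADDR / TG3PCI_REG_DATA).
 * The helpers below serialize that two-step access with tp->indirect_lock.
 */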
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
		tg3_write32(tp, off, val);

	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */

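/* Flush a mailbox write by reading it back, except when the write
 * already goes through a non-posted path (MBOX_WRITE_REORDER or the
 * ICH workaround), in which case the read-back is unnecessary.
 */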
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
	void __iomem *mbox = tp->regs + off;
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

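/* Shorthand accessors.  All register, mailbox and memory-window traffic
 * funnels through the tp->read32/write32 function pointers so the
 * chip-specific quirks above stay in one place.
 */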
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}

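/* Acquire one of the APE (management firmware) hardware locks by writing
 * the driver's request code and polling the corresponding GRANT register;
 * if the grant never shows up the request is revoked so the APE is not
 * left waiting on us.
 */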
static int tg3_ape_lock(struct tg3 *tp, int locknum)
	u32 status, req, gnt;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))

	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))

	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

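/* Re-enable chip interrupts: unmask PCI interrupts in the misc host
 * control register and write the last processed tag back to each
 * vector's interrupt mailbox so the chip knows where the driver left off.
 */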
static void tg3_enable_ints(struct tg3 *tp)
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);

	tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);

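/* Returns nonzero if the status block indicates a link change or RX/TX
 * work that the NAPI handler still has to process.
 */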
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)

	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)

/*
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);

static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;

static void tg3_switch_clocks(struct tg3 *tp)
	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);

#define PHY_BUSY_LOOPS	5000

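/* MDIO access through the MAC's MI_COM register: build a frame with the
 * PHY and register addresses, kick it off, then poll MI_COM_BUSY for up
 * to PHY_BUSY_LOOPS iterations.  Autopolling is temporarily disabled
 * around the access and restored afterwards.
 */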
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		(tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));

	frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			frame_val = tr32(MAC_MI_COM);

		*val = frame_val & MI_COM_DATA_MASK;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
	if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		(tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));

	frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		     MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			frame_val = tr32(MAC_MI_COM);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);

static int tg3_bmcr_reset(struct tg3 *tp)
	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);

		err = tg3_readphy(tp, MII_BMCR, &phy_control);

		if ((phy_control & BMCR_RESET) == 0) {

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
	struct tg3 *tp = bp->priv;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))

	spin_unlock_bh(&tp->lock);

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
	struct tg3 *tp = bp->priv;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))

	spin_unlock_bh(&tp->lock);

static int tg3_mdio_reset(struct mii_bus *bp)

static void tg3_mdio_config_5785(struct tg3 *tp)
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	tw32(MAC_EXT_RGMII_MODE, val);

static void tg3_mdio_start(struct tg3 *tp)
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

static int tg3_mdio_init(struct tg3 *tp)
	struct phy_device *phydev;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		tp->phy_addr = TG3_PHY_MII_ADDR;

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)

	tp->mdio_bus->name = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv = tp;
	tp->mdio_bus->parent = &tp->pdev->dev;
	tp->mdio_bus->read = &tg3_mdio_read;
	tp->mdio_bus->write = &tg3_mdio_write;
	tp->mdio_bus->reset = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))

	i = mdiobus_register(tp->mdio_bus);
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

static void tg3_mdio_fini(struct tg3 *tp)
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
	unsigned int delay_cnt;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
			     usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
	if (time_remain < 0)

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	if (!tg3_readphy(tp, MII_BMCR, &reg))
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);

static void tg3_link_report(struct tg3 *tp)
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			    (tp->link_config.active_speed == SPEED_100 ?
			    (tp->link_config.active_duplex == DUPLEX_FULL ?

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
		tg3_ump_link_report(tp);

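/* Map the driver's FLOW_CTRL_TX/RX bits onto the pause bits advertised
 * in the copper (1000T) and fiber (1000X) autoneg registers.  Note the
 * asymmetric-pause encodings follow IEEE 802.3 Annex 28B: "RX only" is
 * advertised as PAUSE_CAP | PAUSE_ASYM.
 */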
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))

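/* Program the MAC's RX/TX flow-control enables from either the autoneg
 * pause resolution above or the forced link_config settings, touching
 * MAC_RX_MODE/MAC_TX_MODE only when the value actually changes.
 */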
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);

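/* phylib link-change callback: translate the PHY's reported speed, duplex
 * and pause state into MAC_MODE/MAC_TX_LENGTHS settings and log the
 * result when anything changed.
 */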
static void tg3_adjust_link(struct net_device *dev)
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			MAC_MI_STAT_10MBPS_MODE |
			MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

		tg3_link_report(tp);

static int tg3_phy_init(struct tg3 *tp)
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)

	/* Bring the PHY back to a known state. */

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Asym_Pause);
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Asym_Pause);
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	phydev->advertising = phydev->supported;

static void tg3_phy_start(struct tg3 *tp)
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;

	phy_start_aneg(phydev);

static void tg3_phy_stop(struct tg3 *tp)
	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);

static void tg3_phy_fini(struct tg3 *tp)
	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;

static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);

		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

static void tg3_phy_set_wirespeed(struct tg3 *tp)
	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));

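/* Load per-board analog tuning values (AGC target, filter and DAC trims)
 * from the chip's OTP word into the PHY DSP registers, with the SM_DSP
 * clock enabled only for the duration of the update.
 */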
static void tg3_phy_apply_otp(struct tg3 *tp)
	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

static int tg3_wait_macro_done(struct tg3 *tp)
		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)

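/* Write a known test pattern to each of the four DSP channels and read
 * it back; a mismatch means the PHY needs another reset, which the
 * caller retries via *resetp.
 */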
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }

	for (chan = 0; chan < 4; chan++) {

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {

		for (i = 0; i < 6; i += 2) {

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {

			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

static int tg3_phy_reset_chanpat(struct tg3 *tp)
	for (chan = 0; chan < 4; chan++) {

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

		err = tg3_bmcr_reset(tp);

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))

		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB. */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);

	err = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);

	err = tg3_bmcr_reset(tp);

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		tw32(TG3_CPMU_CTRL, cpmuctrl);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {

		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
	    (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))

	tg3_phy_apply_otp(tp);

	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	} else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);

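/* Switch the NIC's auxiliary power GPIOs so that Wake-on-LAN or ASF
 * firmware keeps power after the interface goes down.  On dual-port
 * devices the GPIOs are shared, so the peer device's WOL/ASF state is
 * consulted before changing anything.
 */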
static void tg3_frob_aux_power(struct tg3 *tp)
	struct tg3 *tp_peer = tp;

	/* The GPIOs do something completely different on 57765. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
			tp_peer = netdev_priv(dev_peer);

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			u32 grc_local_ctrl = 0;

			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		if (tp_peer != tp &&
		    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    GRC_LCLCTRL_GPIO_OE1, 100);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;
	return 0;
}
2194 static int tg3_setup_phy(struct tg3 *, int);
2196 #define RESET_KIND_SHUTDOWN 0
2197 #define RESET_KIND_INIT 1
2198 #define RESET_KIND_SUSPEND 2
2200 static void tg3_write_sig_post_reset(struct tg3 *, int);
2201 static int tg3_halt_cpu(struct tg3 *, u32);
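/* tg3_power_down_phy() below quiesces the PHY or SerDes before the chip is
 * put into a low power state: it tears down hardware autoneg on 5704S
 * serdes parts, uses the FET shadow registers to power down FET-style
 * PHYs, and otherwise ends by setting BMCR_PDOWN, except on chips where
 * powering the PHY down is avoided (see the chip checks near the end of
 * the function).
 */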
2203 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2207 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2208 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2209 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2210 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2213 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2214 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2215 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2220 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2222 val = tr32(GRC_MISC_CFG);
2223 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2226 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2228 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2231 tg3_writephy(tp, MII_ADVERTISE, 0);
2232 tg3_writephy(tp, MII_BMCR,
2233 BMCR_ANENABLE | BMCR_ANRESTART);
2235 tg3_writephy(tp, MII_TG3_FET_TEST,
2236 phytest | MII_TG3_FET_SHADOW_EN);
2237 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2238 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2240 MII_TG3_FET_SHDW_AUXMODE4,
2243 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2246 } else if (do_low_power) {
2247 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2248 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2250 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2251 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2252 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2253 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2254 MII_TG3_AUXCTL_PCTL_VREG_11V);
	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
2260 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2261 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2262 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2263 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2266 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2267 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2268 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2269 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2270 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2271 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2274 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
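/* NVRAM access is arbitrated with the on-chip firmware through the
 * NVRAM_SWARB register: the host requests the semaphore by setting
 * SWARB_REQ_SET1 and polls for SWARB_GNT1, releasing it again with
 * SWARB_REQ_CLR1.  tg3_nvram_lock()/tg3_nvram_unlock() below keep a
 * nesting count in nvram_lock_cnt so the hardware semaphore is only
 * taken on the first lock and dropped on the last unlock.
 */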
2277 /* tp->lock is held. */
2278 static int tg3_nvram_lock(struct tg3 *tp)
2280 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2283 if (tp->nvram_lock_cnt == 0) {
2284 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2285 for (i = 0; i < 8000; i++) {
2286 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2291 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2295 tp->nvram_lock_cnt++;
2300 /* tp->lock is held. */
2301 static void tg3_nvram_unlock(struct tg3 *tp)
2303 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2304 if (tp->nvram_lock_cnt > 0)
2305 tp->nvram_lock_cnt--;
2306 if (tp->nvram_lock_cnt == 0)
2307 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2311 /* tp->lock is held. */
2312 static void tg3_enable_nvram_access(struct tg3 *tp)
2314 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2315 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2316 u32 nvaccess = tr32(NVRAM_ACCESS);
2318 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2322 /* tp->lock is held. */
2323 static void tg3_disable_nvram_access(struct tg3 *tp)
2325 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2326 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2327 u32 nvaccess = tr32(NVRAM_ACCESS);
2329 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2333 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2334 u32 offset, u32 *val)
2339 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2342 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2343 EEPROM_ADDR_DEVID_MASK |
2345 tw32(GRC_EEPROM_ADDR,
2347 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2348 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2349 EEPROM_ADDR_ADDR_MASK) |
2350 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2352 for (i = 0; i < 1000; i++) {
2353 tmp = tr32(GRC_EEPROM_ADDR);
2355 if (tmp & EEPROM_ADDR_COMPLETE)
2359 if (!(tmp & EEPROM_ADDR_COMPLETE))
2362 tmp = tr32(GRC_EEPROM_DATA);
	/* The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);
2373 #define NVRAM_CMD_TIMEOUT 10000
2375 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2379 tw32(NVRAM_CMD, nvram_cmd);
2380 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2382 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2388 if (i == NVRAM_CMD_TIMEOUT)
2394 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2396 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2397 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2398 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2399 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2400 (tp->nvram_jedecnum == JEDEC_ATMEL))
2402 addr = ((addr / tp->nvram_pagesize) <<
2403 ATMEL_AT45DB0X1B_PAGE_POS) +
2404 (addr % tp->nvram_pagesize);
2409 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2411 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2412 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2413 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2414 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2415 (tp->nvram_jedecnum == JEDEC_ATMEL))
2417 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2418 tp->nvram_pagesize) +
2419 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
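/* Worked example of the translation above, assuming an Atmel AT45DB0x1B
 * style part with nvram_pagesize == 264 and ATMEL_AT45DB0X1B_PAGE_POS == 9:
 * linear offset 540 falls in page 2 at byte 12, so tg3_nvram_phys_addr()
 * returns (2 << 9) + 12 = 0x40c, and tg3_nvram_logical_addr() inverts it,
 * (0x40c >> 9) * 264 + (0x40c & 0x1ff) = 528 + 12 = 540.
 */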
2424 /* NOTE: Data read in from NVRAM is byteswapped according to
2425 * the byteswapping settings for all other register accesses.
2426 * tg3 devices are BE devices, so on a BE machine, the data
2427 * returned will be exactly as it is seen in NVRAM. On a LE
2428 * machine, the 32-bit value will be byteswapped.
2430 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2434 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2435 return tg3_nvram_read_using_eeprom(tp, offset, val);
2437 offset = tg3_nvram_phys_addr(tp, offset);
2439 if (offset > NVRAM_ADDR_MSK)
2442 ret = tg3_nvram_lock(tp);
2446 tg3_enable_nvram_access(tp);
2448 tw32(NVRAM_ADDR, offset);
2449 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2450 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2453 *val = tr32(NVRAM_RDDATA);
2455 tg3_disable_nvram_access(tp);
2457 tg3_nvram_unlock(tp);
2462 /* Ensures NVRAM data is in bytestream format. */
2463 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
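/* The two readers differ only in byte order: tg3_nvram_read() returns the
 * word subject to the register-access byte swapping described in the note
 * above it, while tg3_nvram_read_be32() always returns bytestream
 * (big-endian) order.  Callers that treat NVRAM contents as a byte array,
 * for example when extracting a MAC address, should use the _be32 variant
 * so the bytes come out in NVRAM order regardless of host endianness.
 */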
2472 /* tp->lock is held. */
2473 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2475 u32 addr_high, addr_low;
2478 addr_high = ((tp->dev->dev_addr[0] << 8) |
2479 tp->dev->dev_addr[1]);
2480 addr_low = ((tp->dev->dev_addr[2] << 24) |
2481 (tp->dev->dev_addr[3] << 16) |
2482 (tp->dev->dev_addr[4] << 8) |
2483 (tp->dev->dev_addr[5] << 0));
2484 for (i = 0; i < 4; i++) {
2485 if (i == 1 && skip_mac_1)
2487 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2488 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2491 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2492 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2493 for (i = 0; i < 12; i++) {
2494 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2495 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2499 addr_high = (tp->dev->dev_addr[0] +
2500 tp->dev->dev_addr[1] +
2501 tp->dev->dev_addr[2] +
2502 tp->dev->dev_addr[3] +
2503 tp->dev->dev_addr[4] +
2504 tp->dev->dev_addr[5]) &
2505 TX_BACKOFF_SEED_MASK;
2506 tw32(MAC_TX_BACKOFF_SEED, addr_high);
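/* Example of the packing above: for a MAC address of 00:10:18:aa:bb:cc,
 * addr_high = 0x0010 (bytes 0-1) and addr_low = 0x18aabbcc (bytes 2-5),
 * written to all four MAC_ADDR_n_{HIGH,LOW} pairs and, on 5703/5704, to
 * the twelve extended address slots as well.  The TX backoff seed is just
 * the sum of the six address bytes (0x259 in this example) masked with
 * TX_BACKOFF_SEED_MASK.
 */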
2509 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2512 bool device_should_wake, do_low_power;
2514 /* Make sure register accesses (indirect or otherwise)
2515 * will function correctly.
2517 pci_write_config_dword(tp->pdev,
2518 TG3PCI_MISC_HOST_CTRL,
2519 tp->misc_host_ctrl);
2523 pci_enable_wake(tp->pdev, state, false);
2524 pci_set_power_state(tp->pdev, PCI_D0);
2526 /* Switch out of Vaux if it is a NIC */
2527 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2528 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2538 netdev_err(tp->dev, "Invalid power state (D%d) requested\n",
2543 /* Restore the CLKREQ setting. */
2544 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2547 pci_read_config_word(tp->pdev,
2548 tp->pcie_cap + PCI_EXP_LNKCTL,
2550 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2551 pci_write_config_word(tp->pdev,
2552 tp->pcie_cap + PCI_EXP_LNKCTL,
2556 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2557 tw32(TG3PCI_MISC_HOST_CTRL,
2558 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2560 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2561 device_may_wakeup(&tp->pdev->dev) &&
2562 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2564 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2565 do_low_power = false;
2566 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2567 !tp->link_config.phy_is_low_power) {
2568 struct phy_device *phydev;
2569 u32 phyid, advertising;
2571 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2573 tp->link_config.phy_is_low_power = 1;
2575 tp->link_config.orig_speed = phydev->speed;
2576 tp->link_config.orig_duplex = phydev->duplex;
2577 tp->link_config.orig_autoneg = phydev->autoneg;
2578 tp->link_config.orig_advertising = phydev->advertising;
2580 advertising = ADVERTISED_TP |
2582 ADVERTISED_Autoneg |
2583 ADVERTISED_10baseT_Half;
2585 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2586 device_should_wake) {
2587 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2589 ADVERTISED_100baseT_Half |
2590 ADVERTISED_100baseT_Full |
2591 ADVERTISED_10baseT_Full;
2593 advertising |= ADVERTISED_10baseT_Full;
2596 phydev->advertising = advertising;
2598 phy_start_aneg(phydev);
2600 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2601 if (phyid != PHY_ID_BCMAC131) {
2602 phyid &= PHY_BCM_OUI_MASK;
2603 if (phyid == PHY_BCM_OUI_1 ||
2604 phyid == PHY_BCM_OUI_2 ||
2605 phyid == PHY_BCM_OUI_3)
2606 do_low_power = true;
2610 do_low_power = true;
2612 if (tp->link_config.phy_is_low_power == 0) {
2613 tp->link_config.phy_is_low_power = 1;
2614 tp->link_config.orig_speed = tp->link_config.speed;
2615 tp->link_config.orig_duplex = tp->link_config.duplex;
2616 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2619 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2620 tp->link_config.speed = SPEED_10;
2621 tp->link_config.duplex = DUPLEX_HALF;
2622 tp->link_config.autoneg = AUTONEG_ENABLE;
2623 tg3_setup_phy(tp, 0);
2627 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2630 val = tr32(GRC_VCPU_EXT_CTRL);
2631 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2632 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2636 for (i = 0; i < 200; i++) {
2637 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2638 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2643 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2644 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2645 WOL_DRV_STATE_SHUTDOWN |
2649 if (device_should_wake) {
2652 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2654 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2658 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2659 mac_mode = MAC_MODE_PORT_MODE_GMII;
2661 mac_mode = MAC_MODE_PORT_MODE_MII;
2663 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2664 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2666 u32 speed = (tp->tg3_flags &
2667 TG3_FLAG_WOL_SPEED_100MB) ?
2668 SPEED_100 : SPEED_10;
2669 if (tg3_5700_link_polarity(tp, speed))
2670 mac_mode |= MAC_MODE_LINK_POLARITY;
2672 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2675 mac_mode = MAC_MODE_PORT_MODE_TBI;
2678 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2679 tw32(MAC_LED_CTRL, tp->led_ctrl);
2681 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2682 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2683 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2684 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2685 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2686 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2688 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2689 mac_mode |= tp->mac_mode &
2690 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2691 if (mac_mode & MAC_MODE_APE_TX_EN)
2692 mac_mode |= MAC_MODE_TDE_ENABLE;
2695 tw32_f(MAC_MODE, mac_mode);
2698 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2702 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2703 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2704 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2707 base_val = tp->pci_clock_ctrl;
2708 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2709 CLOCK_CTRL_TXCLK_DISABLE);
2711 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2712 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2713 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2714 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2715 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2717 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2718 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2719 u32 newbits1, newbits2;
2721 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2722 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2723 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2724 CLOCK_CTRL_TXCLK_DISABLE |
2726 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2727 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2728 newbits1 = CLOCK_CTRL_625_CORE;
2729 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2731 newbits1 = CLOCK_CTRL_ALTCLK;
2732 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2735 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2738 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2741 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2744 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2745 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2746 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2747 CLOCK_CTRL_TXCLK_DISABLE |
2748 CLOCK_CTRL_44MHZ_CORE);
2750 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2753 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2754 tp->pci_clock_ctrl | newbits3, 40);
2758 if (!(device_should_wake) &&
2759 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2760 tg3_power_down_phy(tp, do_low_power);
2762 tg3_frob_aux_power(tp);
2764 /* Workaround for unstable PLL clock */
2765 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2766 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2767 u32 val = tr32(0x7d00);
2769 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2771 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2774 err = tg3_nvram_lock(tp);
2775 tg3_halt_cpu(tp, RX_CPU_BASE);
2777 tg3_nvram_unlock(tp);
2781 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2783 if (device_should_wake)
2784 pci_enable_wake(tp->pdev, state, true);
2786 /* Finally, set the new power state. */
2787 pci_set_power_state(tp->pdev, state);
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;
	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;
	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;
	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;
	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;
	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;
	default:
		if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			return;
		}
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	}
}
2839 static void tg3_phy_copper_begin(struct tg3 *tp)
2844 if (tp->link_config.phy_is_low_power) {
2845 /* Entering low power mode. Disable gigabit and
2846 * 100baseT advertisements.
2848 tg3_writephy(tp, MII_TG3_CTRL, 0);
2850 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2851 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2852 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2853 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2855 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2856 } else if (tp->link_config.speed == SPEED_INVALID) {
2857 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2858 tp->link_config.advertising &=
2859 ~(ADVERTISED_1000baseT_Half |
2860 ADVERTISED_1000baseT_Full);
2862 new_adv = ADVERTISE_CSMA;
2863 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2864 new_adv |= ADVERTISE_10HALF;
2865 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2866 new_adv |= ADVERTISE_10FULL;
2867 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2868 new_adv |= ADVERTISE_100HALF;
2869 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2870 new_adv |= ADVERTISE_100FULL;
2872 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2874 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2876 if (tp->link_config.advertising &
2877 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2879 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2880 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2881 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2882 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2883 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2884 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2885 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2886 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2887 MII_TG3_CTRL_ENABLE_AS_MASTER);
2888 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2890 tg3_writephy(tp, MII_TG3_CTRL, 0);
2893 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2894 new_adv |= ADVERTISE_CSMA;
2896 /* Asking for a specific link mode. */
2897 if (tp->link_config.speed == SPEED_1000) {
2898 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2900 if (tp->link_config.duplex == DUPLEX_FULL)
2901 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2903 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2904 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2905 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2906 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2907 MII_TG3_CTRL_ENABLE_AS_MASTER);
2909 if (tp->link_config.speed == SPEED_100) {
2910 if (tp->link_config.duplex == DUPLEX_FULL)
2911 new_adv |= ADVERTISE_100FULL;
2913 new_adv |= ADVERTISE_100HALF;
2915 if (tp->link_config.duplex == DUPLEX_FULL)
2916 new_adv |= ADVERTISE_10FULL;
2918 new_adv |= ADVERTISE_10HALF;
2920 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2925 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2928 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2929 tp->link_config.speed != SPEED_INVALID) {
2930 u32 bmcr, orig_bmcr;
2932 tp->link_config.active_speed = tp->link_config.speed;
2933 tp->link_config.active_duplex = tp->link_config.duplex;
2936 switch (tp->link_config.speed) {
2942 bmcr |= BMCR_SPEED100;
2946 bmcr |= TG3_BMCR_SPEED1000;
2950 if (tp->link_config.duplex == DUPLEX_FULL)
2951 bmcr |= BMCR_FULLDPLX;
2953 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2954 (bmcr != orig_bmcr)) {
2955 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2956 for (i = 0; i < 1500; i++) {
2960 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2961 tg3_readphy(tp, MII_BMSR, &tmp))
2963 if (!(tmp & BMSR_LSTATUS)) {
2968 tg3_writephy(tp, MII_BMCR, bmcr);
2972 tg3_writephy(tp, MII_BMCR,
2973 BMCR_ANENABLE | BMCR_ANRESTART);
2977 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2981 /* Turn off tap power management. */
2982 /* Set Extended packet length bit */
2983 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2985 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2986 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2988 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2989 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2991 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2992 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2994 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2995 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2997 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2998 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
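/* The MII_TG3_DSP_ADDRESS / MII_TG3_DSP_RW_PORT writes above use the PHY's
 * indirect DSP access scheme: the DSP register number is written to
 * MII_TG3_DSP_ADDRESS and the next access to MII_TG3_DSP_RW_PORT then
 * targets that register.  For example, the final pair above writes the
 * value 0x0a20 into DSP register 0x201f.
 */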
3005 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3007 u32 adv_reg, all_mask = 0;
3009 if (mask & ADVERTISED_10baseT_Half)
3010 all_mask |= ADVERTISE_10HALF;
3011 if (mask & ADVERTISED_10baseT_Full)
3012 all_mask |= ADVERTISE_10FULL;
3013 if (mask & ADVERTISED_100baseT_Half)
3014 all_mask |= ADVERTISE_100HALF;
3015 if (mask & ADVERTISED_100baseT_Full)
3016 all_mask |= ADVERTISE_100FULL;
3018 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3021 if ((adv_reg & all_mask) != all_mask)
3023 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
3027 if (mask & ADVERTISED_1000baseT_Half)
3028 all_mask |= ADVERTISE_1000HALF;
3029 if (mask & ADVERTISED_1000baseT_Full)
3030 all_mask |= ADVERTISE_1000FULL;
3032 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3035 if ((tg3_ctrl & all_mask) != all_mask)
3041 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3045 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3048 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3049 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3051 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3052 if (curadv != reqadv)
3055 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
3056 tg3_readphy(tp, MII_LPA, rmtadv);
3058 /* Reprogram the advertisement register, even if it
3059 * does not affect the current link. If the link
3060 * gets renegotiated in the future, we can save an
3061 * additional renegotiation cycle by advertising
3062 * it correctly in the first place.
3064 if (curadv != reqadv) {
3065 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3066 ADVERTISE_PAUSE_ASYM);
3067 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3074 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3076 int current_link_up;
3078 u32 lcl_adv, rmt_adv;
3086 (MAC_STATUS_SYNC_CHANGED |
3087 MAC_STATUS_CFG_CHANGED |
3088 MAC_STATUS_MI_COMPLETION |
3089 MAC_STATUS_LNKSTATE_CHANGED));
3092 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3094 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3098 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
3103 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3104 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3105 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3106 netif_carrier_ok(tp->dev)) {
3107 tg3_readphy(tp, MII_BMSR, &bmsr);
3108 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3109 !(bmsr & BMSR_LSTATUS))
3115 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3116 tg3_readphy(tp, MII_BMSR, &bmsr);
3117 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3118 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3121 if (!(bmsr & BMSR_LSTATUS)) {
3122 err = tg3_init_5401phy_dsp(tp);
3126 tg3_readphy(tp, MII_BMSR, &bmsr);
3127 for (i = 0; i < 1000; i++) {
3129 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3130 (bmsr & BMSR_LSTATUS)) {
3136 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3137 TG3_PHY_REV_BCM5401_B0 &&
3138 !(bmsr & BMSR_LSTATUS) &&
3139 tp->link_config.active_speed == SPEED_1000) {
3140 err = tg3_phy_reset(tp);
3142 err = tg3_init_5401phy_dsp(tp);
3147 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3148 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3149 /* 5701 {A0,B0} CRC bug workaround */
3150 tg3_writephy(tp, 0x15, 0x0a75);
3151 tg3_writephy(tp, 0x1c, 0x8c68);
3152 tg3_writephy(tp, 0x1c, 0x8d68);
3153 tg3_writephy(tp, 0x1c, 0x8c68);
3156 /* Clear pending interrupts... */
3157 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3158 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3160 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
3161 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3162 else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
3163 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3165 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3166 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3167 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3168 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3169 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3171 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3174 current_link_up = 0;
3175 current_speed = SPEED_INVALID;
3176 current_duplex = DUPLEX_INVALID;
3178 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
3181 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3182 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3183 if (!(val & (1 << 10))) {
3185 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3191 for (i = 0; i < 100; i++) {
3192 tg3_readphy(tp, MII_BMSR, &bmsr);
3193 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3194 (bmsr & BMSR_LSTATUS))
3199 if (bmsr & BMSR_LSTATUS) {
3202 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3203 for (i = 0; i < 2000; i++) {
3205 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3210 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3215 for (i = 0; i < 200; i++) {
3216 tg3_readphy(tp, MII_BMCR, &bmcr);
3217 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3219 if (bmcr && bmcr != 0x7fff)
3227 tp->link_config.active_speed = current_speed;
3228 tp->link_config.active_duplex = current_duplex;
3230 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3231 if ((bmcr & BMCR_ANENABLE) &&
3232 tg3_copper_is_advertising_all(tp,
3233 tp->link_config.advertising)) {
3234 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3236 current_link_up = 1;
3239 if (!(bmcr & BMCR_ANENABLE) &&
3240 tp->link_config.speed == current_speed &&
3241 tp->link_config.duplex == current_duplex &&
3242 tp->link_config.flowctrl ==
3243 tp->link_config.active_flowctrl) {
3244 current_link_up = 1;
3248 if (current_link_up == 1 &&
3249 tp->link_config.active_duplex == DUPLEX_FULL)
3250 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3254 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
3257 tg3_phy_copper_begin(tp);
3259 tg3_readphy(tp, MII_BMSR, &tmp);
3260 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3261 (tmp & BMSR_LSTATUS))
3262 current_link_up = 1;
3265 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3266 if (current_link_up == 1) {
3267 if (tp->link_config.active_speed == SPEED_100 ||
3268 tp->link_config.active_speed == SPEED_10)
3269 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3271 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3272 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
3273 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3275 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3277 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3278 if (tp->link_config.active_duplex == DUPLEX_HALF)
3279 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3281 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3282 if (current_link_up == 1 &&
3283 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3284 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3286 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3289 /* ??? Without this setting Netgear GA302T PHY does not
3290 * ??? send/receive packets...
3292 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3293 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3294 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3295 tw32_f(MAC_MI_MODE, tp->mi_mode);
3299 tw32_f(MAC_MODE, tp->mac_mode);
3302 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3303 /* Polled via timer. */
3304 tw32_f(MAC_EVENT, 0);
3306 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3310 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3311 current_link_up == 1 &&
3312 tp->link_config.active_speed == SPEED_1000 &&
3313 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3314 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3317 (MAC_STATUS_SYNC_CHANGED |
3318 MAC_STATUS_CFG_CHANGED));
3321 NIC_SRAM_FIRMWARE_MBOX,
3322 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3325 /* Prevent send BD corruption. */
3326 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3327 u16 oldlnkctl, newlnkctl;
3329 pci_read_config_word(tp->pdev,
3330 tp->pcie_cap + PCI_EXP_LNKCTL,
3332 if (tp->link_config.active_speed == SPEED_100 ||
3333 tp->link_config.active_speed == SPEED_10)
3334 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3336 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3337 if (newlnkctl != oldlnkctl)
3338 pci_write_config_word(tp->pdev,
3339 tp->pcie_cap + PCI_EXP_LNKCTL,
3343 if (current_link_up != netif_carrier_ok(tp->dev)) {
3344 if (current_link_up)
3345 netif_carrier_on(tp->dev);
3347 netif_carrier_off(tp->dev);
3348 tg3_link_report(tp);
3354 struct tg3_fiber_aneginfo {
3356 #define ANEG_STATE_UNKNOWN 0
3357 #define ANEG_STATE_AN_ENABLE 1
3358 #define ANEG_STATE_RESTART_INIT 2
3359 #define ANEG_STATE_RESTART 3
3360 #define ANEG_STATE_DISABLE_LINK_OK 4
3361 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3362 #define ANEG_STATE_ABILITY_DETECT 6
3363 #define ANEG_STATE_ACK_DETECT_INIT 7
3364 #define ANEG_STATE_ACK_DETECT 8
3365 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3366 #define ANEG_STATE_COMPLETE_ACK 10
3367 #define ANEG_STATE_IDLE_DETECT_INIT 11
3368 #define ANEG_STATE_IDLE_DETECT 12
3369 #define ANEG_STATE_LINK_OK 13
3370 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3371 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3374 #define MR_AN_ENABLE 0x00000001
3375 #define MR_RESTART_AN 0x00000002
3376 #define MR_AN_COMPLETE 0x00000004
3377 #define MR_PAGE_RX 0x00000008
3378 #define MR_NP_LOADED 0x00000010
3379 #define MR_TOGGLE_TX 0x00000020
3380 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3381 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3382 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3383 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3384 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3385 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3386 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3387 #define MR_TOGGLE_RX 0x00002000
3388 #define MR_NP_RX 0x00004000
3390 #define MR_LINK_OK 0x80000000
3392 unsigned long link_time, cur_time;
3394 u32 ability_match_cfg;
3395 int ability_match_count;
3397 char ability_match, idle_match, ack_match;
3399 u32 txconfig, rxconfig;
3400 #define ANEG_CFG_NP 0x00000080
3401 #define ANEG_CFG_ACK 0x00000040
3402 #define ANEG_CFG_RF2 0x00000020
3403 #define ANEG_CFG_RF1 0x00000010
3404 #define ANEG_CFG_PS2 0x00000001
3405 #define ANEG_CFG_PS1 0x00008000
3406 #define ANEG_CFG_HD 0x00004000
3407 #define ANEG_CFG_FD 0x00002000
3408 #define ANEG_CFG_INVAL 0x00001f06
3413 #define ANEG_TIMER_ENAB 2
3414 #define ANEG_FAILED -1
3416 #define ANEG_STATE_SETTLE_TIME 10000
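/* Rough flow of the software autoneg state machine implemented below in
 * tg3_fiber_aneg_smachine(): AN_ENABLE -> RESTART_INIT/RESTART (transmit
 * idle config words), ABILITY_DETECT (send our ability word and wait for a
 * stable word from the link partner), ACK_DETECT (set the ACK bit and wait
 * for a matching acknowledged word), COMPLETE_ACK (latch the partner's
 * duplex, pause and remote-fault bits into ap->flags), then IDLE_DETECT and
 * finally LINK_OK.  ANEG_STATE_SETTLE_TIME is the dwell time, in the same
 * units as ap->cur_time, required in the RESTART, COMPLETE_ACK and
 * IDLE_DETECT states before the machine advances.
 */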
3418 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3419 struct tg3_fiber_aneginfo *ap)
3422 unsigned long delta;
3426 if (ap->state == ANEG_STATE_UNKNOWN) {
3430 ap->ability_match_cfg = 0;
3431 ap->ability_match_count = 0;
3432 ap->ability_match = 0;
3438 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3439 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3441 if (rx_cfg_reg != ap->ability_match_cfg) {
3442 ap->ability_match_cfg = rx_cfg_reg;
3443 ap->ability_match = 0;
3444 ap->ability_match_count = 0;
3446 if (++ap->ability_match_count > 1) {
3447 ap->ability_match = 1;
3448 ap->ability_match_cfg = rx_cfg_reg;
3451 if (rx_cfg_reg & ANEG_CFG_ACK)
3459 ap->ability_match_cfg = 0;
3460 ap->ability_match_count = 0;
3461 ap->ability_match = 0;
3467 ap->rxconfig = rx_cfg_reg;
3470 switch (ap->state) {
3471 case ANEG_STATE_UNKNOWN:
3472 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3473 ap->state = ANEG_STATE_AN_ENABLE;
3476 case ANEG_STATE_AN_ENABLE:
3477 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3478 if (ap->flags & MR_AN_ENABLE) {
3481 ap->ability_match_cfg = 0;
3482 ap->ability_match_count = 0;
3483 ap->ability_match = 0;
3487 ap->state = ANEG_STATE_RESTART_INIT;
3489 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3493 case ANEG_STATE_RESTART_INIT:
3494 ap->link_time = ap->cur_time;
3495 ap->flags &= ~(MR_NP_LOADED);
3497 tw32(MAC_TX_AUTO_NEG, 0);
3498 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3499 tw32_f(MAC_MODE, tp->mac_mode);
3502 ret = ANEG_TIMER_ENAB;
3503 ap->state = ANEG_STATE_RESTART;
3506 case ANEG_STATE_RESTART:
3507 delta = ap->cur_time - ap->link_time;
3508 if (delta > ANEG_STATE_SETTLE_TIME)
3509 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3511 ret = ANEG_TIMER_ENAB;
3514 case ANEG_STATE_DISABLE_LINK_OK:
3518 case ANEG_STATE_ABILITY_DETECT_INIT:
3519 ap->flags &= ~(MR_TOGGLE_TX);
3520 ap->txconfig = ANEG_CFG_FD;
3521 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3522 if (flowctrl & ADVERTISE_1000XPAUSE)
3523 ap->txconfig |= ANEG_CFG_PS1;
3524 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3525 ap->txconfig |= ANEG_CFG_PS2;
3526 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3527 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3528 tw32_f(MAC_MODE, tp->mac_mode);
3531 ap->state = ANEG_STATE_ABILITY_DETECT;
3534 case ANEG_STATE_ABILITY_DETECT:
3535 if (ap->ability_match != 0 && ap->rxconfig != 0)
3536 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3539 case ANEG_STATE_ACK_DETECT_INIT:
3540 ap->txconfig |= ANEG_CFG_ACK;
3541 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3542 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3543 tw32_f(MAC_MODE, tp->mac_mode);
3546 ap->state = ANEG_STATE_ACK_DETECT;
3549 case ANEG_STATE_ACK_DETECT:
3550 if (ap->ack_match != 0) {
3551 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3552 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3553 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3555 ap->state = ANEG_STATE_AN_ENABLE;
3557 } else if (ap->ability_match != 0 &&
3558 ap->rxconfig == 0) {
3559 ap->state = ANEG_STATE_AN_ENABLE;
3563 case ANEG_STATE_COMPLETE_ACK_INIT:
3564 if (ap->rxconfig & ANEG_CFG_INVAL) {
3568 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3569 MR_LP_ADV_HALF_DUPLEX |
3570 MR_LP_ADV_SYM_PAUSE |
3571 MR_LP_ADV_ASYM_PAUSE |
3572 MR_LP_ADV_REMOTE_FAULT1 |
3573 MR_LP_ADV_REMOTE_FAULT2 |
3574 MR_LP_ADV_NEXT_PAGE |
3577 if (ap->rxconfig & ANEG_CFG_FD)
3578 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3579 if (ap->rxconfig & ANEG_CFG_HD)
3580 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3581 if (ap->rxconfig & ANEG_CFG_PS1)
3582 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3583 if (ap->rxconfig & ANEG_CFG_PS2)
3584 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3585 if (ap->rxconfig & ANEG_CFG_RF1)
3586 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3587 if (ap->rxconfig & ANEG_CFG_RF2)
3588 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3589 if (ap->rxconfig & ANEG_CFG_NP)
3590 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3592 ap->link_time = ap->cur_time;
3594 ap->flags ^= (MR_TOGGLE_TX);
3595 if (ap->rxconfig & 0x0008)
3596 ap->flags |= MR_TOGGLE_RX;
3597 if (ap->rxconfig & ANEG_CFG_NP)
3598 ap->flags |= MR_NP_RX;
3599 ap->flags |= MR_PAGE_RX;
3601 ap->state = ANEG_STATE_COMPLETE_ACK;
3602 ret = ANEG_TIMER_ENAB;
3605 case ANEG_STATE_COMPLETE_ACK:
3606 if (ap->ability_match != 0 &&
3607 ap->rxconfig == 0) {
3608 ap->state = ANEG_STATE_AN_ENABLE;
3611 delta = ap->cur_time - ap->link_time;
3612 if (delta > ANEG_STATE_SETTLE_TIME) {
3613 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3614 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3616 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3617 !(ap->flags & MR_NP_RX)) {
3618 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3626 case ANEG_STATE_IDLE_DETECT_INIT:
3627 ap->link_time = ap->cur_time;
3628 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3629 tw32_f(MAC_MODE, tp->mac_mode);
3632 ap->state = ANEG_STATE_IDLE_DETECT;
3633 ret = ANEG_TIMER_ENAB;
3636 case ANEG_STATE_IDLE_DETECT:
3637 if (ap->ability_match != 0 &&
3638 ap->rxconfig == 0) {
3639 ap->state = ANEG_STATE_AN_ENABLE;
3642 delta = ap->cur_time - ap->link_time;
3643 if (delta > ANEG_STATE_SETTLE_TIME) {
3644 /* XXX another gem from the Broadcom driver :( */
3645 ap->state = ANEG_STATE_LINK_OK;
3649 case ANEG_STATE_LINK_OK:
3650 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3654 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3655 /* ??? unimplemented */
3658 case ANEG_STATE_NEXT_PAGE_WAIT:
3659 /* ??? unimplemented */
3670 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3673 struct tg3_fiber_aneginfo aninfo;
3674 int status = ANEG_FAILED;
3678 tw32_f(MAC_TX_AUTO_NEG, 0);
3680 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3681 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3684 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3687 memset(&aninfo, 0, sizeof(aninfo));
3688 aninfo.flags |= MR_AN_ENABLE;
3689 aninfo.state = ANEG_STATE_UNKNOWN;
3690 aninfo.cur_time = 0;
3692 while (++tick < 195000) {
3693 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3694 if (status == ANEG_DONE || status == ANEG_FAILED)
3700 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3701 tw32_f(MAC_MODE, tp->mac_mode);
3704 *txflags = aninfo.txconfig;
3705 *rxflags = aninfo.flags;
3707 if (status == ANEG_DONE &&
3708 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3709 MR_LP_ADV_FULL_DUPLEX)))
3715 static void tg3_init_bcm8002(struct tg3 *tp)
3717 u32 mac_status = tr32(MAC_STATUS);
	/* Reset when initializing for the first time or when we already have a link. */
3721 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3722 !(mac_status & MAC_STATUS_PCS_SYNCED))
3725 /* Set PLL lock range. */
3726 tg3_writephy(tp, 0x16, 0x8007);
3729 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3731 /* Wait for reset to complete. */
3732 /* XXX schedule_timeout() ... */
3733 for (i = 0; i < 500; i++)
3736 /* Config mode; select PMA/Ch 1 regs. */
3737 tg3_writephy(tp, 0x10, 0x8411);
3739 /* Enable auto-lock and comdet, select txclk for tx. */
3740 tg3_writephy(tp, 0x11, 0x0a10);
3742 tg3_writephy(tp, 0x18, 0x00a0);
3743 tg3_writephy(tp, 0x16, 0x41ff);
3745 /* Assert and deassert POR. */
3746 tg3_writephy(tp, 0x13, 0x0400);
3748 tg3_writephy(tp, 0x13, 0x0000);
3750 tg3_writephy(tp, 0x11, 0x0a50);
3752 tg3_writephy(tp, 0x11, 0x0a10);
3754 /* Wait for signal to stabilize */
3755 /* XXX schedule_timeout() ... */
3756 for (i = 0; i < 15000; i++)
	/* Deselect the channel register so we can read the PHY ID
	 * later.
	 */
3762 tg3_writephy(tp, 0x10, 0x8011);
3765 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3768 u32 sg_dig_ctrl, sg_dig_status;
3769 u32 serdes_cfg, expected_sg_dig_ctrl;
3770 int workaround, port_a;
3771 int current_link_up;
3774 expected_sg_dig_ctrl = 0;
3777 current_link_up = 0;
3779 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3780 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3782 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3785 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3786 /* preserve bits 20-23 for voltage regulator */
3787 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3790 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3792 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3793 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3795 u32 val = serdes_cfg;
3801 tw32_f(MAC_SERDES_CFG, val);
3804 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3806 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3807 tg3_setup_flow_control(tp, 0, 0);
3808 current_link_up = 1;
3813 /* Want auto-negotiation. */
3814 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3816 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3817 if (flowctrl & ADVERTISE_1000XPAUSE)
3818 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3819 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3820 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3822 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3823 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3824 tp->serdes_counter &&
3825 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3826 MAC_STATUS_RCVD_CFG)) ==
3827 MAC_STATUS_PCS_SYNCED)) {
3828 tp->serdes_counter--;
3829 current_link_up = 1;
3834 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3835 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3837 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3839 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3840 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3841 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3842 MAC_STATUS_SIGNAL_DET)) {
3843 sg_dig_status = tr32(SG_DIG_STATUS);
3844 mac_status = tr32(MAC_STATUS);
3846 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3847 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3848 u32 local_adv = 0, remote_adv = 0;
3850 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3851 local_adv |= ADVERTISE_1000XPAUSE;
3852 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3853 local_adv |= ADVERTISE_1000XPSE_ASYM;
3855 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3856 remote_adv |= LPA_1000XPAUSE;
3857 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3858 remote_adv |= LPA_1000XPAUSE_ASYM;
3860 tg3_setup_flow_control(tp, local_adv, remote_adv);
3861 current_link_up = 1;
3862 tp->serdes_counter = 0;
3863 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3864 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3865 if (tp->serdes_counter)
3866 tp->serdes_counter--;
3869 u32 val = serdes_cfg;
3876 tw32_f(MAC_SERDES_CFG, val);
3879 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3882 /* Link parallel detection - link is up */
3883 /* only if we have PCS_SYNC and not */
3884 /* receiving config code words */
3885 mac_status = tr32(MAC_STATUS);
3886 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3887 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3888 tg3_setup_flow_control(tp, 0, 0);
3889 current_link_up = 1;
3891 TG3_FLG2_PARALLEL_DETECT;
3892 tp->serdes_counter =
3893 SERDES_PARALLEL_DET_TIMEOUT;
3895 goto restart_autoneg;
3899 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3900 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3904 return current_link_up;
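/* tg3_setup_fiber_by_hand() below is the fallback used when the SG_DIG
 * hardware autoneg block is not in use: with autoneg enabled it runs the
 * software config-word state machine via fiber_autoneg() and derives the
 * pause configuration from the exchanged ability words; otherwise it
 * simply forces a 1000 Mb/s full duplex link.
 */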
3907 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3909 int current_link_up = 0;
3911 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3914 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3915 u32 txflags, rxflags;
3918 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3919 u32 local_adv = 0, remote_adv = 0;
3921 if (txflags & ANEG_CFG_PS1)
3922 local_adv |= ADVERTISE_1000XPAUSE;
3923 if (txflags & ANEG_CFG_PS2)
3924 local_adv |= ADVERTISE_1000XPSE_ASYM;
3926 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3927 remote_adv |= LPA_1000XPAUSE;
3928 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3929 remote_adv |= LPA_1000XPAUSE_ASYM;
3931 tg3_setup_flow_control(tp, local_adv, remote_adv);
3933 current_link_up = 1;
3935 for (i = 0; i < 30; i++) {
3938 (MAC_STATUS_SYNC_CHANGED |
3939 MAC_STATUS_CFG_CHANGED));
3941 if ((tr32(MAC_STATUS) &
3942 (MAC_STATUS_SYNC_CHANGED |
3943 MAC_STATUS_CFG_CHANGED)) == 0)
3947 mac_status = tr32(MAC_STATUS);
3948 if (current_link_up == 0 &&
3949 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3950 !(mac_status & MAC_STATUS_RCVD_CFG))
3951 current_link_up = 1;
3953 tg3_setup_flow_control(tp, 0, 0);
3955 /* Forcing 1000FD link up. */
3956 current_link_up = 1;
3958 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3961 tw32_f(MAC_MODE, tp->mac_mode);
3966 return current_link_up;
3969 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3972 u16 orig_active_speed;
3973 u8 orig_active_duplex;
3975 int current_link_up;
3978 orig_pause_cfg = tp->link_config.active_flowctrl;
3979 orig_active_speed = tp->link_config.active_speed;
3980 orig_active_duplex = tp->link_config.active_duplex;
3982 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3983 netif_carrier_ok(tp->dev) &&
3984 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3985 mac_status = tr32(MAC_STATUS);
3986 mac_status &= (MAC_STATUS_PCS_SYNCED |
3987 MAC_STATUS_SIGNAL_DET |
3988 MAC_STATUS_CFG_CHANGED |
3989 MAC_STATUS_RCVD_CFG);
3990 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3991 MAC_STATUS_SIGNAL_DET)) {
3992 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3993 MAC_STATUS_CFG_CHANGED));
3998 tw32_f(MAC_TX_AUTO_NEG, 0);
4000 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4001 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4002 tw32_f(MAC_MODE, tp->mac_mode);
4005 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4006 tg3_init_bcm8002(tp);
4008 /* Enable link change event even when serdes polling. */
4009 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4012 current_link_up = 0;
4013 mac_status = tr32(MAC_STATUS);
4015 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
4016 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4018 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4020 tp->napi[0].hw_status->status =
4021 (SD_STATUS_UPDATED |
4022 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4024 for (i = 0; i < 100; i++) {
4025 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4026 MAC_STATUS_CFG_CHANGED));
4028 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4029 MAC_STATUS_CFG_CHANGED |
4030 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4034 mac_status = tr32(MAC_STATUS);
4035 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4036 current_link_up = 0;
4037 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4038 tp->serdes_counter == 0) {
4039 tw32_f(MAC_MODE, (tp->mac_mode |
4040 MAC_MODE_SEND_CONFIGS));
4042 tw32_f(MAC_MODE, tp->mac_mode);
4046 if (current_link_up == 1) {
4047 tp->link_config.active_speed = SPEED_1000;
4048 tp->link_config.active_duplex = DUPLEX_FULL;
4049 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4050 LED_CTRL_LNKLED_OVERRIDE |
4051 LED_CTRL_1000MBPS_ON));
4053 tp->link_config.active_speed = SPEED_INVALID;
4054 tp->link_config.active_duplex = DUPLEX_INVALID;
4055 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4056 LED_CTRL_LNKLED_OVERRIDE |
4057 LED_CTRL_TRAFFIC_OVERRIDE));
4060 if (current_link_up != netif_carrier_ok(tp->dev)) {
4061 if (current_link_up)
4062 netif_carrier_on(tp->dev);
4064 netif_carrier_off(tp->dev);
4065 tg3_link_report(tp);
4067 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4068 if (orig_pause_cfg != now_pause_cfg ||
4069 orig_active_speed != tp->link_config.active_speed ||
4070 orig_active_duplex != tp->link_config.active_duplex)
4071 tg3_link_report(tp);
4077 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4079 int current_link_up, err = 0;
4083 u32 local_adv, remote_adv;
4085 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4086 tw32_f(MAC_MODE, tp->mac_mode);
4092 (MAC_STATUS_SYNC_CHANGED |
4093 MAC_STATUS_CFG_CHANGED |
4094 MAC_STATUS_MI_COMPLETION |
4095 MAC_STATUS_LNKSTATE_CHANGED));
4101 current_link_up = 0;
4102 current_speed = SPEED_INVALID;
4103 current_duplex = DUPLEX_INVALID;
4105 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4106 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4107 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4108 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4109 bmsr |= BMSR_LSTATUS;
4111 bmsr &= ~BMSR_LSTATUS;
4114 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4116 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4117 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4118 /* do nothing, just check for link up at the end */
4119 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4122 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4123 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4124 ADVERTISE_1000XPAUSE |
4125 ADVERTISE_1000XPSE_ASYM |
4128 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4130 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4131 new_adv |= ADVERTISE_1000XHALF;
4132 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4133 new_adv |= ADVERTISE_1000XFULL;
4135 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4136 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4137 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4138 tg3_writephy(tp, MII_BMCR, bmcr);
4140 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4141 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4142 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4149 bmcr &= ~BMCR_SPEED1000;
4150 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4152 if (tp->link_config.duplex == DUPLEX_FULL)
4153 new_bmcr |= BMCR_FULLDPLX;
4155 if (new_bmcr != bmcr) {
4156 /* BMCR_SPEED1000 is a reserved bit that needs
4157 * to be set on write.
4159 new_bmcr |= BMCR_SPEED1000;
4161 /* Force a linkdown */
4162 if (netif_carrier_ok(tp->dev)) {
4165 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4166 adv &= ~(ADVERTISE_1000XFULL |
4167 ADVERTISE_1000XHALF |
4169 tg3_writephy(tp, MII_ADVERTISE, adv);
4170 tg3_writephy(tp, MII_BMCR, bmcr |
4174 netif_carrier_off(tp->dev);
4176 tg3_writephy(tp, MII_BMCR, new_bmcr);
4178 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4179 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4180 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4182 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4183 bmsr |= BMSR_LSTATUS;
4185 bmsr &= ~BMSR_LSTATUS;
4187 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4191 if (bmsr & BMSR_LSTATUS) {
4192 current_speed = SPEED_1000;
4193 current_link_up = 1;
4194 if (bmcr & BMCR_FULLDPLX)
4195 current_duplex = DUPLEX_FULL;
4197 current_duplex = DUPLEX_HALF;
4202 if (bmcr & BMCR_ANENABLE) {
4205 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4206 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4207 common = local_adv & remote_adv;
4208 if (common & (ADVERTISE_1000XHALF |
4209 ADVERTISE_1000XFULL)) {
4210 if (common & ADVERTISE_1000XFULL)
4211 current_duplex = DUPLEX_FULL;
4213 current_duplex = DUPLEX_HALF;
4214 } else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
4215 /* Link is up via parallel detect */
4217 current_link_up = 0;
4222 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4223 tg3_setup_flow_control(tp, local_adv, remote_adv);
4225 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4226 if (tp->link_config.active_duplex == DUPLEX_HALF)
4227 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4229 tw32_f(MAC_MODE, tp->mac_mode);
4232 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4234 tp->link_config.active_speed = current_speed;
4235 tp->link_config.active_duplex = current_duplex;
4237 if (current_link_up != netif_carrier_ok(tp->dev)) {
4238 if (current_link_up)
4239 netif_carrier_on(tp->dev);
4241 netif_carrier_off(tp->dev);
4242 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4244 tg3_link_report(tp);
4249 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4251 if (tp->serdes_counter) {
4252 /* Give autoneg time to complete. */
4253 tp->serdes_counter--;
4257 if (!netif_carrier_ok(tp->dev) &&
4258 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4261 tg3_readphy(tp, MII_BMCR, &bmcr);
4262 if (bmcr & BMCR_ANENABLE) {
4265 /* Select shadow register 0x1f */
4266 tg3_writephy(tp, 0x1c, 0x7c00);
4267 tg3_readphy(tp, 0x1c, &phy1);
4269 /* Select expansion interrupt status register */
4270 tg3_writephy(tp, 0x17, 0x0f01);
4271 tg3_readphy(tp, 0x15, &phy2);
4272 tg3_readphy(tp, 0x15, &phy2);
4274 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4275 /* We have signal detect and not receiving
4276 * config code words, link is up by parallel
4280 bmcr &= ~BMCR_ANENABLE;
4281 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4282 tg3_writephy(tp, MII_BMCR, bmcr);
4283 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
4286 } else if (netif_carrier_ok(tp->dev) &&
4287 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4288 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4291 /* Select expansion interrupt status register */
4292 tg3_writephy(tp, 0x17, 0x0f01);
4293 tg3_readphy(tp, 0x15, &phy2);
4297 /* Config code words received, turn on autoneg. */
4298 tg3_readphy(tp, MII_BMCR, &bmcr);
4299 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4301 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4307 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4311 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
4312 err = tg3_setup_fiber_phy(tp, force_reset);
4313 else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
4314 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4316 err = tg3_setup_copper_phy(tp, force_reset);
4318 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4321 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4322 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4324 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4329 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4330 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4331 tw32(GRC_MISC_CFG, val);
4334 if (tp->link_config.active_speed == SPEED_1000 &&
4335 tp->link_config.active_duplex == DUPLEX_HALF)
4336 tw32(MAC_TX_LENGTHS,
4337 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4338 (6 << TX_LENGTHS_IPG_SHIFT) |
4339 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4341 tw32(MAC_TX_LENGTHS,
4342 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4343 (6 << TX_LENGTHS_IPG_SHIFT) |
4344 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4346 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4347 if (netif_carrier_ok(tp->dev)) {
4348 tw32(HOSTCC_STAT_COAL_TICKS,
4349 tp->coal.stats_block_coalesce_usecs);
4351 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4355 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4356 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4357 if (!netif_carrier_ok(tp->dev))
4358 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4361 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4362 tw32(PCIE_PWR_MGMT_THRESH, val);
4368 /* This is called whenever we suspect that the system chipset is re-
4369 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4370 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later.
 */
4374 static void tg3_tx_recover(struct tg3 *tp)
4376 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4377 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4379 netdev_warn(tp->dev,
4380 "The system may be re-ordering memory-mapped I/O "
4381 "cycles to the network device, attempting to recover. "
4382 "Please report the problem to the driver maintainer "
4383 "and include system chipset information.\n");
4385 spin_lock(&tp->lock);
4386 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4387 spin_unlock(&tp->lock);
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
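/* The index arithmetic above relies on TG3_TX_RING_SIZE being a power of
 * two: (tx_prod - tx_cons) & (TG3_TX_RING_SIZE - 1) is the number of
 * descriptors still in flight even after the 32-bit indices wrap.  For
 * example, with a 512-entry ring, tx_prod == 5 and tx_cons == 510 gives
 * (5 - 510) & 511 == 7 outstanding descriptors, so tg3_tx_avail() returns
 * tx_pending - 7.
 */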
4397 /* Tigon3 never reports partial packet sends. So we do not
4398 * need special logic to handle SKBs that have not had all
4399 * of their frags sent yet, like SunGEM does.
4401 static void tg3_tx(struct tg3_napi *tnapi)
4403 struct tg3 *tp = tnapi->tp;
4404 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4405 u32 sw_idx = tnapi->tx_cons;
4406 struct netdev_queue *txq;
4407 int index = tnapi - tp->napi;
4409 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
4412 txq = netdev_get_tx_queue(tp->dev, index);
4414 while (sw_idx != hw_idx) {
4415 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4416 struct sk_buff *skb = ri->skb;
4419 if (unlikely(skb == NULL)) {
4424 pci_unmap_single(tp->pdev,
4425 dma_unmap_addr(ri, mapping),
4431 sw_idx = NEXT_TX(sw_idx);
4433 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4434 ri = &tnapi->tx_buffers[sw_idx];
4435 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4438 pci_unmap_page(tp->pdev,
4439 dma_unmap_addr(ri, mapping),
4440 skb_shinfo(skb)->frags[i].size,
4442 sw_idx = NEXT_TX(sw_idx);
4447 if (unlikely(tx_bug)) {
4453 tnapi->tx_cons = sw_idx;
4455 /* Need to make the tx_cons update visible to tg3_start_xmit()
4456 * before checking for netif_queue_stopped(). Without the
4457 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();
4462 if (unlikely(netif_tx_queue_stopped(txq) &&
4463 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4464 __netif_tx_lock(txq, smp_processor_id());
4465 if (netif_tx_queue_stopped(txq) &&
4466 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4467 netif_tx_wake_queue(txq);
4468 __netif_tx_unlock(txq);
4472 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4477 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4478 map_sz, PCI_DMA_FROMDEVICE);
4479 dev_kfree_skb_any(ri->skb);
4483 /* Returns size of skb allocated or < 0 on error.
4485 * We only need to fill in the address because the other members
4486 * of the RX descriptor are invariant, see tg3_init_rings.
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
4494 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4495 u32 opaque_key, u32 dest_idx_unmasked)
4497 struct tg3_rx_buffer_desc *desc;
4498 struct ring_info *map, *src_map;
4499 struct sk_buff *skb;
4501 int skb_size, dest_idx;
4504 switch (opaque_key) {
4505 case RXD_OPAQUE_RING_STD:
4506 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4507 desc = &tpr->rx_std[dest_idx];
4508 map = &tpr->rx_std_buffers[dest_idx];
4509 skb_size = tp->rx_pkt_map_sz;
4512 case RXD_OPAQUE_RING_JUMBO:
4513 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4514 desc = &tpr->rx_jmb[dest_idx].std;
4515 map = &tpr->rx_jmb_buffers[dest_idx];
4516 skb_size = TG3_RX_JMB_MAP_SZ;
4523 /* Do not overwrite any of the map or rp information
4524 * until we are sure we can commit to a new buffer.
4526 * Callers depend upon this behavior and assume that
4527 * we leave everything unchanged if we fail.
4529 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4533 skb_reserve(skb, tp->rx_offset);
4535 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4536 PCI_DMA_FROMDEVICE);
4537 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4543 dma_unmap_addr_set(map, mapping, mapping);
4545 desc->addr_hi = ((u64)mapping >> 32);
4546 desc->addr_lo = ((u64)mapping & 0xffffffff);
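/* Illustrative sketch, not driver code: posting a buffer only rewrites the
 * two 32-bit address halves of the descriptor, which is why just one cache
 * line is dirtied.  A hypothetical standalone helper making the split
 * explicit:
 */
#if 0
#include <stdint.h>

struct example_bd {
	uint32_t addr_hi;	/* bits 63..32 of the DMA address */
	uint32_t addr_lo;	/* bits 31..0 of the DMA address */
};

static void example_set_bd_addr(struct example_bd *bd, uint64_t mapping)
{
	bd->addr_hi = (uint32_t)(mapping >> 32);
	bd->addr_lo = (uint32_t)(mapping & 0xffffffff);
}
#endif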
4551 /* We only need to copy over the address because the other
4552 * members of the RX descriptor are invariant. See notes above
4553 * tg3_alloc_rx_skb for full details.
4555 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4556 struct tg3_rx_prodring_set *dpr,
4557 u32 opaque_key, int src_idx,
4558 u32 dest_idx_unmasked)
4560 struct tg3 *tp = tnapi->tp;
4561 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4562 struct ring_info *src_map, *dest_map;
4563 struct tg3_rx_prodring_set *spr = &tp->prodring[0];
4566 switch (opaque_key) {
4567 case RXD_OPAQUE_RING_STD:
4568 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4569 dest_desc = &dpr->rx_std[dest_idx];
4570 dest_map = &dpr->rx_std_buffers[dest_idx];
4571 src_desc = &spr->rx_std[src_idx];
4572 src_map = &spr->rx_std_buffers[src_idx];
4575 case RXD_OPAQUE_RING_JUMBO:
4576 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4577 dest_desc = &dpr->rx_jmb[dest_idx].std;
4578 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4579 src_desc = &spr->rx_jmb[src_idx].std;
4580 src_map = &spr->rx_jmb_buffers[src_idx];
4587 dest_map->skb = src_map->skb;
4588 dma_unmap_addr_set(dest_map, mapping,
4589 dma_unmap_addr(src_map, mapping));
4590 dest_desc->addr_hi = src_desc->addr_hi;
4591 dest_desc->addr_lo = src_desc->addr_lo;
4593 /* Ensure that the update to the skb happens after the physical
4594 * addresses have been transferred to the new BD location.
4598 src_map->skb = NULL;
4601 /* The RX ring scheme is composed of multiple rings which post fresh
4602 * buffers to the chip, and one special ring the chip uses to report
4603 * status back to the host.
4605 * The special ring reports the status of received packets to the
4606 * host. The chip does not write into the original descriptor the
4607 * RX buffer was obtained from. The chip simply takes the original
4608 * descriptor as provided by the host, updates the status and length
4609 * field, then writes this into the next status ring entry.
4611 * Each ring the host uses to post buffers to the chip is described
4612 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4613 * it is first placed into the on-chip ram. When the packet's length
4614 * is known, it walks down the TG3_BDINFO entries to select the ring.
4615 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4616 * which is within the range of the new packet's length is chosen.
4618 * The "separate ring for rx status" scheme may sound queer, but it makes
4619 * sense from a cache coherency perspective. If only the host writes
4620 * to the buffer post rings, and only the chip writes to the rx status
4621 * rings, then cache lines never move beyond shared-modified state.
4622 * If both the host and chip were to write into the same ring, cache line
4623 * eviction could occur since both entities want it in an exclusive state.
4625 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4627 struct tg3 *tp = tnapi->tp;
4628 u32 work_mask, rx_std_posted = 0;
4629 u32 std_prod_idx, jmb_prod_idx;
4630 u32 sw_idx = tnapi->rx_rcb_ptr;
4633 struct tg3_rx_prodring_set *tpr = tnapi->prodring;
4635 hw_idx = *(tnapi->rx_rcb_prod_idx);
4637 * We need to order the read of hw_idx and the read of
4638 * the opaque cookie.
4643 std_prod_idx = tpr->rx_std_prod_idx;
4644 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4645 while (sw_idx != hw_idx && budget > 0) {
4646 struct ring_info *ri;
4647 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4649 struct sk_buff *skb;
4650 dma_addr_t dma_addr;
4651 u32 opaque_key, desc_idx, *post_ptr;
4652 bool hw_vlan __maybe_unused = false;
4653 u16 vtag __maybe_unused = 0;
4655 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4656 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4657 if (opaque_key == RXD_OPAQUE_RING_STD) {
4658 ri = &tp->prodring[0].rx_std_buffers[desc_idx];
4659 dma_addr = dma_unmap_addr(ri, mapping);
4661 post_ptr = &std_prod_idx;
4663 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4664 ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
4665 dma_addr = dma_unmap_addr(ri, mapping);
4667 post_ptr = &jmb_prod_idx;
4669 goto next_pkt_nopost;
4671 work_mask |= opaque_key;
4673 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4674 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4676 tg3_recycle_rx(tnapi, tpr, opaque_key,
4677 desc_idx, *post_ptr);
4679 /* Other statistics are tracked by the card. */
4680 tp->net_stats.rx_dropped++;
4684 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4687 if (len > TG3_RX_COPY_THRESH(tp)) {
4690 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4695 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4696 PCI_DMA_FROMDEVICE);
4698 /* Ensure that the update to the skb happens
4699 * after the usage of the old DMA mapping.
4707 struct sk_buff *copy_skb;
4709 tg3_recycle_rx(tnapi, tpr, opaque_key,
4710 desc_idx, *post_ptr);
4712 copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN +
4714 if (copy_skb == NULL)
4715 goto drop_it_no_recycle;
4717 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
4718 skb_put(copy_skb, len);
4719 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4720 skb_copy_from_linear_data(skb, copy_skb->data, len);
4721 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4723 /* We'll reuse the original ring buffer. */
4727 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4728 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4729 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4730 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4731 skb->ip_summed = CHECKSUM_UNNECESSARY;
4733 skb->ip_summed = CHECKSUM_NONE;
4735 skb->protocol = eth_type_trans(skb, tp->dev);
4737 if (len > (tp->dev->mtu + ETH_HLEN) &&
4738 skb->protocol != htons(ETH_P_8021Q)) {
4743 if (desc->type_flags & RXD_FLAG_VLAN &&
4744 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
4745 vtag = desc->err_vlan & RXD_VLAN_MASK;
4746 #if TG3_VLAN_TAG_USED
4752 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
4753 __skb_push(skb, VLAN_HLEN);
4755 memmove(ve, skb->data + VLAN_HLEN,
4757 ve->h_vlan_proto = htons(ETH_P_8021Q);
4758 ve->h_vlan_TCI = htons(vtag);
4762 #if TG3_VLAN_TAG_USED
4764 vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
4767 napi_gro_receive(&tnapi->napi, skb);
4775 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4776 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4777 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4778 tpr->rx_std_prod_idx);
4779 work_mask &= ~RXD_OPAQUE_RING_STD;
4784 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4786 /* Refresh hw_idx to see if there is new work */
4787 if (sw_idx == hw_idx) {
4788 hw_idx = *(tnapi->rx_rcb_prod_idx);
4793 /* ACK the status ring. */
4794 tnapi->rx_rcb_ptr = sw_idx;
4795 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4797 /* Refill RX ring(s). */
4798 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
4799 if (work_mask & RXD_OPAQUE_RING_STD) {
4800 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4801 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4802 tpr->rx_std_prod_idx);
4804 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4805 tpr->rx_jmb_prod_idx = jmb_prod_idx %
4806 TG3_RX_JUMBO_RING_SIZE;
4807 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4808 tpr->rx_jmb_prod_idx);
4811 } else if (work_mask) {
4812 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4813 * updated before the producer indices can be updated.
4817 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4818 tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
4820 if (tnapi != &tp->napi[1])
4821 napi_schedule(&tp->napi[1].napi);
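/* Illustrative sketch, not driver code: the opaque cookie placed in each
 * posted RX descriptor encodes which producer ring the buffer belongs to
 * and its index within that ring, so the receive path above can locate the
 * matching ring_info without searching.  The EX_* constants below are an
 * assumed layout for the demo; the real masks are the RXD_OPAQUE_* values
 * in tg3.h.
 */
#if 0
#include <stdint.h>

#define EX_OPAQUE_INDEX_MASK	0x0000ffff	/* assumed layout */
#define EX_OPAQUE_RING_STD	0x00010000
#define EX_OPAQUE_RING_JUMBO	0x00020000

static uint32_t example_opaque_encode(uint32_t ring, uint32_t index)
{
	return ring | (index & EX_OPAQUE_INDEX_MASK);
}

static void example_opaque_decode(uint32_t opaque, uint32_t *ring,
				  uint32_t *index)
{
	*index = opaque & EX_OPAQUE_INDEX_MASK;
	*ring  = opaque & ~EX_OPAQUE_INDEX_MASK;
}
#endif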
4827 static void tg3_poll_link(struct tg3 *tp)
4829 /* handle link change and other phy events */
4830 if (!(tp->tg3_flags &
4831 (TG3_FLAG_USE_LINKCHG_REG |
4832 TG3_FLAG_POLL_SERDES))) {
4833 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4835 if (sblk->status & SD_STATUS_LINK_CHG) {
4836 sblk->status = SD_STATUS_UPDATED |
4837 (sblk->status & ~SD_STATUS_LINK_CHG);
4838 spin_lock(&tp->lock);
4839 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4841 (MAC_STATUS_SYNC_CHANGED |
4842 MAC_STATUS_CFG_CHANGED |
4843 MAC_STATUS_MI_COMPLETION |
4844 MAC_STATUS_LNKSTATE_CHANGED));
4847 tg3_setup_phy(tp, 0);
4848 spin_unlock(&tp->lock);
4853 static int tg3_rx_prodring_xfer(struct tg3 *tp,
4854 struct tg3_rx_prodring_set *dpr,
4855 struct tg3_rx_prodring_set *spr)
4857 u32 si, di, cpycnt, src_prod_idx;
4861 src_prod_idx = spr->rx_std_prod_idx;
4863 /* Make sure updates to the rx_std_buffers[] entries and the
4864 * standard producer index are seen in the correct order.
4868 if (spr->rx_std_cons_idx == src_prod_idx)
4871 if (spr->rx_std_cons_idx < src_prod_idx)
4872 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
4874 cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
4876 cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
4878 si = spr->rx_std_cons_idx;
4879 di = dpr->rx_std_prod_idx;
4881 for (i = di; i < di + cpycnt; i++) {
4882 if (dpr->rx_std_buffers[i].skb) {
4892 /* Ensure that updates to the rx_std_buffers ring and the
4893 * shadowed hardware producer ring from tg3_recycle_skb() are
4894 * ordered correctly WRT the skb check above.
4898 memcpy(&dpr->rx_std_buffers[di],
4899 &spr->rx_std_buffers[si],
4900 cpycnt * sizeof(struct ring_info));
4902 for (i = 0; i < cpycnt; i++, di++, si++) {
4903 struct tg3_rx_buffer_desc *sbd, *dbd;
4904 sbd = &spr->rx_std[si];
4905 dbd = &dpr->rx_std[di];
4906 dbd->addr_hi = sbd->addr_hi;
4907 dbd->addr_lo = sbd->addr_lo;
4910 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
4912 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
4917 src_prod_idx = spr->rx_jmb_prod_idx;
4919 /* Make sure updates to the rx_jmb_buffers[] entries and
4920 * the jumbo producer index are seen in the correct order.
4924 if (spr->rx_jmb_cons_idx == src_prod_idx)
4927 if (spr->rx_jmb_cons_idx < src_prod_idx)
4928 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
4930 cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
4932 cpycnt = min(cpycnt,
4933 TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
4935 si = spr->rx_jmb_cons_idx;
4936 di = dpr->rx_jmb_prod_idx;
4938 for (i = di; i < di + cpycnt; i++) {
4939 if (dpr->rx_jmb_buffers[i].skb) {
4949 /* Ensure that updates to the rx_jmb_buffers ring and the
4950 * shadowed hardware producer ring from tg3_recycle_skb() are
4951 * ordered correctly WRT the skb check above.
4955 memcpy(&dpr->rx_jmb_buffers[di],
4956 &spr->rx_jmb_buffers[si],
4957 cpycnt * sizeof(struct ring_info));
4959 for (i = 0; i < cpycnt; i++, di++, si++) {
4960 struct tg3_rx_buffer_desc *sbd, *dbd;
4961 sbd = &spr->rx_jmb[si].std;
4962 dbd = &dpr->rx_jmb[di].std;
4963 dbd->addr_hi = sbd->addr_hi;
4964 dbd->addr_lo = sbd->addr_lo;
4967 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
4968 TG3_RX_JUMBO_RING_SIZE;
4969 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
4970 TG3_RX_JUMBO_RING_SIZE;
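/* Illustrative sketch, not driver code: the transfer above copies only the
 * contiguous run of entries between the source consumer and producer
 * indices on each pass, stopping at a ring wrap or when the destination
 * ring runs out of room; any remainder is picked up next time around.
 */
#if 0
static unsigned int example_contig_copy_count(unsigned int src_cons,
					      unsigned int src_prod,
					      unsigned int dst_prod,
					      unsigned int ring_size)
{
	unsigned int cpycnt;

	if (src_cons == src_prod)
		return 0;			/* nothing to transfer */

	if (src_cons < src_prod)
		cpycnt = src_prod - src_cons;	/* contiguous span */
	else
		cpycnt = ring_size - src_cons;	/* copy up to the wrap */

	/* Do not run past the end of the destination ring either. */
	if (cpycnt > ring_size - dst_prod)
		cpycnt = ring_size - dst_prod;

	return cpycnt;
}
#endif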
4976 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4978 struct tg3 *tp = tnapi->tp;
4980 /* run TX completion thread */
4981 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
4983 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4987 /* run RX thread, within the bounds set by NAPI.
4988 * All RX "locking" is done by ensuring outside
4989 * code synchronizes with tg3->napi.poll()
4991 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4992 work_done += tg3_rx(tnapi, budget - work_done);
4994 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
4995 struct tg3_rx_prodring_set *dpr = &tp->prodring[0];
4997 u32 std_prod_idx = dpr->rx_std_prod_idx;
4998 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5000 for (i = 1; i < tp->irq_cnt; i++)
5001 err |= tg3_rx_prodring_xfer(tp, dpr,
5002 tp->napi[i].prodring);
5006 if (std_prod_idx != dpr->rx_std_prod_idx)
5007 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5008 dpr->rx_std_prod_idx);
5010 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5011 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5012 dpr->rx_jmb_prod_idx);
5017 tw32_f(HOSTCC_MODE, tp->coal_now);
5023 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5025 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5026 struct tg3 *tp = tnapi->tp;
5028 struct tg3_hw_status *sblk = tnapi->hw_status;
5031 work_done = tg3_poll_work(tnapi, work_done, budget);
5033 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5036 if (unlikely(work_done >= budget))
5039 /* tp->last_tag is used in tg3_int_reenable() below
5040 * to tell the hw how much work has been processed,
5041 * so we must read it before checking for more work.
5043 tnapi->last_tag = sblk->status_tag;
5044 tnapi->last_irq_tag = tnapi->last_tag;
5047 /* check for RX/TX work to do */
5048 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5049 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5050 napi_complete(napi);
5051 /* Reenable interrupts. */
5052 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5061 /* work_done is guaranteed to be less than budget. */
5062 napi_complete(napi);
5063 schedule_work(&tp->reset_task);
5067 static int tg3_poll(struct napi_struct *napi, int budget)
5069 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5070 struct tg3 *tp = tnapi->tp;
5072 struct tg3_hw_status *sblk = tnapi->hw_status;
5077 work_done = tg3_poll_work(tnapi, work_done, budget);
5079 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5082 if (unlikely(work_done >= budget))
5085 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
5086 /* tp->last_tag is used in tg3_int_reenable() below
5087 * to tell the hw how much work has been processed,
5088 * so we must read it before checking for more work.
5090 tnapi->last_tag = sblk->status_tag;
5091 tnapi->last_irq_tag = tnapi->last_tag;
5094 sblk->status &= ~SD_STATUS_UPDATED;
5096 if (likely(!tg3_has_work(tnapi))) {
5097 napi_complete(napi);
5098 tg3_int_reenable(tnapi);
5106 /* work_done is guaranteed to be less than budget. */
5107 napi_complete(napi);
5108 schedule_work(&tp->reset_task);
5112 static void tg3_irq_quiesce(struct tg3 *tp)
5116 BUG_ON(tp->irq_sync);
5121 for (i = 0; i < tp->irq_cnt; i++)
5122 synchronize_irq(tp->napi[i].irq_vec);
5125 static inline int tg3_irq_sync(struct tg3 *tp)
5127 return tp->irq_sync;
5130 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5131 * If irq_sync is non-zero, then the IRQ handler must be synchronized
5132 * with as well. Most of the time, this is not necessary except when
5133 * shutting down the device.
5135 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5137 spin_lock_bh(&tp->lock);
5139 tg3_irq_quiesce(tp);
5142 static inline void tg3_full_unlock(struct tg3 *tp)
5144 spin_unlock_bh(&tp->lock);
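/* Usage sketch (illustration only): a slow path that must also quiesce the
 * interrupt handlers passes a non-zero irq_sync, roughly as tg3_change_mtu()
 * below does:
 */
#if 0
	tg3_full_lock(tp, 1);		/* block BH users and wait for ISRs */
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	/* ... reprogram the hardware ... */
	err = tg3_restart_hw(tp, 0);
	tg3_full_unlock(tp);
#endif
/* Fast paths that only need to exclude other BH-context users pass 0. */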
5147 /* One-shot MSI handler - Chip automatically disables interrupt
5148 * after sending MSI so driver doesn't have to do it.
5150 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5152 struct tg3_napi *tnapi = dev_id;
5153 struct tg3 *tp = tnapi->tp;
5155 prefetch(tnapi->hw_status);
5157 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5159 if (likely(!tg3_irq_sync(tp)))
5160 napi_schedule(&tnapi->napi);
5165 /* MSI ISR - No need to check for interrupt sharing and no need to
5166 * flush status block and interrupt mailbox. PCI ordering rules
5167 * guarantee that MSI will arrive after the status block.
5169 static irqreturn_t tg3_msi(int irq, void *dev_id)
5171 struct tg3_napi *tnapi = dev_id;
5172 struct tg3 *tp = tnapi->tp;
5174 prefetch(tnapi->hw_status);
5176 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5178 * Writing any value to intr-mbox-0 clears PCI INTA# and
5179 * chip-internal interrupt pending events.
5180 * Writing non-zero to intr-mbox-0 additionally tells the
5181 * NIC to stop sending us irqs, engaging "in-intr-handler"
5184 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5185 if (likely(!tg3_irq_sync(tp)))
5186 napi_schedule(&tnapi->napi);
5188 return IRQ_RETVAL(1);
5191 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5193 struct tg3_napi *tnapi = dev_id;
5194 struct tg3 *tp = tnapi->tp;
5195 struct tg3_hw_status *sblk = tnapi->hw_status;
5196 unsigned int handled = 1;
5198 /* In INTx mode, it is possible for the interrupt to arrive at
5199 * the CPU before the status block that was posted prior to the interrupt.
5200 * Reading the PCI State register will confirm whether the
5201 * interrupt is ours and will flush the status block.
5203 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5204 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5205 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5212 * Writing any value to intr-mbox-0 clears PCI INTA# and
5213 * chip-internal interrupt pending events.
5214 * Writing non-zero to intr-mbox-0 additionally tells the
5215 * NIC to stop sending us irqs, engaging "in-intr-handler"
5218 * Flush the mailbox to de-assert the IRQ immediately to prevent
5219 * spurious interrupts. The flush impacts performance but
5220 * excessive spurious interrupts can be worse in some cases.
5222 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5223 if (tg3_irq_sync(tp))
5225 sblk->status &= ~SD_STATUS_UPDATED;
5226 if (likely(tg3_has_work(tnapi))) {
5227 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5228 napi_schedule(&tnapi->napi);
5230 /* No work; shared interrupt perhaps? Re-enable
5231 * interrupts, and flush that PCI write
5233 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5237 return IRQ_RETVAL(handled);
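/* Illustrative summary, not driver code, of the interrupt mailbox protocol
 * used by the handlers above: any write to interrupt mailbox 0 de-asserts
 * INTA#, and a non-zero value additionally masks further chip interrupts
 * until a later write re-enables them.  In tagged-status mode the re-enable
 * write carries the last processed status tag in its upper byte so the chip
 * knows how much work was completed.
 */
#if 0
	/* Entering the handler / NAPI poll: ack and mask. */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/* Leaving NAPI poll, legacy status mode: unmask. */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);

	/* Leaving NAPI poll, tagged status mode: unmask and report progress. */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
#endif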
5240 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5242 struct tg3_napi *tnapi = dev_id;
5243 struct tg3 *tp = tnapi->tp;
5244 struct tg3_hw_status *sblk = tnapi->hw_status;
5245 unsigned int handled = 1;
5247 /* In INTx mode, it is possible for the interrupt to arrive at
5248 * the CPU before the status block that was posted prior to the interrupt.
5249 * Reading the PCI State register will confirm whether the
5250 * interrupt is ours and will flush the status block.
5252 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5253 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5254 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5261 * writing any value to intr-mbox-0 clears PCI INTA# and
5262 * chip-internal interrupt pending events.
5263 * writing non-zero to intr-mbox-0 additionally tells the
5264 * NIC to stop sending us irqs, engaging "in-intr-handler"
5267 * Flush the mailbox to de-assert the IRQ immediately to prevent
5268 * spurious interrupts. The flush impacts performance but
5269 * excessive spurious interrupts can be worse in some cases.
5271 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5274 * In a shared interrupt configuration, sometimes other devices'
5275 * interrupts will scream. We record the current status tag here
5276 * so that the above check can report that the screaming interrupts
5277 * are unhandled. Eventually they will be silenced.
5279 tnapi->last_irq_tag = sblk->status_tag;
5281 if (tg3_irq_sync(tp))
5284 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5286 napi_schedule(&tnapi->napi);
5289 return IRQ_RETVAL(handled);
5292 /* ISR for interrupt test */
5293 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5295 struct tg3_napi *tnapi = dev_id;
5296 struct tg3 *tp = tnapi->tp;
5297 struct tg3_hw_status *sblk = tnapi->hw_status;
5299 if ((sblk->status & SD_STATUS_UPDATED) ||
5300 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5301 tg3_disable_ints(tp);
5302 return IRQ_RETVAL(1);
5304 return IRQ_RETVAL(0);
5307 static int tg3_init_hw(struct tg3 *, int);
5308 static int tg3_halt(struct tg3 *, int, int);
5310 /* Restart hardware after configuration changes, self-test, etc.
5311 * Invoked with tp->lock held.
5313 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5314 __releases(tp->lock)
5315 __acquires(tp->lock)
5319 err = tg3_init_hw(tp, reset_phy);
5322 "Failed to re-initialize device, aborting\n");
5323 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5324 tg3_full_unlock(tp);
5325 del_timer_sync(&tp->timer);
5327 tg3_napi_enable(tp);
5329 tg3_full_lock(tp, 0);
5334 #ifdef CONFIG_NET_POLL_CONTROLLER
5335 static void tg3_poll_controller(struct net_device *dev)
5338 struct tg3 *tp = netdev_priv(dev);
5340 for (i = 0; i < tp->irq_cnt; i++)
5341 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5345 static void tg3_reset_task(struct work_struct *work)
5347 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5349 unsigned int restart_timer;
5351 tg3_full_lock(tp, 0);
5353 if (!netif_running(tp->dev)) {
5354 tg3_full_unlock(tp);
5358 tg3_full_unlock(tp);
5364 tg3_full_lock(tp, 1);
5366 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5367 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5369 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5370 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5371 tp->write32_rx_mbox = tg3_write_flush_reg32;
5372 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5373 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5376 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5377 err = tg3_init_hw(tp, 1);
5381 tg3_netif_start(tp);
5384 mod_timer(&tp->timer, jiffies + 1);
5387 tg3_full_unlock(tp);
5393 static void tg3_dump_short_state(struct tg3 *tp)
5395 netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5396 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5397 netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5398 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5401 static void tg3_tx_timeout(struct net_device *dev)
5403 struct tg3 *tp = netdev_priv(dev);
5405 if (netif_msg_tx_err(tp)) {
5406 netdev_err(dev, "transmit timed out, resetting\n");
5407 tg3_dump_short_state(tp);
5410 schedule_work(&tp->reset_task);
5413 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5414 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5416 u32 base = (u32) mapping & 0xffffffff;
5418 return ((base > 0xffffdcc0) &&
5419 (base + len + 8 < base));
5422 /* Test for DMA addresses > 40-bit */
5423 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5426 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5427 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5428 return (((u64) mapping + len) > DMA_BIT_MASK(40));
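/* Worked example for the two tests above (illustration only).  The
 * 0xffffdcc0 threshold means only mappings within roughly 9 KB of a 4 GB
 * boundary -- presumably the largest frame plus some slack -- can trigger
 * the check.  With mapping = 0xfffff000 and len = 8192, base + len + 8
 * wraps to 0x1008 in 32 bits, which is below base, so the buffer straddles
 * the boundary and the workaround path is taken.  Standalone versions of
 * both checks:
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool example_crosses_4g(uint64_t mapping, int len)
{
	uint32_t base = (uint32_t)mapping;

	return base > 0xffffdcc0 && (uint32_t)(base + len + 8) < base;
}

static bool example_exceeds_40bit(uint64_t mapping, int len)
{
	return mapping + len > ((1ULL << 40) - 1);	/* DMA_BIT_MASK(40) */
}
#endif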
5435 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5437 /* Work around 4GB and 40-bit hardware DMA bugs. */
5438 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5439 struct sk_buff *skb, u32 last_plus_one,
5440 u32 *start, u32 base_flags, u32 mss)
5442 struct tg3 *tp = tnapi->tp;
5443 struct sk_buff *new_skb;
5444 dma_addr_t new_addr = 0;
5448 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5449 new_skb = skb_copy(skb, GFP_ATOMIC);
5451 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5453 new_skb = skb_copy_expand(skb,
5454 skb_headroom(skb) + more_headroom,
5455 skb_tailroom(skb), GFP_ATOMIC);
5461 /* New SKB is guaranteed to be linear. */
5463 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5465 /* Make sure the mapping succeeded */
5466 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5468 dev_kfree_skb(new_skb);
5471 /* Make sure new skb does not cross any 4G boundaries.
5472 * Drop the packet if it does.
5474 } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5475 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5476 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5479 dev_kfree_skb(new_skb);
5482 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5483 base_flags, 1 | (mss << 1));
5484 *start = NEXT_TX(entry);
5488 /* Now clean up the sw ring entries. */
5490 while (entry != last_plus_one) {
5494 len = skb_headlen(skb);
5496 len = skb_shinfo(skb)->frags[i-1].size;
5498 pci_unmap_single(tp->pdev,
5499 dma_unmap_addr(&tnapi->tx_buffers[entry],
5501 len, PCI_DMA_TODEVICE);
5503 tnapi->tx_buffers[entry].skb = new_skb;
5504 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5507 tnapi->tx_buffers[entry].skb = NULL;
5509 entry = NEXT_TX(entry);
5518 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5519 dma_addr_t mapping, int len, u32 flags,
5522 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5523 int is_end = (mss_and_is_end & 0x1);
5524 u32 mss = (mss_and_is_end >> 1);
5528 flags |= TXD_FLAG_END;
5529 if (flags & TXD_FLAG_VLAN) {
5530 vlan_tag = flags >> 16;
5533 vlan_tag |= (mss << TXD_MSS_SHIFT);
5535 txd->addr_hi = ((u64) mapping >> 32);
5536 txd->addr_lo = ((u64) mapping & 0xffffffff);
5537 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5538 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
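/* Illustrative sketch, not driver code: how tg3_set_txd() packs the two
 * remaining descriptor words.  The bit positions below (length and MSS in
 * the upper 16 bits, flags and VLAN tag in the lower 16) are assumed for
 * the demo; the authoritative TXD_*_SHIFT values live in tg3.h.
 */
#if 0
#include <stdint.h>

struct example_txd {
	uint32_t addr_hi;
	uint32_t addr_lo;
	uint32_t len_flags;	/* [31:16] length, [15:0] TXD_FLAG_* bits */
	uint32_t vlan_tag;	/* [31:16] MSS,    [15:0] VLAN tag */
};

static void example_fill_txd(struct example_txd *txd, uint64_t mapping,
			     uint16_t len, uint16_t flags,
			     uint16_t vlan, uint16_t mss)
{
	txd->addr_hi   = (uint32_t)(mapping >> 32);
	txd->addr_lo   = (uint32_t)(mapping & 0xffffffff);
	txd->len_flags = ((uint32_t)len << 16) | flags;
	txd->vlan_tag  = ((uint32_t)mss << 16) | vlan;
}
#endif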
5541 /* hard_start_xmit for devices that don't have any bugs and
5542 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5544 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5545 struct net_device *dev)
5547 struct tg3 *tp = netdev_priv(dev);
5548 u32 len, entry, base_flags, mss;
5550 struct tg3_napi *tnapi;
5551 struct netdev_queue *txq;
5552 unsigned int i, last;
5554 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5555 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5556 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5559 /* We are running in BH disabled context with netif_tx_lock
5560 * and TX reclaim runs via tp->napi.poll inside of a software
5561 * interrupt. Furthermore, IRQ processing runs lockless so we have
5562 * no IRQ context deadlocks to worry about either. Rejoice!
5564 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5565 if (!netif_tx_queue_stopped(txq)) {
5566 netif_tx_stop_queue(txq);
5568 /* This is a hard error, log it. */
5570 "BUG! Tx Ring full when queue awake!\n");
5572 return NETDEV_TX_BUSY;
5575 entry = tnapi->tx_prod;
5577 mss = skb_shinfo(skb)->gso_size;
5579 int tcp_opt_len, ip_tcp_len;
5582 if (skb_header_cloned(skb) &&
5583 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5588 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
5589 hdrlen = skb_headlen(skb) - ETH_HLEN;
5591 struct iphdr *iph = ip_hdr(skb);
5593 tcp_opt_len = tcp_optlen(skb);
5594 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5597 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5598 hdrlen = ip_tcp_len + tcp_opt_len;
5601 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5602 mss |= (hdrlen & 0xc) << 12;
5604 base_flags |= 0x00000010;
5605 base_flags |= (hdrlen & 0x3e0) << 5;
5609 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5610 TXD_FLAG_CPU_POST_DMA);
5612 tcp_hdr(skb)->check = 0;
5614 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5615 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5618 #if TG3_VLAN_TAG_USED
5619 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5620 base_flags |= (TXD_FLAG_VLAN |
5621 (vlan_tx_tag_get(skb) << 16));
5624 len = skb_headlen(skb);
5626 /* Queue skb data, a.k.a. the main skb fragment. */
5627 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5628 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5633 tnapi->tx_buffers[entry].skb = skb;
5634 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5636 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5637 !mss && skb->len > ETH_DATA_LEN)
5638 base_flags |= TXD_FLAG_JMB_PKT;
5640 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5641 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5643 entry = NEXT_TX(entry);
5645 /* Now loop through additional data fragments, and queue them. */
5646 if (skb_shinfo(skb)->nr_frags > 0) {
5647 last = skb_shinfo(skb)->nr_frags - 1;
5648 for (i = 0; i <= last; i++) {
5649 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5652 mapping = pci_map_page(tp->pdev,
5655 len, PCI_DMA_TODEVICE);
5656 if (pci_dma_mapping_error(tp->pdev, mapping))
5659 tnapi->tx_buffers[entry].skb = NULL;
5660 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5663 tg3_set_txd(tnapi, entry, mapping, len,
5664 base_flags, (i == last) | (mss << 1));
5666 entry = NEXT_TX(entry);
5670 /* Packets are ready, update Tx producer idx local and on card. */
5671 tw32_tx_mbox(tnapi->prodmbox, entry);
5673 tnapi->tx_prod = entry;
5674 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5675 netif_tx_stop_queue(txq);
5676 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5677 netif_tx_wake_queue(txq);
5683 return NETDEV_TX_OK;
5687 entry = tnapi->tx_prod;
5688 tnapi->tx_buffers[entry].skb = NULL;
5689 pci_unmap_single(tp->pdev,
5690 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5693 for (i = 0; i <= last; i++) {
5694 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5695 entry = NEXT_TX(entry);
5697 pci_unmap_page(tp->pdev,
5698 dma_unmap_addr(&tnapi->tx_buffers[entry],
5700 frag->size, PCI_DMA_TODEVICE);
5704 return NETDEV_TX_OK;
5707 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5708 struct net_device *);
5710 /* Use GSO to work around a rare TSO bug that may be triggered when the
5711 * TSO header is greater than 80 bytes.
5713 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5715 struct sk_buff *segs, *nskb;
5716 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5718 /* Estimate the number of fragments in the worst case */
5719 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5720 netif_stop_queue(tp->dev);
5721 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5722 return NETDEV_TX_BUSY;
5724 netif_wake_queue(tp->dev);
5727 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5729 goto tg3_tso_bug_end;
5735 tg3_start_xmit_dma_bug(nskb, tp->dev);
5741 return NETDEV_TX_OK;
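/* Illustrative note, not driver code: the frag_cnt_est above reserves three
 * TX descriptors per GSO segment as a worst case -- presumably covering the
 * segment's linear header plus a couple of page fragments -- so a packet
 * that will split into N segments wants roughly 3 * N free ring entries
 * before software segmentation is attempted.
 */
#if 0
static unsigned int example_tso_bug_ring_need(unsigned int gso_segs)
{
	return gso_segs * 3;	/* assumed worst-case descriptors per segment */
}
#endif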
5744 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5745 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5747 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5748 struct net_device *dev)
5750 struct tg3 *tp = netdev_priv(dev);
5751 u32 len, entry, base_flags, mss;
5752 int would_hit_hwbug;
5754 struct tg3_napi *tnapi;
5755 struct netdev_queue *txq;
5756 unsigned int i, last;
5758 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5759 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5760 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5763 /* We are running in BH disabled context with netif_tx_lock
5764 * and TX reclaim runs via tp->napi.poll inside of a software
5765 * interrupt. Furthermore, IRQ processing runs lockless so we have
5766 * no IRQ context deadlocks to worry about either. Rejoice!
5768 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5769 if (!netif_tx_queue_stopped(txq)) {
5770 netif_tx_stop_queue(txq);
5772 /* This is a hard error, log it. */
5774 "BUG! Tx Ring full when queue awake!\n");
5776 return NETDEV_TX_BUSY;
5779 entry = tnapi->tx_prod;
5781 if (skb->ip_summed == CHECKSUM_PARTIAL)
5782 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5784 mss = skb_shinfo(skb)->gso_size;
5787 u32 tcp_opt_len, hdr_len;
5789 if (skb_header_cloned(skb) &&
5790 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5796 tcp_opt_len = tcp_optlen(skb);
5798 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5799 hdr_len = skb_headlen(skb) - ETH_HLEN;
5803 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5804 hdr_len = ip_tcp_len + tcp_opt_len;
5807 iph->tot_len = htons(mss + hdr_len);
5810 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5811 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5812 return tg3_tso_bug(tp, skb);
5814 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5815 TXD_FLAG_CPU_POST_DMA);
5817 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5818 tcp_hdr(skb)->check = 0;
5819 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5821 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5826 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5827 mss |= (hdr_len & 0xc) << 12;
5829 base_flags |= 0x00000010;
5830 base_flags |= (hdr_len & 0x3e0) << 5;
5831 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5832 mss |= hdr_len << 9;
5833 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5834 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5835 if (tcp_opt_len || iph->ihl > 5) {
5838 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5839 mss |= (tsflags << 11);
5842 if (tcp_opt_len || iph->ihl > 5) {
5845 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5846 base_flags |= tsflags << 12;
5850 #if TG3_VLAN_TAG_USED
5851 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5852 base_flags |= (TXD_FLAG_VLAN |
5853 (vlan_tx_tag_get(skb) << 16));
5856 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5857 !mss && skb->len > ETH_DATA_LEN)
5858 base_flags |= TXD_FLAG_JMB_PKT;
5860 len = skb_headlen(skb);
5862 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5863 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5868 tnapi->tx_buffers[entry].skb = skb;
5869 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5871 would_hit_hwbug = 0;
5873 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
5874 would_hit_hwbug = 1;
5876 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5877 tg3_4g_overflow_test(mapping, len))
5878 would_hit_hwbug = 1;
5880 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5881 tg3_40bit_overflow_test(tp, mapping, len))
5882 would_hit_hwbug = 1;
5884 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5885 would_hit_hwbug = 1;
5887 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5888 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5890 entry = NEXT_TX(entry);
5892 /* Now loop through additional data fragments, and queue them. */
5893 if (skb_shinfo(skb)->nr_frags > 0) {
5894 last = skb_shinfo(skb)->nr_frags - 1;
5895 for (i = 0; i <= last; i++) {
5896 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5899 mapping = pci_map_page(tp->pdev,
5902 len, PCI_DMA_TODEVICE);
5904 tnapi->tx_buffers[entry].skb = NULL;
5905 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5907 if (pci_dma_mapping_error(tp->pdev, mapping))
5910 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
5912 would_hit_hwbug = 1;
5914 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5915 tg3_4g_overflow_test(mapping, len))
5916 would_hit_hwbug = 1;
5918 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5919 tg3_40bit_overflow_test(tp, mapping, len))
5920 would_hit_hwbug = 1;
5922 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5923 tg3_set_txd(tnapi, entry, mapping, len,
5924 base_flags, (i == last)|(mss << 1));
5926 tg3_set_txd(tnapi, entry, mapping, len,
5927 base_flags, (i == last));
5929 entry = NEXT_TX(entry);
5933 if (would_hit_hwbug) {
5934 u32 last_plus_one = entry;
5937 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5938 start &= (TG3_TX_RING_SIZE - 1);
5940 /* If the workaround fails due to memory/mapping
5941 * failure, silently drop this packet.
5943 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
5944 &start, base_flags, mss))
5950 /* Packets are ready, update Tx producer idx local and on card. */
5951 tw32_tx_mbox(tnapi->prodmbox, entry);
5953 tnapi->tx_prod = entry;
5954 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5955 netif_tx_stop_queue(txq);
5956 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5957 netif_tx_wake_queue(txq);
5963 return NETDEV_TX_OK;
5967 entry = tnapi->tx_prod;
5968 tnapi->tx_buffers[entry].skb = NULL;
5969 pci_unmap_single(tp->pdev,
5970 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5973 for (i = 0; i <= last; i++) {
5974 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5975 entry = NEXT_TX(entry);
5977 pci_unmap_page(tp->pdev,
5978 dma_unmap_addr(&tnapi->tx_buffers[entry],
5980 frag->size, PCI_DMA_TODEVICE);
5984 return NETDEV_TX_OK;
5987 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5992 if (new_mtu > ETH_DATA_LEN) {
5993 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5994 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5995 ethtool_op_set_tso(dev, 0);
5997 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
6000 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6001 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
6002 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
6006 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6008 struct tg3 *tp = netdev_priv(dev);
6011 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6014 if (!netif_running(dev)) {
6015 /* We'll just catch it later when the
6018 tg3_set_mtu(dev, tp, new_mtu);
6026 tg3_full_lock(tp, 1);
6028 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6030 tg3_set_mtu(dev, tp, new_mtu);
6032 err = tg3_restart_hw(tp, 0);
6035 tg3_netif_start(tp);
6037 tg3_full_unlock(tp);
6045 static void tg3_rx_prodring_free(struct tg3 *tp,
6046 struct tg3_rx_prodring_set *tpr)
6050 if (tpr != &tp->prodring[0]) {
6051 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6052 i = (i + 1) % TG3_RX_RING_SIZE)
6053 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6056 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6057 for (i = tpr->rx_jmb_cons_idx;
6058 i != tpr->rx_jmb_prod_idx;
6059 i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
6060 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6068 for (i = 0; i < TG3_RX_RING_SIZE; i++)
6069 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6072 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6073 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
6074 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6079 /* Initialize rx rings for packet processing.
6081 * The chip has been shut down and the driver detached from
6082 * the networking stack, so no interrupts or new tx packets will
6083 * end up in the driver. tp->{tx,}lock are held and thus
6086 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6087 struct tg3_rx_prodring_set *tpr)
6089 u32 i, rx_pkt_dma_sz;
6091 tpr->rx_std_cons_idx = 0;
6092 tpr->rx_std_prod_idx = 0;
6093 tpr->rx_jmb_cons_idx = 0;
6094 tpr->rx_jmb_prod_idx = 0;
6096 if (tpr != &tp->prodring[0]) {
6097 memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
6098 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
6099 memset(&tpr->rx_jmb_buffers[0], 0,
6100 TG3_RX_JMB_BUFF_RING_SIZE);
6104 /* Zero out all descriptors. */
6105 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
6107 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6108 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
6109 tp->dev->mtu > ETH_DATA_LEN)
6110 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6111 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6113 /* Initialize invariants of the rings; we only set this
6114 * stuff once. This works because the card does not
6115 * write into the rx buffer posting rings.
6117 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
6118 struct tg3_rx_buffer_desc *rxd;
6120 rxd = &tpr->rx_std[i];
6121 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6122 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6123 rxd->opaque = (RXD_OPAQUE_RING_STD |
6124 (i << RXD_OPAQUE_INDEX_SHIFT));
6127 /* Now allocate fresh SKBs for each rx ring. */
6128 for (i = 0; i < tp->rx_pending; i++) {
6129 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6130 netdev_warn(tp->dev,
6131 "Using a smaller RX standard ring. Only "
6132 "%d out of %d buffers were allocated "
6133 "successfully\n", i, tp->rx_pending);
6141 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
6144 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
6146 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
6149 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
6150 struct tg3_rx_buffer_desc *rxd;
6152 rxd = &tpr->rx_jmb[i].std;
6153 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6154 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6156 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6157 (i << RXD_OPAQUE_INDEX_SHIFT));
6160 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6161 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6162 netdev_warn(tp->dev,
6163 "Using a smaller RX jumbo ring. Only %d "
6164 "out of %d buffers were allocated "
6165 "successfully\n", i, tp->rx_jumbo_pending);
6168 tp->rx_jumbo_pending = i;
6177 tg3_rx_prodring_free(tp, tpr);
6181 static void tg3_rx_prodring_fini(struct tg3 *tp,
6182 struct tg3_rx_prodring_set *tpr)
6184 kfree(tpr->rx_std_buffers);
6185 tpr->rx_std_buffers = NULL;
6186 kfree(tpr->rx_jmb_buffers);
6187 tpr->rx_jmb_buffers = NULL;
6189 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
6190 tpr->rx_std, tpr->rx_std_mapping);
6194 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
6195 tpr->rx_jmb, tpr->rx_jmb_mapping);
6200 static int tg3_rx_prodring_init(struct tg3 *tp,
6201 struct tg3_rx_prodring_set *tpr)
6203 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
6204 if (!tpr->rx_std_buffers)
6207 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
6208 &tpr->rx_std_mapping);
6212 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6213 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
6215 if (!tpr->rx_jmb_buffers)
6218 tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
6219 TG3_RX_JUMBO_RING_BYTES,
6220 &tpr->rx_jmb_mapping);
6228 tg3_rx_prodring_fini(tp, tpr);
6232 /* Free up pending packets in all rx/tx rings.
6234 * The chip has been shut down and the driver detached from
6235 * the networking stack, so no interrupts or new tx packets will
6236 * end up in the driver. tp->{tx,}lock is not held and we are not
6237 * in an interrupt context and thus may sleep.
6239 static void tg3_free_rings(struct tg3 *tp)
6243 for (j = 0; j < tp->irq_cnt; j++) {
6244 struct tg3_napi *tnapi = &tp->napi[j];
6246 tg3_rx_prodring_free(tp, &tp->prodring[j]);
6248 if (!tnapi->tx_buffers)
6251 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6252 struct ring_info *txp;
6253 struct sk_buff *skb;
6256 txp = &tnapi->tx_buffers[i];
6264 pci_unmap_single(tp->pdev,
6265 dma_unmap_addr(txp, mapping),
6272 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6273 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6274 pci_unmap_page(tp->pdev,
6275 dma_unmap_addr(txp, mapping),
6276 skb_shinfo(skb)->frags[k].size,
6281 dev_kfree_skb_any(skb);
6286 /* Initialize tx/rx rings for packet processing.
6288 * The chip has been shut down and the driver detached from
6289 * the networking stack, so no interrupts or new tx packets will
6290 * end up in the driver. tp->{tx,}lock are held and thus
6293 static int tg3_init_rings(struct tg3 *tp)
6297 /* Free up all the SKBs. */
6300 for (i = 0; i < tp->irq_cnt; i++) {
6301 struct tg3_napi *tnapi = &tp->napi[i];
6303 tnapi->last_tag = 0;
6304 tnapi->last_irq_tag = 0;
6305 tnapi->hw_status->status = 0;
6306 tnapi->hw_status->status_tag = 0;
6307 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6312 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6314 tnapi->rx_rcb_ptr = 0;
6316 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6318 if (tg3_rx_prodring_alloc(tp, &tp->prodring[i])) {
6328 * Must not be invoked with interrupt sources disabled and
6329 * the hardware shut down.
6331 static void tg3_free_consistent(struct tg3 *tp)
6335 for (i = 0; i < tp->irq_cnt; i++) {
6336 struct tg3_napi *tnapi = &tp->napi[i];
6338 if (tnapi->tx_ring) {
6339 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
6340 tnapi->tx_ring, tnapi->tx_desc_mapping);
6341 tnapi->tx_ring = NULL;
6344 kfree(tnapi->tx_buffers);
6345 tnapi->tx_buffers = NULL;
6347 if (tnapi->rx_rcb) {
6348 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
6350 tnapi->rx_rcb_mapping);
6351 tnapi->rx_rcb = NULL;
6354 if (tnapi->hw_status) {
6355 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
6357 tnapi->status_mapping);
6358 tnapi->hw_status = NULL;
6363 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
6364 tp->hw_stats, tp->stats_mapping);
6365 tp->hw_stats = NULL;
6368 for (i = 0; i < tp->irq_cnt; i++)
6369 tg3_rx_prodring_fini(tp, &tp->prodring[i]);
6373 * Must not be invoked with interrupt sources disabled and
6374 * the hardware shut down. Can sleep.
6376 static int tg3_alloc_consistent(struct tg3 *tp)
6380 for (i = 0; i < tp->irq_cnt; i++) {
6381 if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
6385 tp->hw_stats = pci_alloc_consistent(tp->pdev,
6386 sizeof(struct tg3_hw_stats),
6387 &tp->stats_mapping);
6391 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6393 for (i = 0; i < tp->irq_cnt; i++) {
6394 struct tg3_napi *tnapi = &tp->napi[i];
6395 struct tg3_hw_status *sblk;
6397 tnapi->hw_status = pci_alloc_consistent(tp->pdev,
6399 &tnapi->status_mapping);
6400 if (!tnapi->hw_status)
6403 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6404 sblk = tnapi->hw_status;
6406 /* If multivector TSS is enabled, vector 0 does not handle
6407 * tx interrupts. Don't allocate any resources for it.
6409 if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) ||
6410 (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) {
6411 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6414 if (!tnapi->tx_buffers)
6417 tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
6419 &tnapi->tx_desc_mapping);
6420 if (!tnapi->tx_ring)
6425 * When RSS is enabled, the status block format changes
6426 * slightly. The "rx_jumbo_consumer", "reserved",
6427 * and "rx_mini_consumer" members get mapped to the
6428 * other three rx return ring producer indexes.
6432 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6435 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6438 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6441 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6445 tnapi->prodring = &tp->prodring[i];
6448 * If multivector RSS is enabled, vector 0 does not handle
6449 * rx or tx interrupts. Don't allocate any resources for it.
6451 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6454 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
6455 TG3_RX_RCB_RING_BYTES(tp),
6456 &tnapi->rx_rcb_mapping);
6460 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6466 tg3_free_consistent(tp);
6470 #define MAX_WAIT_CNT 1000
6472 /* To stop a block, clear the enable bit and poll till it
6473 * clears. tp->lock is held.
6475 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6480 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6487 /* We can't enable/disable these bits of the
6488 * 5705/5750, just say success.
6501 for (i = 0; i < MAX_WAIT_CNT; i++) {
6504 if ((val & enable_bit) == 0)
6508 if (i == MAX_WAIT_CNT && !silent) {
6509 dev_err(&tp->pdev->dev,
6510 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6518 /* tp->lock is held. */
6519 static int tg3_abort_hw(struct tg3 *tp, int silent)
6523 tg3_disable_ints(tp);
6525 tp->rx_mode &= ~RX_MODE_ENABLE;
6526 tw32_f(MAC_RX_MODE, tp->rx_mode);
6529 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6530 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6531 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6532 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6533 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6534 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6536 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6537 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6538 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6539 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6540 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6541 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6542 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6544 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6545 tw32_f(MAC_MODE, tp->mac_mode);
6548 tp->tx_mode &= ~TX_MODE_ENABLE;
6549 tw32_f(MAC_TX_MODE, tp->tx_mode);
6551 for (i = 0; i < MAX_WAIT_CNT; i++) {
6553 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6556 if (i >= MAX_WAIT_CNT) {
6557 dev_err(&tp->pdev->dev,
6558 "%s timed out, TX_MODE_ENABLE will not clear "
6559 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6563 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6564 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6565 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6567 tw32(FTQ_RESET, 0xffffffff);
6568 tw32(FTQ_RESET, 0x00000000);
6570 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6571 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6573 for (i = 0; i < tp->irq_cnt; i++) {
6574 struct tg3_napi *tnapi = &tp->napi[i];
6575 if (tnapi->hw_status)
6576 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6579 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6584 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6589 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6590 if (apedata != APE_SEG_SIG_MAGIC)
6593 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6594 if (!(apedata & APE_FW_STATUS_READY))
6597 /* Wait for up to 1 millisecond for APE to service previous event. */
6598 for (i = 0; i < 10; i++) {
6599 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6602 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6604 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6605 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6606 event | APE_EVENT_STATUS_EVENT_PENDING);
6608 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6610 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6616 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6617 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6620 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6625 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6629 case RESET_KIND_INIT:
6630 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6631 APE_HOST_SEG_SIG_MAGIC);
6632 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6633 APE_HOST_SEG_LEN_MAGIC);
6634 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6635 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6636 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6637 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6638 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6639 APE_HOST_BEHAV_NO_PHYLOCK);
6641 event = APE_EVENT_STATUS_STATE_START;
6643 case RESET_KIND_SHUTDOWN:
6644 /* With the interface we are currently using,
6645 * APE does not track driver state. Wiping
6646 * out the HOST SEGMENT SIGNATURE forces
6647 * the APE to assume OS absent status.
6649 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6651 event = APE_EVENT_STATUS_STATE_UNLOAD;
6653 case RESET_KIND_SUSPEND:
6654 event = APE_EVENT_STATUS_STATE_SUSPEND;
6660 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6662 tg3_ape_send_event(tp, event);
6665 /* tp->lock is held. */
6666 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6668 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6669 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6671 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6673 case RESET_KIND_INIT:
6674 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6678 case RESET_KIND_SHUTDOWN:
6679 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6683 case RESET_KIND_SUSPEND:
6684 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6693 if (kind == RESET_KIND_INIT ||
6694 kind == RESET_KIND_SUSPEND)
6695 tg3_ape_driver_state_change(tp, kind);
6698 /* tp->lock is held. */
6699 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6701 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6703 case RESET_KIND_INIT:
6704 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6705 DRV_STATE_START_DONE);
6708 case RESET_KIND_SHUTDOWN:
6709 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6710 DRV_STATE_UNLOAD_DONE);
6718 if (kind == RESET_KIND_SHUTDOWN)
6719 tg3_ape_driver_state_change(tp, kind);
6722 /* tp->lock is held. */
6723 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6725 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6727 case RESET_KIND_INIT:
6728 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6732 case RESET_KIND_SHUTDOWN:
6733 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6737 case RESET_KIND_SUSPEND:
6738 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6748 static int tg3_poll_fw(struct tg3 *tp)
6753 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6754 /* Wait up to 20ms for init done. */
6755 for (i = 0; i < 200; i++) {
6756 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6763 /* Wait for firmware initialization to complete. */
6764 for (i = 0; i < 100000; i++) {
6765 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6766 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6771 /* Chip might not be fitted with firmware. Some Sun onboard
6772 * parts are configured like that. So don't signal the timeout
6773 * of the above loop as an error, but do report the lack of
6774 * running firmware once.
6777 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6778 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6780 netdev_info(tp->dev, "No firmware running\n");
6783 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6784 /* The 57765 A0 needs a little more
6785 * time to do some important work.
6793 /* Save PCI command register before chip reset */
6794 static void tg3_save_pci_state(struct tg3 *tp)
6796 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6799 /* Restore PCI state after chip reset */
6800 static void tg3_restore_pci_state(struct tg3 *tp)
6804 /* Re-enable indirect register accesses. */
6805 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6806 tp->misc_host_ctrl);
6808 /* Set MAX PCI retry to zero. */
6809 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6810 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6811 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6812 val |= PCISTATE_RETRY_SAME_DMA;
6813 /* Allow reads and writes to the APE register and memory space. */
6814 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6815 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6816 PCISTATE_ALLOW_APE_SHMEM_WR |
6817 PCISTATE_ALLOW_APE_PSPACE_WR;
6818 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6820 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6822 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6823 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6824 pcie_set_readrq(tp->pdev, 4096);
6826 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6827 tp->pci_cacheline_sz);
6828 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6833 /* Make sure PCI-X relaxed ordering bit is clear. */
6834 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6837 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6839 pcix_cmd &= ~PCI_X_CMD_ERO;
6840 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6844 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6846 /* Chip reset on 5780 will reset MSI enable bit,
6847 * so we need to restore it.
6849 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6852 pci_read_config_word(tp->pdev,
6853 tp->msi_cap + PCI_MSI_FLAGS,
6855 pci_write_config_word(tp->pdev,
6856 tp->msi_cap + PCI_MSI_FLAGS,
6857 ctrl | PCI_MSI_FLAGS_ENABLE);
6858 val = tr32(MSGINT_MODE);
6859 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
6864 static void tg3_stop_fw(struct tg3 *);
6866 /* tp->lock is held. */
6867 static int tg3_chip_reset(struct tg3 *tp)
6870 void (*write_op)(struct tg3 *, u32, u32);
6875 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6877 /* No matching tg3_nvram_unlock() after this because
6878 * chip reset below will undo the nvram lock.
6880 tp->nvram_lock_cnt = 0;
6882 /* GRC_MISC_CFG core clock reset will clear the memory
6883 * enable bit in PCI register 4 and the MSI enable bit
6884 * on some chips, so we save relevant registers here.
6886 tg3_save_pci_state(tp);
6888 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
6889 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
6890 tw32(GRC_FASTBOOT_PC, 0);
6893 * We must avoid the readl() that normally takes place.
6894 * It locks machines, causes machine checks, and other
6895 * fun things. So, temporarily disable the 5701
6896 * hardware workaround, while we do the reset.
6898 write_op = tp->write32;
6899 if (write_op == tg3_write_flush_reg32)
6900 tp->write32 = tg3_write32;
6902 /* Prevent the irq handler from reading or writing PCI registers
6903 * during chip reset when the memory enable bit in the PCI command
6904 * register may be cleared. The chip does not generate interrupt
6905 * at this time, but the irq handler may still be called due to irq
6906 * sharing or irqpoll.
6908 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6909 for (i = 0; i < tp->irq_cnt; i++) {
6910 struct tg3_napi *tnapi = &tp->napi[i];
6911 if (tnapi->hw_status) {
6912 tnapi->hw_status->status = 0;
6913 tnapi->hw_status->status_tag = 0;
6915 tnapi->last_tag = 0;
6916 tnapi->last_irq_tag = 0;
6920 for (i = 0; i < tp->irq_cnt; i++)
6921 synchronize_irq(tp->napi[i].irq_vec);
6923 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6924 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
6925 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
6929 val = GRC_MISC_CFG_CORECLK_RESET;
6931 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6932 if (tr32(0x7e2c) == 0x60) {
6935 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6936 tw32(GRC_MISC_CFG, (1 << 29));
6941 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6942 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6943 tw32(GRC_VCPU_EXT_CTRL,
6944 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6947 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6948 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6949 tw32(GRC_MISC_CFG, val);
6951 /* restore 5701 hardware bug workaround write method */
6952 tp->write32 = write_op;
6954 /* Unfortunately, we have to delay before the PCI read back.
6955 * Some 575X chips will not even respond to a PCI cfg access
6956 * when the reset command is given to the chip.
6958 * How do these hardware designers expect things to work
6959 * properly if the PCI write is posted for a long period
6960 * of time? It is always necessary to have some method by
6961 * which a register read back can occur to push out the write
6962 * that performs the reset.
6964 * For most tg3 variants the trick below was working.
6969 /* Flush PCI posted writes. The normal MMIO registers
6970 * are inaccessible at this time so this is the only
6971 * way to do this reliably (actually, this is no longer
6972 * the case, see above). I tried to use indirect
6973 * register read/write but this upset some 5701 variants.
6975 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6979 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6982 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6986 /* Wait for link training to complete. */
6987 for (i = 0; i < 5000; i++)
6990 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6991 pci_write_config_dword(tp->pdev, 0xc4,
6992 cfg_val | (1 << 15));
6995 /* Clear the "no snoop" and "relaxed ordering" bits. */
6996 pci_read_config_word(tp->pdev,
6997 tp->pcie_cap + PCI_EXP_DEVCTL,
6999 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7000 PCI_EXP_DEVCTL_NOSNOOP_EN);
7002 * Older PCIe devices only support the 128 byte
7003 * MPS setting. Enforce the restriction.
7005 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
7006 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7007 pci_write_config_word(tp->pdev,
7008 tp->pcie_cap + PCI_EXP_DEVCTL,
7011 pcie_set_readrq(tp->pdev, 4096);
7013 /* Clear error status */
7014 pci_write_config_word(tp->pdev,
7015 tp->pcie_cap + PCI_EXP_DEVSTA,
7016 PCI_EXP_DEVSTA_CED |
7017 PCI_EXP_DEVSTA_NFED |
7018 PCI_EXP_DEVSTA_FED |
7019 PCI_EXP_DEVSTA_URD);
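/* With the reset complete, restore the PCI configuration that was
 * saved by tg3_save_pci_state() before the core-clock reset.
 */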
7022 tg3_restore_pci_state(tp);
7024 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
7027 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
7028 val = tr32(MEMARB_MODE);
7029 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7031 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7033 tw32(0x5000, 0x400);
7036 tw32(GRC_MODE, tp->grc_mode);
7038 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7041 tw32(0xc4, val | (1 << 15));
7044 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7045 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7046 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7047 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7048 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7049 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7052 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7053 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7054 tw32_f(MAC_MODE, tp->mac_mode);
7055 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7056 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7057 tw32_f(MAC_MODE, tp->mac_mode);
7058 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7059 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
7060 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
7061 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
7062 tw32_f(MAC_MODE, tp->mac_mode);
7064 tw32_f(MAC_MODE, 0);
7067 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7069 err = tg3_poll_fw(tp);
7075 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7076 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7077 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7078 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
7081 tw32(0x7c00, val | (1 << 25));
7084 /* Reprobe ASF enable state. */
7085 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
7086 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
7087 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7088 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7091 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7092 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7093 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7094 tp->last_event_jiffies = jiffies;
7095 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7096 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
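/* tg3_stop_fw: pause the ASF/management firmware by posting the
 * FWCMD_NICDRV_PAUSE_FW command through the firmware mailbox and
 * waiting for the RX CPU to acknowledge the event.
 */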
7103 /* tp->lock is held. */
7104 static void tg3_stop_fw(struct tg3 *tp)
7106 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7107 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7108 /* Wait for RX cpu to ACK the previous event. */
7109 tg3_wait_for_event_ack(tp);
7111 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7113 tg3_generate_fw_event(tp);
7115 /* Wait for RX cpu to ACK this event. */
7116 tg3_wait_for_event_ack(tp);
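/* tg3_halt: bring the chip down -- write the pre-reset signature, abort
 * hardware activity, reset the chip, restore the MAC address and write
 * the legacy/post-reset signatures for the bootcode.
 */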
7120 /* tp->lock is held. */
7121 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7127 tg3_write_sig_pre_reset(tp, kind);
7129 tg3_abort_hw(tp, silent);
7130 err = tg3_chip_reset(tp);
7132 __tg3_set_mac_addr(tp, 0);
7134 tg3_write_sig_legacy(tp, kind);
7135 tg3_write_sig_post_reset(tp, kind);
7143 #define RX_CPU_SCRATCH_BASE 0x30000
7144 #define RX_CPU_SCRATCH_SIZE 0x04000
7145 #define TX_CPU_SCRATCH_BASE 0x34000
7146 #define TX_CPU_SCRATCH_SIZE 0x04000
7148 /* tp->lock is held. */
7149 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7153 BUG_ON(offset == TX_CPU_BASE &&
7154 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
7156 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7157 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7159 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7162 if (offset == RX_CPU_BASE) {
7163 for (i = 0; i < 10000; i++) {
7164 tw32(offset + CPU_STATE, 0xffffffff);
7165 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7166 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7170 tw32(offset + CPU_STATE, 0xffffffff);
7171 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7174 for (i = 0; i < 10000; i++) {
7175 tw32(offset + CPU_STATE, 0xffffffff);
7176 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7177 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7183 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7184 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7188 /* Clear firmware's nvram arbitration. */
7189 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7190 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
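/* fw_info describes a firmware image to be copied into on-chip scratch
 * memory.  The blob layout, as consumed by the loaders below:
 *   fw_data[0]   version
 *   fw_data[1]   load (start) address
 *   fw_data[2]   total length (text + bss)
 *   fw_data[3..] big-endian instruction words, loaded contiguously
 */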
7195 unsigned int fw_base;
7196 unsigned int fw_len;
7197 const __be32 *fw_data;
7200 /* tp->lock is held. */
7201 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7202 int cpu_scratch_size, struct fw_info *info)
7204 int err, lock_err, i;
7205 void (*write_op)(struct tg3 *, u32, u32);
7207 if (cpu_base == TX_CPU_BASE &&
7208 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7210 "%s: Trying to load TX cpu firmware which is 5705\n",
7215 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7216 write_op = tg3_write_mem;
7218 write_op = tg3_write_indirect_reg32;
7220 /* It is possible that bootcode is still loading at this point.
7221 * Get the nvram lock first before halting the cpu.
7223 lock_err = tg3_nvram_lock(tp);
7224 err = tg3_halt_cpu(tp, cpu_base);
7226 tg3_nvram_unlock(tp);
7230 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7231 write_op(tp, cpu_scratch_base + i, 0);
7232 tw32(cpu_base + CPU_STATE, 0xffffffff);
7233 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7234 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7235 write_op(tp, (cpu_scratch_base +
7236 (info->fw_base & 0xffff) +
7238 be32_to_cpu(info->fw_data[i]));
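/* tg3_load_5701_a0_firmware_fix: download the RX and TX CPU patch
 * firmware required by 5701 A0 silicon, then start only the RX CPU and
 * verify that its program counter was accepted.
 */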
7246 /* tp->lock is held. */
7247 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7249 struct fw_info info;
7250 const __be32 *fw_data;
7253 fw_data = (void *)tp->fw->data;
7255 /* Firmware blob starts with version numbers, followed by
7256 start address and length. We are setting complete length.
7257 length = end_address_of_bss - start_address_of_text.
7258 Remainder is the blob to be loaded contiguously
7259 from start address. */
7261 info.fw_base = be32_to_cpu(fw_data[1]);
7262 info.fw_len = tp->fw->size - 12;
7263 info.fw_data = &fw_data[3];
7265 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7266 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7271 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7272 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7277 /* Now startup only the RX cpu. */
7278 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7279 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7281 for (i = 0; i < 5; i++) {
7282 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7284 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7285 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7286 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7290 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7291 "should be %08x\n", __func__,
7292 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7295 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7296 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7301 /* 5705 needs a special version of the TSO firmware. */
7303 /* tp->lock is held. */
7304 static int tg3_load_tso_firmware(struct tg3 *tp)
7306 struct fw_info info;
7307 const __be32 *fw_data;
7308 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7311 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7314 fw_data = (void *)tp->fw->data;
7316 /* Firmware blob starts with version numbers, followed by
7317 start address and length. We are setting complete length.
7318 length = end_address_of_bss - start_address_of_text.
7319 Remainder is the blob to be loaded contiguously
7320 from start address. */
7322 info.fw_base = be32_to_cpu(fw_data[1]);
7323 cpu_scratch_size = tp->fw_len;
7324 info.fw_len = tp->fw->size - 12;
7325 info.fw_data = &fw_data[3];
7327 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7328 cpu_base = RX_CPU_BASE;
7329 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7331 cpu_base = TX_CPU_BASE;
7332 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7333 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7336 err = tg3_load_firmware_cpu(tp, cpu_base,
7337 cpu_scratch_base, cpu_scratch_size,
7342 /* Now startup the cpu. */
7343 tw32(cpu_base + CPU_STATE, 0xffffffff);
7344 tw32_f(cpu_base + CPU_PC, info.fw_base);
7346 for (i = 0; i < 5; i++) {
7347 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7349 tw32(cpu_base + CPU_STATE, 0xffffffff);
7350 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7351 tw32_f(cpu_base + CPU_PC, info.fw_base);
7356 "%s fails to set CPU PC, is %08x should be %08x\n",
7357 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7360 tw32(cpu_base + CPU_STATE, 0xffffffff);
7361 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7366 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7368 struct tg3 *tp = netdev_priv(dev);
7369 struct sockaddr *addr = p;
7370 int err = 0, skip_mac_1 = 0;
7372 if (!is_valid_ether_addr(addr->sa_data))
7375 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7377 if (!netif_running(dev))
7380 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7381 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7383 addr0_high = tr32(MAC_ADDR_0_HIGH);
7384 addr0_low = tr32(MAC_ADDR_0_LOW);
7385 addr1_high = tr32(MAC_ADDR_1_HIGH);
7386 addr1_low = tr32(MAC_ADDR_1_LOW);
7388 /* Skip MAC addr 1 if ASF is using it. */
7389 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7390 !(addr1_high == 0 && addr1_low == 0))
7393 spin_lock_bh(&tp->lock);
7394 __tg3_set_mac_addr(tp, skip_mac_1);
7395 spin_unlock_bh(&tp->lock);
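/* tg3_set_bdinfo: program one TG3_BDINFO control block with the 64-bit
 * host DMA address of a ring, its length/flags word and, on pre-5705
 * chips, the ring's location in NIC SRAM.
 */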
7400 /* tp->lock is held. */
7401 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7402 dma_addr_t mapping, u32 maxlen_flags,
7406 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7407 ((u64) mapping >> 32));
7409 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7410 ((u64) mapping & 0xffffffff));
7412 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7415 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7417 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7421 static void __tg3_set_rx_mode(struct net_device *);
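/* __tg3_set_coalesce: push the ethtool coalescing parameters into the
 * host coalescing engine.  Vector 0 is programmed first; the per-MSI-X
 * vector register blocks (spaced 0x18 apart) are handled in the loops
 * below, with unused vectors zeroed.
 */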
7422 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7426 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) {
7427 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7428 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7429 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7431 tw32(HOSTCC_TXCOL_TICKS, 0);
7432 tw32(HOSTCC_TXMAX_FRAMES, 0);
7433 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7436 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
7437 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7438 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7439 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7441 tw32(HOSTCC_RXCOL_TICKS, 0);
7442 tw32(HOSTCC_RXMAX_FRAMES, 0);
7443 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7446 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7447 u32 val = ec->stats_block_coalesce_usecs;
7449 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7450 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7452 if (!netif_carrier_ok(tp->dev))
7455 tw32(HOSTCC_STAT_COAL_TICKS, val);
7458 for (i = 0; i < tp->irq_cnt - 1; i++) {
7461 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7462 tw32(reg, ec->rx_coalesce_usecs);
7463 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7464 tw32(reg, ec->rx_max_coalesced_frames);
7465 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7466 tw32(reg, ec->rx_max_coalesced_frames_irq);
7468 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7469 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7470 tw32(reg, ec->tx_coalesce_usecs);
7471 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7472 tw32(reg, ec->tx_max_coalesced_frames);
7473 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7474 tw32(reg, ec->tx_max_coalesced_frames_irq);
7478 for (; i < tp->irq_max - 1; i++) {
7479 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7480 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7481 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7483 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7484 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7485 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7486 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
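/* tg3_rings_reset: disable every send and receive-return ring except the
 * first, zero the interrupt mailboxes, clear the status blocks and
 * re-program the BDINFO blocks for the rings that remain in use.
 */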
7491 /* tp->lock is held. */
7492 static void tg3_rings_reset(struct tg3 *tp)
7495 u32 stblk, txrcb, rxrcb, limit;
7496 struct tg3_napi *tnapi = &tp->napi[0];
7498 /* Disable all transmit rings but the first. */
7499 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7500 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7501 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7502 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7504 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7506 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7507 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7508 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7509 BDINFO_FLAGS_DISABLED);
7512 /* Disable all receive return rings but the first. */
7513 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7514 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7515 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7516 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7517 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7518 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7519 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7520 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7522 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7524 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7525 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7526 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7527 BDINFO_FLAGS_DISABLED);
7529 /* Disable interrupts */
7530 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7532 /* Zero mailbox registers. */
7533 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7534 for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
7535 tp->napi[i].tx_prod = 0;
7536 tp->napi[i].tx_cons = 0;
7537 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
7538 tw32_mailbox(tp->napi[i].prodmbox, 0);
7539 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7540 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7542 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
7543 tw32_mailbox(tp->napi[0].prodmbox, 0);
7545 tp->napi[0].tx_prod = 0;
7546 tp->napi[0].tx_cons = 0;
7547 tw32_mailbox(tp->napi[0].prodmbox, 0);
7548 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7551 /* Make sure the NIC-based send BD rings are disabled. */
7552 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7553 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7554 for (i = 0; i < 16; i++)
7555 tw32_tx_mbox(mbox + i * 8, 0);
7558 txrcb = NIC_SRAM_SEND_RCB;
7559 rxrcb = NIC_SRAM_RCV_RET_RCB;
7561 /* Clear status block in ram. */
7562 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7564 /* Set status block DMA address */
7565 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7566 ((u64) tnapi->status_mapping >> 32));
7567 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7568 ((u64) tnapi->status_mapping & 0xffffffff));
7570 if (tnapi->tx_ring) {
7571 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7572 (TG3_TX_RING_SIZE <<
7573 BDINFO_FLAGS_MAXLEN_SHIFT),
7574 NIC_SRAM_TX_BUFFER_DESC);
7575 txrcb += TG3_BDINFO_SIZE;
7578 if (tnapi->rx_rcb) {
7579 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7580 (TG3_RX_RCB_RING_SIZE(tp) <<
7581 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7582 rxrcb += TG3_BDINFO_SIZE;
7585 stblk = HOSTCC_STATBLCK_RING1;
7587 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7588 u64 mapping = (u64)tnapi->status_mapping;
7589 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7590 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7592 /* Clear status block in ram. */
7593 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7595 if (tnapi->tx_ring) {
7596 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7597 (TG3_TX_RING_SIZE <<
7598 BDINFO_FLAGS_MAXLEN_SHIFT),
7599 NIC_SRAM_TX_BUFFER_DESC);
7600 txrcb += TG3_BDINFO_SIZE;
7603 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7604 (TG3_RX_RCB_RING_SIZE(tp) <<
7605 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7608 rxrcb += TG3_BDINFO_SIZE;
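/* tg3_reset_hw: the main hardware initialization path.  It resets the
 * chip, then programs the buffer manager, receive/transmit rings, host
 * coalescing engine, DMA engines, MAC and receive rules in the order the
 * hardware requires.
 */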
7612 /* tp->lock is held. */
7613 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7615 u32 val, rdmac_mode;
7617 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
7619 tg3_disable_ints(tp);
7623 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7625 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
7626 tg3_abort_hw(tp, 1);
7631 err = tg3_chip_reset(tp);
7635 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7637 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7638 val = tr32(TG3_CPMU_CTRL);
7639 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7640 tw32(TG3_CPMU_CTRL, val);
7642 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7643 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7644 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7645 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7647 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7648 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7649 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7650 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7652 val = tr32(TG3_CPMU_HST_ACC);
7653 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7654 val |= CPMU_HST_ACC_MACCLK_6_25;
7655 tw32(TG3_CPMU_HST_ACC, val);
7658 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7659 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7660 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7661 PCIE_PWR_MGMT_L1_THRESH_4MS;
7662 tw32(PCIE_PWR_MGMT_THRESH, val);
7664 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7665 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7667 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7669 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7670 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7673 if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
7674 u32 grc_mode = tr32(GRC_MODE);
7676 /* Access the lower 1K of PL PCIE block registers. */
7677 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7678 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7680 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7681 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7682 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7684 tw32(GRC_MODE, grc_mode);
7687 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7688 u32 grc_mode = tr32(GRC_MODE);
7690 /* Access the lower 1K of PL PCIE block registers. */
7691 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7692 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7694 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5);
7695 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7696 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7698 tw32(GRC_MODE, grc_mode);
7700 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7701 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7702 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7703 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7706 /* This works around an issue with Athlon chipsets on
7707 * B3 tigon3 silicon. This bit has no effect on any
7708 * other revision. But do not set this on PCI Express
7709 * chips and don't even touch the clocks if the CPMU is present.
7711 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7712 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7713 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7714 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7717 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7718 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7719 val = tr32(TG3PCI_PCISTATE);
7720 val |= PCISTATE_RETRY_SAME_DMA;
7721 tw32(TG3PCI_PCISTATE, val);
7724 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7725 /* Allow reads and writes to the
7726 * APE register and memory space.
7728 val = tr32(TG3PCI_PCISTATE);
7729 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7730 PCISTATE_ALLOW_APE_SHMEM_WR |
7731 PCISTATE_ALLOW_APE_PSPACE_WR;
7732 tw32(TG3PCI_PCISTATE, val);
7735 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7736 /* Enable some hw fixes. */
7737 val = tr32(TG3PCI_MSI_DATA);
7738 val |= (1 << 26) | (1 << 28) | (1 << 29);
7739 tw32(TG3PCI_MSI_DATA, val);
7742 /* Descriptor ring init may make accesses to the
7743 * NIC SRAM area to set up the TX descriptors, so we
7744 * can only do this after the hardware has been
7745 * successfully reset.
7747 err = tg3_init_rings(tp);
7751 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
7752 val = tr32(TG3PCI_DMA_RW_CTRL) &
7753 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7754 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
7755 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
7756 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
7757 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7758 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7759 /* This value is determined during the probe time DMA
7760 * engine test, tg3_test_dma.
7762 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7765 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7766 GRC_MODE_4X_NIC_SEND_RINGS |
7767 GRC_MODE_NO_TX_PHDR_CSUM |
7768 GRC_MODE_NO_RX_PHDR_CSUM);
7769 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7771 /* Pseudo-header checksum is done by hardware logic and not
7772 * the offload processors, so make the chip do the pseudo-
7773 * header checksums on receive. For transmit it is more
7774 * convenient to do the pseudo-header checksum in software
7775 * as Linux does that on transmit for us in all cases.
7777 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7781 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7783 /* Set up the timer prescaler register. The clock is always 66 MHz. */
7784 val = tr32(GRC_MISC_CFG);
7786 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7787 tw32(GRC_MISC_CFG, val);
7789 /* Initialize MBUF/DESC pool. */
7790 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7792 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7793 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7794 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7795 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7797 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7798 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7799 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7800 } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7803 fw_len = tp->fw_len;
7804 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7805 tw32(BUFMGR_MB_POOL_ADDR,
7806 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7807 tw32(BUFMGR_MB_POOL_SIZE,
7808 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7811 if (tp->dev->mtu <= ETH_DATA_LEN) {
7812 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7813 tp->bufmgr_config.mbuf_read_dma_low_water);
7814 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7815 tp->bufmgr_config.mbuf_mac_rx_low_water);
7816 tw32(BUFMGR_MB_HIGH_WATER,
7817 tp->bufmgr_config.mbuf_high_water);
7819 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7820 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7821 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7822 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7823 tw32(BUFMGR_MB_HIGH_WATER,
7824 tp->bufmgr_config.mbuf_high_water_jumbo);
7826 tw32(BUFMGR_DMA_LOW_WATER,
7827 tp->bufmgr_config.dma_low_water);
7828 tw32(BUFMGR_DMA_HIGH_WATER,
7829 tp->bufmgr_config.dma_high_water);
7831 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7832 for (i = 0; i < 2000; i++) {
7833 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7838 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
7842 /* Setup replenish threshold. */
7843 val = tp->rx_pending / 8;
7846 else if (val > tp->rx_std_max_post)
7847 val = tp->rx_std_max_post;
7848 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7849 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7850 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7852 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7853 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7856 tw32(RCVBDI_STD_THRESH, val);
7858 /* Initialize TG3_BDINFO's at:
7859 * RCVDBDI_STD_BD: standard eth size rx ring
7860 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7861 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7864 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7865 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7866 * ring attribute flags
7867 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7869 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7870 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7872 * The size of each ring is fixed in the firmware, but the location is
7875 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7876 ((u64) tpr->rx_std_mapping >> 32));
7877 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7878 ((u64) tpr->rx_std_mapping & 0xffffffff));
7879 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
7880 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
7881 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7882 NIC_SRAM_RX_BUFFER_DESC);
7884 /* Disable the mini ring */
7885 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7886 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7887 BDINFO_FLAGS_DISABLED);
7889 /* Program the jumbo buffer descriptor ring control
7890 * blocks on those devices that have them.
7892 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
7893 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
7894 /* Setup replenish threshold. */
7895 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7897 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7898 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7899 ((u64) tpr->rx_jmb_mapping >> 32));
7900 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7901 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
7902 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7903 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7904 BDINFO_FLAGS_USE_EXT_RECV);
7905 if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) ||
7906 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7907 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7908 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7910 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7911 BDINFO_FLAGS_DISABLED);
7914 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
7915 val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
7916 (TG3_RX_STD_DMA_SZ << 2);
7918 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
7920 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
7922 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
7924 tpr->rx_std_prod_idx = tp->rx_pending;
7925 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
7927 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7928 tp->rx_jumbo_pending : 0;
7929 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
7931 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
7932 tw32(STD_REPLENISH_LWM, 32);
7933 tw32(JMB_REPLENISH_LWM, 16);
7936 tg3_rings_reset(tp);
7938 /* Initialize MAC address and backoff seed. */
7939 __tg3_set_mac_addr(tp, 0);
7941 /* MTU + ethernet header + FCS + optional VLAN tag */
7942 tw32(MAC_RX_MTU_SIZE,
7943 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
7945 /* The slot time is changed by tg3_setup_phy if we
7946 * run at gigabit with half duplex.
7948 tw32(MAC_TX_LENGTHS,
7949 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7950 (6 << TX_LENGTHS_IPG_SHIFT) |
7951 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7953 /* Receive rules. */
7954 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7955 tw32(RCVLPC_CONFIG, 0x0181);
7957 /* Calculate RDMAC_MODE setting early, we need it to determine
7958 * the RCVLPC_STATE_ENABLE mask.
7960 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7961 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7962 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7963 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7964 RDMAC_MODE_LNGREAD_ENAB);
7966 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7967 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7968 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
7970 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7971 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7972 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7973 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7974 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7975 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7977 /* If statement applies to 5705 and 5750 PCI devices only */
7978 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7979 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7980 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7981 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7982 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7983 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7984 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7985 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7986 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7990 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7991 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7993 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7994 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
7996 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
7997 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7998 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7999 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8001 /* Receive/send statistics. */
8002 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
8003 val = tr32(RCVLPC_STATS_ENABLE);
8004 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8005 tw32(RCVLPC_STATS_ENABLE, val);
8006 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8007 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8008 val = tr32(RCVLPC_STATS_ENABLE);
8009 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8010 tw32(RCVLPC_STATS_ENABLE, val);
8012 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8014 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8015 tw32(SNDDATAI_STATSENAB, 0xffffff);
8016 tw32(SNDDATAI_STATSCTRL,
8017 (SNDDATAI_SCTRL_ENABLE |
8018 SNDDATAI_SCTRL_FASTUPD));
8020 /* Setup host coalescing engine. */
8021 tw32(HOSTCC_MODE, 0);
8022 for (i = 0; i < 2000; i++) {
8023 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8028 __tg3_set_coalesce(tp, &tp->coal);
8030 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8031 /* Status/statistics block address. See tg3_timer,
8032 * the tg3_periodic_fetch_stats call there, and
8033 * tg3_get_stats to see how this works for 5705/5750 chips.
8035 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8036 ((u64) tp->stats_mapping >> 32));
8037 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8038 ((u64) tp->stats_mapping & 0xffffffff));
8039 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8041 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8043 /* Clear statistics and status block memory areas */
8044 for (i = NIC_SRAM_STATS_BLK;
8045 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8047 tg3_write_mem(tp, i, 0);
8052 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8054 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8055 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8056 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8057 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8059 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8060 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
8061 /* reset to prevent losing 1st rx packet intermittently */
8062 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8066 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8067 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8070 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8071 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8072 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8073 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8074 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8075 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8076 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8079 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8080 * If TG3_FLG2_IS_NIC is zero, we should read the
8081 * register to preserve the GPIO settings for LOMs. The GPIOs,
8082 * whether used as inputs or outputs, are set by boot code after
8085 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
8088 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8089 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8090 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8092 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8093 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8094 GRC_LCLCTRL_GPIO_OUTPUT3;
8096 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8097 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8099 tp->grc_local_ctrl &= ~gpio_mask;
8100 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8102 /* GPIO1 must be driven high for eeprom write protect */
8103 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
8104 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8105 GRC_LCLCTRL_GPIO_OUTPUT1);
8107 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8110 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
8111 val = tr32(MSGINT_MODE);
8112 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8113 tw32(MSGINT_MODE, val);
8116 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8117 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8121 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8122 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8123 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8124 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8125 WDMAC_MODE_LNGREAD_ENAB);
8127 /* If statement applies to 5705 and 5750 PCI devices only */
8128 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8129 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8130 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8131 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8132 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8133 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8135 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8136 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8137 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8138 val |= WDMAC_MODE_RX_ACCEL;
8142 /* Enable host coalescing bug fix */
8143 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8144 val |= WDMAC_MODE_STATUS_TAG_FIX;
8146 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8147 val |= WDMAC_MODE_BURST_ALL_DATA;
8149 tw32_f(WDMAC_MODE, val);
8152 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
8155 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8157 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8158 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8159 pcix_cmd |= PCI_X_CMD_READ_2K;
8160 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8161 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8162 pcix_cmd |= PCI_X_CMD_READ_2K;
8164 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8168 tw32_f(RDMAC_MODE, rdmac_mode);
8171 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8172 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8173 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8175 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8177 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8179 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8181 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8182 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8183 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
8184 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8185 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8186 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8187 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8188 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
8189 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8190 tw32(SNDBDI_MODE, val);
8191 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8193 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8194 err = tg3_load_5701_a0_firmware_fix(tp);
8199 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
8200 err = tg3_load_tso_firmware(tp);
8205 tp->tx_mode = TX_MODE_ENABLE;
8206 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
8207 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8208 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8209 tw32_f(MAC_TX_MODE, tp->tx_mode);
8212 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
8213 u32 reg = MAC_RSS_INDIR_TBL_0;
8214 u8 *ent = (u8 *)&val;
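/* Four table entries are packed into each 32-bit register; each byte
 * selects one of the (irq_cnt - 1) rx return rings, spreading flows
 * round-robin (vector 0 handles link events only).
 */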
8216 /* Setup the indirection table */
8217 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8218 int idx = i % sizeof(val);
8220 ent[idx] = i % (tp->irq_cnt - 1);
8221 if (idx == sizeof(val) - 1) {
8227 /* Setup the "secret" hash key. */
8228 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8229 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8230 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8231 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8232 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8233 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8234 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8235 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8236 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8237 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8240 tp->rx_mode = RX_MODE_ENABLE;
8241 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8242 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8244 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
8245 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8246 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8247 RX_MODE_RSS_IPV6_HASH_EN |
8248 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8249 RX_MODE_RSS_IPV4_HASH_EN |
8250 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8252 tw32_f(MAC_RX_MODE, tp->rx_mode);
8255 tw32(MAC_LED_CTRL, tp->led_ctrl);
8257 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8258 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8259 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8262 tw32_f(MAC_RX_MODE, tp->rx_mode);
8265 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8266 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8267 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
8268 /* Set drive transmission level to 1.2V */
8269 /* only if the signal pre-emphasis bit is not set */
8270 val = tr32(MAC_SERDES_CFG);
8273 tw32(MAC_SERDES_CFG, val);
8275 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8276 tw32(MAC_SERDES_CFG, 0x616000);
8279 /* Prevent chip from dropping frames when flow control
8282 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8286 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8288 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8289 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8290 /* Use hardware link auto-negotiation */
8291 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
8294 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
8295 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8298 tmp = tr32(SERDES_RX_CTRL);
8299 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8300 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8301 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8302 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8305 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
8306 if (tp->link_config.phy_is_low_power) {
8307 tp->link_config.phy_is_low_power = 0;
8308 tp->link_config.speed = tp->link_config.orig_speed;
8309 tp->link_config.duplex = tp->link_config.orig_duplex;
8310 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8313 err = tg3_setup_phy(tp, 0);
8317 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8318 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) {
8321 /* Clear CRC stats. */
8322 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8323 tg3_writephy(tp, MII_TG3_TEST1,
8324 tmp | MII_TG3_TEST1_CRC_EN);
8325 tg3_readphy(tp, 0x14, &tmp);
8330 __tg3_set_rx_mode(tp->dev);
8332 /* Initialize receive rules. */
8333 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8334 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8335 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8336 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8338 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8339 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
8343 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
8347 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8349 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8351 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8353 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8355 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8357 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8359 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8361 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8363 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8365 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8367 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8369 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8371 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8373 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8381 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8382 /* Write our heartbeat update interval to APE. */
8383 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8384 APE_HOST_HEARTBEAT_INT_DISABLE);
8386 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8391 /* Called at device open time to get the chip ready for
8392 * packet processing. Invoked with tp->lock held.
8394 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8396 tg3_switch_clocks(tp);
8398 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8400 return tg3_reset_hw(tp, reset_phy);
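/* TG3_STAT_ADD32 accumulates a 32-bit hardware counter into a 64-bit
 * software counter; a wrap of the low word carries into the high word.
 */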
8403 #define TG3_STAT_ADD32(PSTAT, REG) \
8404 do { u32 __val = tr32(REG); \
8405 (PSTAT)->low += __val; \
8406 if ((PSTAT)->low < __val) \
8407 (PSTAT)->high += 1; \
8410 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8412 struct tg3_hw_stats *sp = tp->hw_stats;
8414 if (!netif_carrier_ok(tp->dev))
8417 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8418 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8419 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8420 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8421 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8422 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8423 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8424 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8425 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8426 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8427 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8428 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8429 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8431 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8432 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8433 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8434 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8435 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8436 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8437 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8438 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8439 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8440 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8441 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8442 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8443 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8444 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8446 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8447 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8448 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
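/* tg3_timer: periodic housekeeping run from a kernel timer.  It kicks the
 * non-tagged status workaround, fetches MAC statistics, polls link state
 * on chips that need it, sends the ASF heartbeat and re-arms itself.
 */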
8451 static void tg3_timer(unsigned long __opaque)
8453 struct tg3 *tp = (struct tg3 *) __opaque;
8458 spin_lock(&tp->lock);
8460 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8461 /* All of this garbage is here because, when using non-tagged
8462 * IRQ status, the mailbox/status_block protocol the chip
8463 * uses with the cpu is race prone.
8465 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8466 tw32(GRC_LOCAL_CTRL,
8467 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8469 tw32(HOSTCC_MODE, tp->coalesce_mode |
8470 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8473 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8474 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
8475 spin_unlock(&tp->lock);
8476 schedule_work(&tp->reset_task);
8481 /* This part only runs once per second. */
8482 if (!--tp->timer_counter) {
8483 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8484 tg3_periodic_fetch_stats(tp);
8486 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
8490 mac_stat = tr32(MAC_STATUS);
8493 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
8494 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8496 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8500 tg3_setup_phy(tp, 0);
8501 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
8502 u32 mac_stat = tr32(MAC_STATUS);
8505 if (netif_carrier_ok(tp->dev) &&
8506 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8509 if (!netif_carrier_ok(tp->dev) &&
8510 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8511 MAC_STATUS_SIGNAL_DET))) {
8515 if (!tp->serdes_counter) {
8518 ~MAC_MODE_PORT_MODE_MASK));
8520 tw32_f(MAC_MODE, tp->mac_mode);
8523 tg3_setup_phy(tp, 0);
8525 } else if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
8526 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8527 tg3_serdes_parallel_detect(tp);
8530 tp->timer_counter = tp->timer_multiplier;
8533 /* Heartbeat is only sent once every 2 seconds.
8535 * The heartbeat is to tell the ASF firmware that the host
8536 * driver is still alive. In the event that the OS crashes,
8537 * ASF needs to reset the hardware to free up the FIFO space
8538 * that may be filled with rx packets destined for the host.
8539 * If the FIFO is full, ASF will no longer function properly.
8541 * Unintended resets have been reported on real time kernels
8542 * where the timer doesn't run on time. Netpoll will also have
8545 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8546 * to check the ring condition when the heartbeat is expiring
8547 * before doing the reset. This will prevent most unintended
8550 if (!--tp->asf_counter) {
8551 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
8552 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
8553 tg3_wait_for_event_ack(tp);
8555 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8556 FWCMD_NICDRV_ALIVE3);
8557 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8558 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8559 TG3_FW_UPDATE_TIMEOUT_SEC);
8561 tg3_generate_fw_event(tp);
8563 tp->asf_counter = tp->asf_multiplier;
8566 spin_unlock(&tp->lock);
8569 tp->timer.expires = jiffies + tp->timer_offset;
8570 add_timer(&tp->timer);
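/* tg3_request_irq: request the interrupt line for one NAPI vector,
 * choosing handler and flags based on MSI/MSI-X and tagged-status
 * support, and using a per-vector "<ifname>-<n>" name when more than
 * one vector is in use.
 */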
8573 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8576 unsigned long flags;
8578 struct tg3_napi *tnapi = &tp->napi[irq_num];
8580 if (tp->irq_cnt == 1)
8581 name = tp->dev->name;
8583 name = &tnapi->irq_lbl[0];
8584 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8585 name[IFNAMSIZ-1] = 0;
8588 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8590 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8592 flags = IRQF_SAMPLE_RANDOM;
8595 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8596 fn = tg3_interrupt_tagged;
8597 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
8600 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
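/* tg3_test_interrupt: force the host coalescing engine to raise an
 * interrupt and poll the mailbox to verify that one was actually
 * delivered; used to detect broken MSI routing.
 */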
8603 static int tg3_test_interrupt(struct tg3 *tp)
8605 struct tg3_napi *tnapi = &tp->napi[0];
8606 struct net_device *dev = tp->dev;
8607 int err, i, intr_ok = 0;
8610 if (!netif_running(dev))
8613 tg3_disable_ints(tp);
8615 free_irq(tnapi->irq_vec, tnapi);
8618 * Turn off MSI one shot mode. Otherwise this test has no
8619 * observable way to know whether the interrupt was delivered.
8621 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
8622 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8623 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8624 tw32(MSGINT_MODE, val);
8627 err = request_irq(tnapi->irq_vec, tg3_test_isr,
8628 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8632 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8633 tg3_enable_ints(tp);
8635 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8638 for (i = 0; i < 5; i++) {
8639 u32 int_mbox, misc_host_ctrl;
8641 int_mbox = tr32_mailbox(tnapi->int_mbox);
8642 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8644 if ((int_mbox != 0) ||
8645 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8653 tg3_disable_ints(tp);
8655 free_irq(tnapi->irq_vec, tnapi);
8657 err = tg3_request_irq(tp, 0);
8663 /* Reenable MSI one shot mode. */
8664 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
8665 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8666 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
8667 tw32(MSGINT_MODE, val);
8675 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
8676 * successfully restored.
8678 static int tg3_test_msi(struct tg3 *tp)
8683 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8686 /* Turn off SERR reporting in case MSI terminates with Master
8689 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8690 pci_write_config_word(tp->pdev, PCI_COMMAND,
8691 pci_cmd & ~PCI_COMMAND_SERR);
8693 err = tg3_test_interrupt(tp);
8695 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8700 /* other failures */
8704 /* MSI test failed, go back to INTx mode */
8705 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
8706 "to INTx mode. Please report this failure to the PCI "
8707 "maintainer and include system chipset information\n");
8709 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8711 pci_disable_msi(tp->pdev);
8713 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8714 tp->napi[0].irq_vec = tp->pdev->irq;
8716 err = tg3_request_irq(tp, 0);
8720 /* Need to reset the chip because the MSI cycle may have terminated
8721 * with Master Abort.
8723 tg3_full_lock(tp, 1);
8725 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8726 err = tg3_init_hw(tp, 1);
8728 tg3_full_unlock(tp);
8731 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8736 static int tg3_request_firmware(struct tg3 *tp)
8738 const __be32 *fw_data;
8740 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
8741 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
8746 fw_data = (void *)tp->fw->data;
8748 /* Firmware blob starts with version numbers, followed by
8749 * start address and _full_ length including BSS sections
8750 * (which must be longer than the actual data, of course
8753 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
8754 if (tp->fw_len < (tp->fw->size - 12)) {
8755 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
8756 tp->fw_len, tp->fw_needed);
8757 release_firmware(tp->fw);
8762 /* We no longer need firmware; we have it. */
8763 tp->fw_needed = NULL;
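/* tg3_enable_msix: try to allocate one MSI-X vector per online CPU plus
 * one for link events; on partial allocation retry with whatever the PCI
 * core offered, and enable RSS (and TSS on 5717/5719) when more than one
 * vector is granted.
 */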
8767 static bool tg3_enable_msix(struct tg3 *tp)
8769 int i, rc, cpus = num_online_cpus();
8770 struct msix_entry msix_ent[tp->irq_max];
8773 /* Just fallback to the simpler MSI mode. */
8777 * We want as many rx rings enabled as there are cpus.
8778 * The first MSIX vector only deals with link interrupts, etc,
8779 * so we add one to the number of vectors we are requesting.
8781 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
8783 for (i = 0; i < tp->irq_max; i++) {
8784 msix_ent[i].entry = i;
8785 msix_ent[i].vector = 0;
8788 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
8791 } else if (rc != 0) {
8792 if (pci_enable_msix(tp->pdev, msix_ent, rc))
8794 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
8799 for (i = 0; i < tp->irq_max; i++)
8800 tp->napi[i].irq_vec = msix_ent[i].vector;
8802 tp->dev->real_num_tx_queues = 1;
8803 if (tp->irq_cnt > 1) {
8804 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
8806 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8807 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8808 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
8809 tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
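/* tg3_ints_init: choose the interrupt mechanism for this device -- MSI-X,
 * MSI or legacy INTx -- and record the vector and queue count for the
 * single-vector case.
 */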
8816 static void tg3_ints_init(struct tg3 *tp)
8818 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
8819 !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8820 /* All MSI supporting chips should support tagged
8821 * status. Assert that this is the case.
8823 netdev_warn(tp->dev,
8824 "MSI without TAGGED_STATUS? Not using MSI\n");
8828 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
8829 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
8830 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
8831 pci_enable_msi(tp->pdev) == 0)
8832 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8834 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8835 u32 msi_mode = tr32(MSGINT_MODE);
8836 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8837 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
8838 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8841 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
8843 tp->napi[0].irq_vec = tp->pdev->irq;
8844 tp->dev->real_num_tx_queues = 1;
8848 static void tg3_ints_fini(struct tg3 *tp)
8850 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8851 pci_disable_msix(tp->pdev);
8852 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
8853 pci_disable_msi(tp->pdev);
8854 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
8855 tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS);
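/* tg3_open: ndo_open handler.  Loads any needed firmware, powers the chip
 * to D0, sets up interrupt vectors and NAPI, allocates the DMA rings,
 * initializes the hardware, starts the periodic timer and enables the TX
 * queues.
 */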
8858 static int tg3_open(struct net_device *dev)
8860 struct tg3 *tp = netdev_priv(dev);
8863 if (tp->fw_needed) {
8864 err = tg3_request_firmware(tp);
8865 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8869 netdev_warn(tp->dev, "TSO capability disabled\n");
8870 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8871 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8872 netdev_notice(tp->dev, "TSO capability restored\n");
8873 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8877 netif_carrier_off(tp->dev);
8879 err = tg3_set_power_state(tp, PCI_D0);
8883 tg3_full_lock(tp, 0);
8885 tg3_disable_ints(tp);
8886 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8888 tg3_full_unlock(tp);
8891 * Setup interrupts first so we know how
8892 * many NAPI resources to allocate
8896 /* The placement of this call is tied
8897 * to the setup and use of Host TX descriptors.
8899 err = tg3_alloc_consistent(tp);
8903 tg3_napi_enable(tp);
8905 for (i = 0; i < tp->irq_cnt; i++) {
8906 struct tg3_napi *tnapi = &tp->napi[i];
8907 err = tg3_request_irq(tp, i);
8909 for (i--; i >= 0; i--)
8910 free_irq(tnapi->irq_vec, tnapi);
8918 tg3_full_lock(tp, 0);
8920 err = tg3_init_hw(tp, 1);
8922 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8925 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8926 tp->timer_offset = HZ;
8928 tp->timer_offset = HZ / 10;
8930 BUG_ON(tp->timer_offset > HZ);
8931 tp->timer_counter = tp->timer_multiplier =
8932 (HZ / tp->timer_offset);
8933 tp->asf_counter = tp->asf_multiplier =
8934 ((HZ / tp->timer_offset) * 2);
8936 init_timer(&tp->timer);
8937 tp->timer.expires = jiffies + tp->timer_offset;
8938 tp->timer.data = (unsigned long) tp;
8939 tp->timer.function = tg3_timer;
8942 tg3_full_unlock(tp);
8947 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8948 err = tg3_test_msi(tp);
8951 tg3_full_lock(tp, 0);
8952 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8954 tg3_full_unlock(tp);
8959 if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
8960 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8961 u32 val = tr32(PCIE_TRANSACTION_CFG);
8963 tw32(PCIE_TRANSACTION_CFG,
8964 val | PCIE_TRANS_CFG_1SHOT_MSI);
8970 tg3_full_lock(tp, 0);
8972 add_timer(&tp->timer);
8973 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8974 tg3_enable_ints(tp);
8976 tg3_full_unlock(tp);
8978 netif_tx_start_all_queues(dev);
8983 for (i = tp->irq_cnt - 1; i >= 0; i--) {
8984 struct tg3_napi *tnapi = &tp->napi[i];
8985 free_irq(tnapi->irq_vec, tnapi);
8989 tg3_napi_disable(tp);
8990 tg3_free_consistent(tp);
8997 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
8998 struct rtnl_link_stats64 *);
8999 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9001 static int tg3_close(struct net_device *dev)
9004 struct tg3 *tp = netdev_priv(dev);
9006 tg3_napi_disable(tp);
9007 cancel_work_sync(&tp->reset_task);
9009 netif_tx_stop_all_queues(dev);
9011 del_timer_sync(&tp->timer);
9015 tg3_full_lock(tp, 1);
9017 tg3_disable_ints(tp);
9019 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9021 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
9023 tg3_full_unlock(tp);
9025 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9026 struct tg3_napi *tnapi = &tp->napi[i];
9027 free_irq(tnapi->irq_vec, tnapi);
9032 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9034 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9035 sizeof(tp->estats_prev));
9037 tg3_free_consistent(tp);
9039 tg3_set_power_state(tp, PCI_D3hot);
9041 netif_carrier_off(tp->dev);
9046 static inline u64 get_stat64(tg3_stat64_t *val)
9048 return ((u64)val->high << 32) | ((u64)val->low);
9051 static u64 calc_crc_errors(struct tg3 *tp)
9053 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9055 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
9056 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9057 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9060 spin_lock_bh(&tp->lock);
9061 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9062 tg3_writephy(tp, MII_TG3_TEST1,
9063 val | MII_TG3_TEST1_CRC_EN);
9064 tg3_readphy(tp, 0x14, &val);
9067 spin_unlock_bh(&tp->lock);
9069 tp->phy_crc_errors += val;
9071 return tp->phy_crc_errors;
9074 return get_stat64(&hw_stats->rx_fcs_errors);
9077 #define ESTAT_ADD(member) \
9078 estats->member = old_estats->member + \
9079 get_stat64(&hw_stats->member)
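/* For reference, one expansion of the macro above (editorial illustration).
 * The previous snapshot is folded in because the MAC's hardware counters
 * restart from zero whenever the chip is reset:
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 */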
9081 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9083 struct tg3_ethtool_stats *estats = &tp->estats;
9084 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9085 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9090 ESTAT_ADD(rx_octets);
9091 ESTAT_ADD(rx_fragments);
9092 ESTAT_ADD(rx_ucast_packets);
9093 ESTAT_ADD(rx_mcast_packets);
9094 ESTAT_ADD(rx_bcast_packets);
9095 ESTAT_ADD(rx_fcs_errors);
9096 ESTAT_ADD(rx_align_errors);
9097 ESTAT_ADD(rx_xon_pause_rcvd);
9098 ESTAT_ADD(rx_xoff_pause_rcvd);
9099 ESTAT_ADD(rx_mac_ctrl_rcvd);
9100 ESTAT_ADD(rx_xoff_entered);
9101 ESTAT_ADD(rx_frame_too_long_errors);
9102 ESTAT_ADD(rx_jabbers);
9103 ESTAT_ADD(rx_undersize_packets);
9104 ESTAT_ADD(rx_in_length_errors);
9105 ESTAT_ADD(rx_out_length_errors);
9106 ESTAT_ADD(rx_64_or_less_octet_packets);
9107 ESTAT_ADD(rx_65_to_127_octet_packets);
9108 ESTAT_ADD(rx_128_to_255_octet_packets);
9109 ESTAT_ADD(rx_256_to_511_octet_packets);
9110 ESTAT_ADD(rx_512_to_1023_octet_packets);
9111 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9112 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9113 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9114 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9115 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9117 ESTAT_ADD(tx_octets);
9118 ESTAT_ADD(tx_collisions);
9119 ESTAT_ADD(tx_xon_sent);
9120 ESTAT_ADD(tx_xoff_sent);
9121 ESTAT_ADD(tx_flow_control);
9122 ESTAT_ADD(tx_mac_errors);
9123 ESTAT_ADD(tx_single_collisions);
9124 ESTAT_ADD(tx_mult_collisions);
9125 ESTAT_ADD(tx_deferred);
9126 ESTAT_ADD(tx_excessive_collisions);
9127 ESTAT_ADD(tx_late_collisions);
9128 ESTAT_ADD(tx_collide_2times);
9129 ESTAT_ADD(tx_collide_3times);
9130 ESTAT_ADD(tx_collide_4times);
9131 ESTAT_ADD(tx_collide_5times);
9132 ESTAT_ADD(tx_collide_6times);
9133 ESTAT_ADD(tx_collide_7times);
9134 ESTAT_ADD(tx_collide_8times);
9135 ESTAT_ADD(tx_collide_9times);
9136 ESTAT_ADD(tx_collide_10times);
9137 ESTAT_ADD(tx_collide_11times);
9138 ESTAT_ADD(tx_collide_12times);
9139 ESTAT_ADD(tx_collide_13times);
9140 ESTAT_ADD(tx_collide_14times);
9141 ESTAT_ADD(tx_collide_15times);
9142 ESTAT_ADD(tx_ucast_packets);
9143 ESTAT_ADD(tx_mcast_packets);
9144 ESTAT_ADD(tx_bcast_packets);
9145 ESTAT_ADD(tx_carrier_sense_errors);
9146 ESTAT_ADD(tx_discards);
9147 ESTAT_ADD(tx_errors);
9149 ESTAT_ADD(dma_writeq_full);
9150 ESTAT_ADD(dma_write_prioq_full);
9151 ESTAT_ADD(rxbds_empty);
9152 ESTAT_ADD(rx_discards);
9153 ESTAT_ADD(rx_errors);
9154 ESTAT_ADD(rx_threshold_hit);
9156 ESTAT_ADD(dma_readq_full);
9157 ESTAT_ADD(dma_read_prioq_full);
9158 ESTAT_ADD(tx_comp_queue_full);
9160 ESTAT_ADD(ring_set_send_prod_index);
9161 ESTAT_ADD(ring_status_update);
9162 ESTAT_ADD(nic_irqs);
9163 ESTAT_ADD(nic_avoided_irqs);
9164 ESTAT_ADD(nic_tx_threshold_hit);
9169 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9170 struct rtnl_link_stats64 *stats)
9172 struct tg3 *tp = netdev_priv(dev);
9173 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9174 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9179 stats->rx_packets = old_stats->rx_packets +
9180 get_stat64(&hw_stats->rx_ucast_packets) +
9181 get_stat64(&hw_stats->rx_mcast_packets) +
9182 get_stat64(&hw_stats->rx_bcast_packets);
9184 stats->tx_packets = old_stats->tx_packets +
9185 get_stat64(&hw_stats->tx_ucast_packets) +
9186 get_stat64(&hw_stats->tx_mcast_packets) +
9187 get_stat64(&hw_stats->tx_bcast_packets);
9189 stats->rx_bytes = old_stats->rx_bytes +
9190 get_stat64(&hw_stats->rx_octets);
9191 stats->tx_bytes = old_stats->tx_bytes +
9192 get_stat64(&hw_stats->tx_octets);
9194 stats->rx_errors = old_stats->rx_errors +
9195 get_stat64(&hw_stats->rx_errors);
9196 stats->tx_errors = old_stats->tx_errors +
9197 get_stat64(&hw_stats->tx_errors) +
9198 get_stat64(&hw_stats->tx_mac_errors) +
9199 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9200 get_stat64(&hw_stats->tx_discards);
9202 stats->multicast = old_stats->multicast +
9203 get_stat64(&hw_stats->rx_mcast_packets);
9204 stats->collisions = old_stats->collisions +
9205 get_stat64(&hw_stats->tx_collisions);
9207 stats->rx_length_errors = old_stats->rx_length_errors +
9208 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9209 get_stat64(&hw_stats->rx_undersize_packets);
9211 stats->rx_over_errors = old_stats->rx_over_errors +
9212 get_stat64(&hw_stats->rxbds_empty);
9213 stats->rx_frame_errors = old_stats->rx_frame_errors +
9214 get_stat64(&hw_stats->rx_align_errors);
9215 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9216 get_stat64(&hw_stats->tx_discards);
9217 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9218 get_stat64(&hw_stats->tx_carrier_sense_errors);
9220 stats->rx_crc_errors = old_stats->rx_crc_errors +
9221 calc_crc_errors(tp);
9223 stats->rx_missed_errors = old_stats->rx_missed_errors +
9224 get_stat64(&hw_stats->rx_discards);
9229 static inline u32 calc_crc(unsigned char *buf, int len)
9237 for (j = 0; j < len; j++) {
9240 for (k = 0; k < 8; k++) {
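/* The body of calc_crc() is not fully visible in this excerpt.  As a
 * hedged reference, the standard bit-serial, reflected CRC-32 used for
 * Ethernet multicast hashing looks like the sketch below (polynomial
 * 0xedb88320, initial value all-ones, result inverted); it is assumed,
 * not guaranteed, to match what calc_crc() computes.
 */
static u32 crc32_le_sketch(const unsigned char *buf, int len)
{
	u32 crc = 0xffffffff;
	int i, k;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];			/* fold in the next byte */
		for (k = 0; k < 8; k++)		/* one step per bit, LSB first */
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return ~crc;
}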
9253 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9255 /* accept or reject all multicast frames */
9256 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9257 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9258 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9259 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9262 static void __tg3_set_rx_mode(struct net_device *dev)
9264 struct tg3 *tp = netdev_priv(dev);
9267 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9268 RX_MODE_KEEP_VLAN_TAG);
9270 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
 * flag clear.
 */
9273 #if TG3_VLAN_TAG_USED
9275 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9276 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9278 /* By definition, VLAN is disabled always in this
 * case.
 */
9281 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9282 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9285 if (dev->flags & IFF_PROMISC) {
9286 /* Promiscuous mode. */
9287 rx_mode |= RX_MODE_PROMISC;
9288 } else if (dev->flags & IFF_ALLMULTI) {
9289 /* Accept all multicast. */
9290 tg3_set_multi(tp, 1);
9291 } else if (netdev_mc_empty(dev)) {
9292 /* Reject all multicast. */
9293 tg3_set_multi(tp, 0);
9295 /* Accept one or more multicast(s). */
9296 struct netdev_hw_addr *ha;
9297 u32 mc_filter[4] = { 0, };
9302 netdev_for_each_mc_addr(ha, dev) {
9303 crc = calc_crc(ha->addr, ETH_ALEN);
9305 regidx = (bit & 0x60) >> 5;
9307 mc_filter[regidx] |= (1 << bit);
9310 tw32(MAC_HASH_REG_0, mc_filter[0]);
9311 tw32(MAC_HASH_REG_1, mc_filter[1]);
9312 tw32(MAC_HASH_REG_2, mc_filter[2]);
9313 tw32(MAC_HASH_REG_3, mc_filter[3]);
9316 if (rx_mode != tp->rx_mode) {
9317 tp->rx_mode = rx_mode;
9318 tw32_f(MAC_RX_MODE, rx_mode);
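/* Minimal self-contained sketch of the multicast hash mapping performed
 * above.  The two statements between the calc_crc() call and the
 * mc_filter[] update are not visible in this excerpt; the "~crc & 0x7f"
 * step below is an editorial assumption, chosen to be consistent with the
 * 0x60 and 0x1f masks that are visible.  Each address sets exactly one bit
 * across the four 32-bit MAC_HASH_REG_x registers.
 */
static void mc_hash_set_sketch(u32 mc_filter[4], unsigned char *addr)
{
	u32 crc = calc_crc(addr, ETH_ALEN);
	u32 bit = ~crc & 0x7f;			/* 7-bit hash index, 0..127 */
	u32 regidx = (bit & 0x60) >> 5;		/* which of the four registers */

	bit &= 0x1f;				/* which bit within that register */
	mc_filter[regidx] |= 1u << bit;
}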
9323 static void tg3_set_rx_mode(struct net_device *dev)
9325 struct tg3 *tp = netdev_priv(dev);
9327 if (!netif_running(dev))
9330 tg3_full_lock(tp, 0);
9331 __tg3_set_rx_mode(dev);
9332 tg3_full_unlock(tp);
9335 #define TG3_REGDUMP_LEN (32 * 1024)
9337 static int tg3_get_regs_len(struct net_device *dev)
9339 return TG3_REGDUMP_LEN;
9342 static void tg3_get_regs(struct net_device *dev,
9343 struct ethtool_regs *regs, void *_p)
9346 struct tg3 *tp = netdev_priv(dev);
9352 memset(p, 0, TG3_REGDUMP_LEN);
9354 if (tp->link_config.phy_is_low_power)
9357 tg3_full_lock(tp, 0);
9359 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
9360 #define GET_REG32_LOOP(base, len) \
9361 do { p = (u32 *)(orig_p + (base)); \
9362 for (i = 0; i < len; i += 4) \
9363 __GET_REG32((base) + i); \
9365 #define GET_REG32_1(reg) \
9366 do { p = (u32 *)(orig_p + (reg)); \
9367 __GET_REG32((reg)); \
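/* What one expansion of the loop macro above amounts to (editorial
 * illustration): the destination pointer is parked at the same offset in
 * the dump buffer that the register block occupies in the device's
 * register space, so the 32 KB dump is a sparse image of the register
 * file:
 *
 *	p = (u32 *)(orig_p + MAC_MODE);
 *	for (i = 0; i < 0x4f0; i += 4)
 *		*p++ = tr32(MAC_MODE + i);
 */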
9370 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
9371 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
9372 GET_REG32_LOOP(MAC_MODE, 0x4f0);
9373 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
9374 GET_REG32_1(SNDDATAC_MODE);
9375 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
9376 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
9377 GET_REG32_1(SNDBDC_MODE);
9378 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
9379 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
9380 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
9381 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
9382 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
9383 GET_REG32_1(RCVDCC_MODE);
9384 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
9385 GET_REG32_LOOP(RCVCC_MODE, 0x14);
9386 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
9387 GET_REG32_1(MBFREE_MODE);
9388 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
9389 GET_REG32_LOOP(MEMARB_MODE, 0x10);
9390 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
9391 GET_REG32_LOOP(RDMAC_MODE, 0x08);
9392 GET_REG32_LOOP(WDMAC_MODE, 0x08);
9393 GET_REG32_1(RX_CPU_MODE);
9394 GET_REG32_1(RX_CPU_STATE);
9395 GET_REG32_1(RX_CPU_PGMCTR);
9396 GET_REG32_1(RX_CPU_HWBKPT);
9397 GET_REG32_1(TX_CPU_MODE);
9398 GET_REG32_1(TX_CPU_STATE);
9399 GET_REG32_1(TX_CPU_PGMCTR);
9400 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
9401 GET_REG32_LOOP(FTQ_RESET, 0x120);
9402 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
9403 GET_REG32_1(DMAC_MODE);
9404 GET_REG32_LOOP(GRC_MODE, 0x4c);
9405 if (tp->tg3_flags & TG3_FLAG_NVRAM)
9406 GET_REG32_LOOP(NVRAM_CMD, 0x24);
9409 #undef GET_REG32_LOOP
9412 tg3_full_unlock(tp);
9415 static int tg3_get_eeprom_len(struct net_device *dev)
9417 struct tg3 *tp = netdev_priv(dev);
9419 return tp->nvram_size;
9422 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9424 struct tg3 *tp = netdev_priv(dev);
9427 u32 i, offset, len, b_offset, b_count;
9430 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9433 if (tp->link_config.phy_is_low_power)
9436 offset = eeprom->offset;
9440 eeprom->magic = TG3_EEPROM_MAGIC;
9443 /* adjustments to start on required 4 byte boundary */
9444 b_offset = offset & 3;
9445 b_count = 4 - b_offset;
9446 if (b_count > len) {
9447 /* i.e. offset=1 len=2 */
9450 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9453 memcpy(data, ((char *)&val) + b_offset, b_count);
9456 eeprom->len += b_count;
9459 /* read bytes up to the last 4 byte boundary */
9460 pd = &data[eeprom->len];
9461 for (i = 0; i < (len - (len & 3)); i += 4) {
9462 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9467 memcpy(pd + i, &val, 4);
9472 /* read last bytes not ending on 4 byte boundary */
9473 pd = &data[eeprom->len];
9475 b_offset = offset + len - b_count;
9476 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9479 memcpy(pd, &val, b_count);
9480 eeprom->len += b_count;
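/* Minimal sketch (hypothetical helper, editorial illustration) of the
 * alignment bookkeeping used above: an arbitrary byte range is served with
 * a partial leading word, some number of whole 4-byte words, and a partial
 * trailing word.  For offset = 1, len = 10 this yields head = 3, body = 4,
 * tail = 3.
 */
static void nvram_range_split_sketch(u32 offset, u32 len,
				     u32 *head, u32 *body, u32 *tail)
{
	*head = (4 - (offset & 3)) & 3;	/* bytes up to the first word boundary */
	if (*head > len)
		*head = len;		/* e.g. offset = 1, len = 2 */
	*tail = (len - *head) & 3;	/* bytes beyond the last whole word */
	*body = len - *head - *tail;	/* full 4-byte words in between */
}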
9485 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9487 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9489 struct tg3 *tp = netdev_priv(dev);
9491 u32 offset, len, b_offset, odd_len;
9495 if (tp->link_config.phy_is_low_power)
9498 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
9499 eeprom->magic != TG3_EEPROM_MAGIC)
9502 offset = eeprom->offset;
9505 if ((b_offset = (offset & 3))) {
9506 /* adjustments to start on required 4 byte boundary */
9507 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9518 /* adjustments to end on required 4 byte boundary */
9520 len = (len + 3) & ~3;
9521 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9527 if (b_offset || odd_len) {
9528 buf = kmalloc(len, GFP_KERNEL);
9532 memcpy(buf, &start, 4);
9534 memcpy(buf+len-4, &end, 4);
9535 memcpy(buf + b_offset, data, eeprom->len);
9538 ret = tg3_nvram_write_block(tp, offset, len, buf);
9546 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9548 struct tg3 *tp = netdev_priv(dev);
9550 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9551 struct phy_device *phydev;
9552 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9554 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9555 return phy_ethtool_gset(phydev, cmd);
9558 cmd->supported = (SUPPORTED_Autoneg);
9560 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9561 cmd->supported |= (SUPPORTED_1000baseT_Half |
9562 SUPPORTED_1000baseT_Full);
9564 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
9565 cmd->supported |= (SUPPORTED_100baseT_Half |
9566 SUPPORTED_100baseT_Full |
9567 SUPPORTED_10baseT_Half |
9568 SUPPORTED_10baseT_Full |
9570 cmd->port = PORT_TP;
9572 cmd->supported |= SUPPORTED_FIBRE;
9573 cmd->port = PORT_FIBRE;
9576 cmd->advertising = tp->link_config.advertising;
9577 if (netif_running(dev)) {
9578 cmd->speed = tp->link_config.active_speed;
9579 cmd->duplex = tp->link_config.active_duplex;
9581 cmd->phy_address = tp->phy_addr;
9582 cmd->transceiver = XCVR_INTERNAL;
9583 cmd->autoneg = tp->link_config.autoneg;
9589 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9591 struct tg3 *tp = netdev_priv(dev);
9593 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9594 struct phy_device *phydev;
9595 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9597 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9598 return phy_ethtool_sset(phydev, cmd);
9601 if (cmd->autoneg != AUTONEG_ENABLE &&
9602 cmd->autoneg != AUTONEG_DISABLE)
9605 if (cmd->autoneg == AUTONEG_DISABLE &&
9606 cmd->duplex != DUPLEX_FULL &&
9607 cmd->duplex != DUPLEX_HALF)
9610 if (cmd->autoneg == AUTONEG_ENABLE) {
9611 u32 mask = ADVERTISED_Autoneg |
9613 ADVERTISED_Asym_Pause;
9615 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9616 mask |= ADVERTISED_1000baseT_Half |
9617 ADVERTISED_1000baseT_Full;
9619 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9620 mask |= ADVERTISED_100baseT_Half |
9621 ADVERTISED_100baseT_Full |
9622 ADVERTISED_10baseT_Half |
9623 ADVERTISED_10baseT_Full |
9626 mask |= ADVERTISED_FIBRE;
9628 if (cmd->advertising & ~mask)
9631 mask &= (ADVERTISED_1000baseT_Half |
9632 ADVERTISED_1000baseT_Full |
9633 ADVERTISED_100baseT_Half |
9634 ADVERTISED_100baseT_Full |
9635 ADVERTISED_10baseT_Half |
9636 ADVERTISED_10baseT_Full);
9638 cmd->advertising &= mask;
9640 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
9641 if (cmd->speed != SPEED_1000)
9644 if (cmd->duplex != DUPLEX_FULL)
9647 if (cmd->speed != SPEED_100 &&
9648 cmd->speed != SPEED_10)
9653 tg3_full_lock(tp, 0);
9655 tp->link_config.autoneg = cmd->autoneg;
9656 if (cmd->autoneg == AUTONEG_ENABLE) {
9657 tp->link_config.advertising = (cmd->advertising |
9658 ADVERTISED_Autoneg);
9659 tp->link_config.speed = SPEED_INVALID;
9660 tp->link_config.duplex = DUPLEX_INVALID;
9662 tp->link_config.advertising = 0;
9663 tp->link_config.speed = cmd->speed;
9664 tp->link_config.duplex = cmd->duplex;
9667 tp->link_config.orig_speed = tp->link_config.speed;
9668 tp->link_config.orig_duplex = tp->link_config.duplex;
9669 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9671 if (netif_running(dev))
9672 tg3_setup_phy(tp, 1);
9674 tg3_full_unlock(tp);
9679 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9681 struct tg3 *tp = netdev_priv(dev);
9683 strcpy(info->driver, DRV_MODULE_NAME);
9684 strcpy(info->version, DRV_MODULE_VERSION);
9685 strcpy(info->fw_version, tp->fw_ver);
9686 strcpy(info->bus_info, pci_name(tp->pdev));
9689 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9691 struct tg3 *tp = netdev_priv(dev);
9693 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9694 device_can_wakeup(&tp->pdev->dev))
9695 wol->supported = WAKE_MAGIC;
9699 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9700 device_can_wakeup(&tp->pdev->dev))
9701 wol->wolopts = WAKE_MAGIC;
9702 memset(&wol->sopass, 0, sizeof(wol->sopass));
9705 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9707 struct tg3 *tp = netdev_priv(dev);
9708 struct device *dp = &tp->pdev->dev;
9710 if (wol->wolopts & ~WAKE_MAGIC)
9712 if ((wol->wolopts & WAKE_MAGIC) &&
9713 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9716 spin_lock_bh(&tp->lock);
9717 if (wol->wolopts & WAKE_MAGIC) {
9718 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9719 device_set_wakeup_enable(dp, true);
9721 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9722 device_set_wakeup_enable(dp, false);
9724 spin_unlock_bh(&tp->lock);
9729 static u32 tg3_get_msglevel(struct net_device *dev)
9731 struct tg3 *tp = netdev_priv(dev);
9732 return tp->msg_enable;
9735 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9737 struct tg3 *tp = netdev_priv(dev);
9738 tp->msg_enable = value;
9741 static int tg3_set_tso(struct net_device *dev, u32 value)
9743 struct tg3 *tp = netdev_priv(dev);
9745 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9750 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9751 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
9752 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
9754 dev->features |= NETIF_F_TSO6;
9755 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
9756 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9757 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9758 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9759 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9760 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9761 dev->features |= NETIF_F_TSO_ECN;
9763 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9765 return ethtool_op_set_tso(dev, value);
9768 static int tg3_nway_reset(struct net_device *dev)
9770 struct tg3 *tp = netdev_priv(dev);
9773 if (!netif_running(dev))
9776 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9779 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9780 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9782 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9786 spin_lock_bh(&tp->lock);
9788 tg3_readphy(tp, MII_BMCR, &bmcr);
9789 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9790 ((bmcr & BMCR_ANENABLE) ||
9791 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9792 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9796 spin_unlock_bh(&tp->lock);
9802 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9804 struct tg3 *tp = netdev_priv(dev);
9806 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9807 ering->rx_mini_max_pending = 0;
9808 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9809 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9811 ering->rx_jumbo_max_pending = 0;
9813 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9815 ering->rx_pending = tp->rx_pending;
9816 ering->rx_mini_pending = 0;
9817 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9818 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9820 ering->rx_jumbo_pending = 0;
9822 ering->tx_pending = tp->napi[0].tx_pending;
9825 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9827 struct tg3 *tp = netdev_priv(dev);
9828 int i, irq_sync = 0, err = 0;
9830 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9831 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9832 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9833 (ering->tx_pending <= MAX_SKB_FRAGS) ||
9834 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9835 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9838 if (netif_running(dev)) {
9844 tg3_full_lock(tp, irq_sync);
9846 tp->rx_pending = ering->rx_pending;
9848 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9849 tp->rx_pending > 63)
9850 tp->rx_pending = 63;
9851 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9853 for (i = 0; i < TG3_IRQ_MAX_VECS; i++)
9854 tp->napi[i].tx_pending = ering->tx_pending;
9856 if (netif_running(dev)) {
9857 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9858 err = tg3_restart_hw(tp, 1);
9860 tg3_netif_start(tp);
9863 tg3_full_unlock(tp);
9865 if (irq_sync && !err)
9871 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9873 struct tg3 *tp = netdev_priv(dev);
9875 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9877 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
9878 epause->rx_pause = 1;
9880 epause->rx_pause = 0;
9882 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
9883 epause->tx_pause = 1;
9885 epause->tx_pause = 0;
9888 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9890 struct tg3 *tp = netdev_priv(dev);
9893 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9895 struct phy_device *phydev;
9897 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9899 if (!(phydev->supported & SUPPORTED_Pause) ||
9900 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
9901 ((epause->rx_pause && !epause->tx_pause) ||
9902 (!epause->rx_pause && epause->tx_pause))))
9905 tp->link_config.flowctrl = 0;
9906 if (epause->rx_pause) {
9907 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9909 if (epause->tx_pause) {
9910 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9911 newadv = ADVERTISED_Pause;
9913 newadv = ADVERTISED_Pause |
9914 ADVERTISED_Asym_Pause;
9915 } else if (epause->tx_pause) {
9916 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9917 newadv = ADVERTISED_Asym_Pause;
9921 if (epause->autoneg)
9922 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9924 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9926 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9927 u32 oldadv = phydev->advertising &
9928 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
9929 if (oldadv != newadv) {
9930 phydev->advertising &=
9931 ~(ADVERTISED_Pause |
9932 ADVERTISED_Asym_Pause);
9933 phydev->advertising |= newadv;
9934 if (phydev->autoneg) {
9936 * Always renegotiate the link to
9937 * inform our link partner of our
9938 * flow control settings, even if the
9939 * flow control is forced. Let
9940 * tg3_adjust_link() do the final
9941 * flow control setup.
9943 return phy_start_aneg(phydev);
9947 if (!epause->autoneg)
9948 tg3_setup_flow_control(tp, 0, 0);
9950 tp->link_config.orig_advertising &=
9951 ~(ADVERTISED_Pause |
9952 ADVERTISED_Asym_Pause);
9953 tp->link_config.orig_advertising |= newadv;
9958 if (netif_running(dev)) {
9963 tg3_full_lock(tp, irq_sync);
9965 if (epause->autoneg)
9966 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9968 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9969 if (epause->rx_pause)
9970 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9972 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
9973 if (epause->tx_pause)
9974 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9976 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9978 if (netif_running(dev)) {
9979 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9980 err = tg3_restart_hw(tp, 1);
9982 tg3_netif_start(tp);
9985 tg3_full_unlock(tp);
9991 static u32 tg3_get_rx_csum(struct net_device *dev)
9993 struct tg3 *tp = netdev_priv(dev);
9994 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9997 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9999 struct tg3 *tp = netdev_priv(dev);
10001 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10007 spin_lock_bh(&tp->lock);
10009 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10011 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10012 spin_unlock_bh(&tp->lock);
10017 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
10019 struct tg3 *tp = netdev_priv(dev);
10021 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10027 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10028 ethtool_op_set_tx_ipv6_csum(dev, data);
10030 ethtool_op_set_tx_csum(dev, data);
10035 static int tg3_get_sset_count(struct net_device *dev, int sset)
10039 return TG3_NUM_TEST;
10041 return TG3_NUM_STATS;
10043 return -EOPNOTSUPP;
10047 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10049 switch (stringset) {
10051 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10054 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10057 WARN_ON(1); /* we need a WARN() */
10062 static int tg3_phys_id(struct net_device *dev, u32 data)
10064 struct tg3 *tp = netdev_priv(dev);
10067 if (!netif_running(tp->dev))
10071 data = UINT_MAX / 2;
10073 for (i = 0; i < (data * 2); i++) {
10075 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10076 LED_CTRL_1000MBPS_ON |
10077 LED_CTRL_100MBPS_ON |
10078 LED_CTRL_10MBPS_ON |
10079 LED_CTRL_TRAFFIC_OVERRIDE |
10080 LED_CTRL_TRAFFIC_BLINK |
10081 LED_CTRL_TRAFFIC_LED);
10084 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10085 LED_CTRL_TRAFFIC_OVERRIDE);
10087 if (msleep_interruptible(500))
10090 tw32(MAC_LED_CTRL, tp->led_ctrl);
10094 static void tg3_get_ethtool_stats(struct net_device *dev,
10095 struct ethtool_stats *estats, u64 *tmp_stats)
10097 struct tg3 *tp = netdev_priv(dev);
10098 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10101 #define NVRAM_TEST_SIZE 0x100
10102 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10103 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10104 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10105 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10106 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10108 static int tg3_test_nvram(struct tg3 *tp)
10112 int i, j, k, err = 0, size;
10114 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
10117 if (tg3_nvram_read(tp, 0, &magic) != 0)
10120 if (magic == TG3_EEPROM_MAGIC)
10121 size = NVRAM_TEST_SIZE;
10122 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10123 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10124 TG3_EEPROM_SB_FORMAT_1) {
10125 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10126 case TG3_EEPROM_SB_REVISION_0:
10127 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10129 case TG3_EEPROM_SB_REVISION_2:
10130 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10132 case TG3_EEPROM_SB_REVISION_3:
10133 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10140 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10141 size = NVRAM_SELFBOOT_HW_SIZE;
10145 buf = kmalloc(size, GFP_KERNEL);
10150 for (i = 0, j = 0; i < size; i += 4, j++) {
10151 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10158 /* Selfboot format */
10159 magic = be32_to_cpu(buf[0]);
10160 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10161 TG3_EEPROM_MAGIC_FW) {
10162 u8 *buf8 = (u8 *) buf, csum8 = 0;
10164 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10165 TG3_EEPROM_SB_REVISION_2) {
10166 /* For rev 2, the csum doesn't include the MBA. */
10167 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10169 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10172 for (i = 0; i < size; i++)
10185 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10186 TG3_EEPROM_MAGIC_HW) {
10187 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10188 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10189 u8 *buf8 = (u8 *) buf;
10191 /* Separate the parity bits and the data bytes. */
10192 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10193 if ((i == 0) || (i == 8)) {
10197 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10198 parity[k++] = buf8[i] & msk;
10200 } else if (i == 16) {
10204 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10205 parity[k++] = buf8[i] & msk;
10208 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10209 parity[k++] = buf8[i] & msk;
10212 data[j++] = buf8[i];
10216 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10217 u8 hw8 = hweight8(data[i]);
10219 if ((hw8 & 0x1) && parity[i])
10221 else if (!(hw8 & 0x1) && !parity[i])
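/* Equivalent acceptance rule for the checks above (editorial note): a
 * byte passes only when its data bits plus the stored parity bit have odd
 * overall parity, i.e.
 *
 *	ok = ((hweight8(data[i]) + (parity[i] ? 1 : 0)) & 1) == 1;
 */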
10228 /* Bootstrap checksum at offset 0x10 */
10229 csum = calc_crc((unsigned char *) buf, 0x10);
10230 if (csum != be32_to_cpu(buf[0x10/4]))
10233 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10234 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10235 if (csum != be32_to_cpu(buf[0xfc/4]))
10245 #define TG3_SERDES_TIMEOUT_SEC 2
10246 #define TG3_COPPER_TIMEOUT_SEC 6
10248 static int tg3_test_link(struct tg3 *tp)
10252 if (!netif_running(tp->dev))
10255 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
10256 max = TG3_SERDES_TIMEOUT_SEC;
10258 max = TG3_COPPER_TIMEOUT_SEC;
10260 for (i = 0; i < max; i++) {
10261 if (netif_carrier_ok(tp->dev))
10264 if (msleep_interruptible(1000))
10271 /* Only test the commonly used registers */
10272 static int tg3_test_registers(struct tg3 *tp)
10274 int i, is_5705, is_5750;
10275 u32 offset, read_mask, write_mask, val, save_val, read_val;
10279 #define TG3_FL_5705 0x1
10280 #define TG3_FL_NOT_5705 0x2
10281 #define TG3_FL_NOT_5788 0x4
10282 #define TG3_FL_NOT_5750 0x8
10286 /* MAC Control Registers */
10287 { MAC_MODE, TG3_FL_NOT_5705,
10288 0x00000000, 0x00ef6f8c },
10289 { MAC_MODE, TG3_FL_5705,
10290 0x00000000, 0x01ef6b8c },
10291 { MAC_STATUS, TG3_FL_NOT_5705,
10292 0x03800107, 0x00000000 },
10293 { MAC_STATUS, TG3_FL_5705,
10294 0x03800100, 0x00000000 },
10295 { MAC_ADDR_0_HIGH, 0x0000,
10296 0x00000000, 0x0000ffff },
10297 { MAC_ADDR_0_LOW, 0x0000,
10298 0x00000000, 0xffffffff },
10299 { MAC_RX_MTU_SIZE, 0x0000,
10300 0x00000000, 0x0000ffff },
10301 { MAC_TX_MODE, 0x0000,
10302 0x00000000, 0x00000070 },
10303 { MAC_TX_LENGTHS, 0x0000,
10304 0x00000000, 0x00003fff },
10305 { MAC_RX_MODE, TG3_FL_NOT_5705,
10306 0x00000000, 0x000007fc },
10307 { MAC_RX_MODE, TG3_FL_5705,
10308 0x00000000, 0x000007dc },
10309 { MAC_HASH_REG_0, 0x0000,
10310 0x00000000, 0xffffffff },
10311 { MAC_HASH_REG_1, 0x0000,
10312 0x00000000, 0xffffffff },
10313 { MAC_HASH_REG_2, 0x0000,
10314 0x00000000, 0xffffffff },
10315 { MAC_HASH_REG_3, 0x0000,
10316 0x00000000, 0xffffffff },
10318 /* Receive Data and Receive BD Initiator Control Registers. */
10319 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10320 0x00000000, 0xffffffff },
10321 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10322 0x00000000, 0xffffffff },
10323 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10324 0x00000000, 0x00000003 },
10325 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10326 0x00000000, 0xffffffff },
10327 { RCVDBDI_STD_BD+0, 0x0000,
10328 0x00000000, 0xffffffff },
10329 { RCVDBDI_STD_BD+4, 0x0000,
10330 0x00000000, 0xffffffff },
10331 { RCVDBDI_STD_BD+8, 0x0000,
10332 0x00000000, 0xffff0002 },
10333 { RCVDBDI_STD_BD+0xc, 0x0000,
10334 0x00000000, 0xffffffff },
10336 /* Receive BD Initiator Control Registers. */
10337 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10338 0x00000000, 0xffffffff },
10339 { RCVBDI_STD_THRESH, TG3_FL_5705,
10340 0x00000000, 0x000003ff },
10341 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10342 0x00000000, 0xffffffff },
10344 /* Host Coalescing Control Registers. */
10345 { HOSTCC_MODE, TG3_FL_NOT_5705,
10346 0x00000000, 0x00000004 },
10347 { HOSTCC_MODE, TG3_FL_5705,
10348 0x00000000, 0x000000f6 },
10349 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10350 0x00000000, 0xffffffff },
10351 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10352 0x00000000, 0x000003ff },
10353 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10354 0x00000000, 0xffffffff },
10355 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10356 0x00000000, 0x000003ff },
10357 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10358 0x00000000, 0xffffffff },
10359 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10360 0x00000000, 0x000000ff },
10361 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10362 0x00000000, 0xffffffff },
10363 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10364 0x00000000, 0x000000ff },
10365 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10366 0x00000000, 0xffffffff },
10367 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10368 0x00000000, 0xffffffff },
10369 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10370 0x00000000, 0xffffffff },
10371 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10372 0x00000000, 0x000000ff },
10373 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10374 0x00000000, 0xffffffff },
10375 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10376 0x00000000, 0x000000ff },
10377 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10378 0x00000000, 0xffffffff },
10379 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10380 0x00000000, 0xffffffff },
10381 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10382 0x00000000, 0xffffffff },
10383 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10384 0x00000000, 0xffffffff },
10385 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10386 0x00000000, 0xffffffff },
10387 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10388 0xffffffff, 0x00000000 },
10389 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10390 0xffffffff, 0x00000000 },
10392 /* Buffer Manager Control Registers. */
10393 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10394 0x00000000, 0x007fff80 },
10395 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10396 0x00000000, 0x007fffff },
10397 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10398 0x00000000, 0x0000003f },
10399 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10400 0x00000000, 0x000001ff },
10401 { BUFMGR_MB_HIGH_WATER, 0x0000,
10402 0x00000000, 0x000001ff },
10403 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10404 0xffffffff, 0x00000000 },
10405 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10406 0xffffffff, 0x00000000 },
10408 /* Mailbox Registers */
10409 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10410 0x00000000, 0x000001ff },
10411 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10412 0x00000000, 0x000001ff },
10413 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10414 0x00000000, 0x000007ff },
10415 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10416 0x00000000, 0x000001ff },
10418 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10421 is_5705 = is_5750 = 0;
10422 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10424 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10428 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10429 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10432 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10435 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10436 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10439 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10442 offset = (u32) reg_tbl[i].offset;
10443 read_mask = reg_tbl[i].read_mask;
10444 write_mask = reg_tbl[i].write_mask;
10446 /* Save the original register content */
10447 save_val = tr32(offset);
10449 /* Determine the read-only value. */
10450 read_val = save_val & read_mask;
10452 /* Write zero to the register, then make sure the read-only bits
10453 * are not changed and the read/write bits are all zeros.
10457 val = tr32(offset);
10459 /* Test the read-only and read/write bits. */
10460 if (((val & read_mask) != read_val) || (val & write_mask))
10463 /* Write ones to all the bits defined by RdMask and WrMask, then
10464 * make sure the read-only bits are not changed and the
10465 * read/write bits are all ones.
10467 tw32(offset, read_mask | write_mask);
10469 val = tr32(offset);
10471 /* Test the read-only bits. */
10472 if ((val & read_mask) != read_val)
10475 /* Test the read/write bits. */
10476 if ((val & write_mask) != write_mask)
10479 tw32(offset, save_val);
10485 if (netif_msg_hw(tp))
10486 netdev_err(tp->dev,
10487 "Register test failed at offset %x\n", offset);
10488 tw32(offset, save_val);
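/* Self-contained sketch (hypothetical helper, editorial illustration) of
 * the probe performed for each table entry above: read-only bits must
 * survive writes of all-zeros and all-ones, read/write bits must take
 * whatever was written, and the original register value is always
 * restored afterwards.
 */
static int reg_mask_probe_sketch(struct tg3 *tp, u32 off,
				 u32 read_mask, u32 write_mask)
{
	u32 save_val = tr32(off);
	u32 read_val = save_val & read_mask;
	u32 val;
	int bad;

	tw32(off, 0);
	val = tr32(off);
	bad = ((val & read_mask) != read_val) || (val & write_mask);

	tw32(off, read_mask | write_mask);
	val = tr32(off);
	bad |= ((val & read_mask) != read_val) ||
	       ((val & write_mask) != write_mask);

	tw32(off, save_val);
	return bad ? -EIO : 0;
}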
10492 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10494 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10498 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10499 for (j = 0; j < len; j += 4) {
10502 tg3_write_mem(tp, offset + j, test_pattern[i]);
10503 tg3_read_mem(tp, offset + j, &val);
10504 if (val != test_pattern[i])
10511 static int tg3_test_memory(struct tg3 *tp)
10513 static struct mem_entry {
10516 } mem_tbl_570x[] = {
10517 { 0x00000000, 0x00b50},
10518 { 0x00002000, 0x1c000},
10519 { 0xffffffff, 0x00000}
10520 }, mem_tbl_5705[] = {
10521 { 0x00000100, 0x0000c},
10522 { 0x00000200, 0x00008},
10523 { 0x00004000, 0x00800},
10524 { 0x00006000, 0x01000},
10525 { 0x00008000, 0x02000},
10526 { 0x00010000, 0x0e000},
10527 { 0xffffffff, 0x00000}
10528 }, mem_tbl_5755[] = {
10529 { 0x00000200, 0x00008},
10530 { 0x00004000, 0x00800},
10531 { 0x00006000, 0x00800},
10532 { 0x00008000, 0x02000},
10533 { 0x00010000, 0x0c000},
10534 { 0xffffffff, 0x00000}
10535 }, mem_tbl_5906[] = {
10536 { 0x00000200, 0x00008},
10537 { 0x00004000, 0x00400},
10538 { 0x00006000, 0x00400},
10539 { 0x00008000, 0x01000},
10540 { 0x00010000, 0x01000},
10541 { 0xffffffff, 0x00000}
10542 }, mem_tbl_5717[] = {
10543 { 0x00000200, 0x00008},
10544 { 0x00010000, 0x0a000},
10545 { 0x00020000, 0x13c00},
10546 { 0xffffffff, 0x00000}
10547 }, mem_tbl_57765[] = {
10548 { 0x00000200, 0x00008},
10549 { 0x00004000, 0x00800},
10550 { 0x00006000, 0x09800},
10551 { 0x00010000, 0x0a000},
10552 { 0xffffffff, 0x00000}
10554 struct mem_entry *mem_tbl;
10558 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
10559 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
10560 mem_tbl = mem_tbl_5717;
10561 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10562 mem_tbl = mem_tbl_57765;
10563 else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10564 mem_tbl = mem_tbl_5755;
10565 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10566 mem_tbl = mem_tbl_5906;
10567 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
10568 mem_tbl = mem_tbl_5705;
10570 mem_tbl = mem_tbl_570x;
10572 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10573 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
10581 #define TG3_MAC_LOOPBACK 0
10582 #define TG3_PHY_LOOPBACK 1
10584 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10586 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10587 u32 desc_idx, coal_now;
10588 struct sk_buff *skb, *rx_skb;
10591 int num_pkts, tx_len, rx_len, i, err;
10592 struct tg3_rx_buffer_desc *desc;
10593 struct tg3_napi *tnapi, *rnapi;
10594 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
10596 tnapi = &tp->napi[0];
10597 rnapi = &tp->napi[0];
10598 if (tp->irq_cnt > 1) {
10599 rnapi = &tp->napi[1];
10600 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
10601 tnapi = &tp->napi[1];
10603 coal_now = tnapi->coal_now | rnapi->coal_now;
10605 if (loopback_mode == TG3_MAC_LOOPBACK) {
10606 /* HW errata - mac loopback fails in some cases on 5780.
10607 * Normal traffic and PHY loopback are not affected by
 * errata.
 */
10610 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10613 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10614 MAC_MODE_PORT_INT_LPBACK;
10615 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10616 mac_mode |= MAC_MODE_LINK_POLARITY;
10617 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10618 mac_mode |= MAC_MODE_PORT_MODE_MII;
10620 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10621 tw32(MAC_MODE, mac_mode);
10622 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10625 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10626 tg3_phy_fet_toggle_apd(tp, false);
10627 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10629 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10631 tg3_phy_toggle_automdix(tp, 0);
10633 tg3_writephy(tp, MII_BMCR, val);
10636 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10637 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10638 tg3_writephy(tp, MII_TG3_FET_PTEST,
10639 MII_TG3_FET_PTEST_FRC_TX_LINK |
10640 MII_TG3_FET_PTEST_FRC_TX_LOCK);
10641 /* The write needs to be flushed for the AC131 */
10642 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10643 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
10644 mac_mode |= MAC_MODE_PORT_MODE_MII;
10646 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10648 /* reset to prevent losing 1st rx packet intermittently */
10649 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10650 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10652 tw32_f(MAC_RX_MODE, tp->rx_mode);
10654 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10655 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
10656 if (masked_phy_id == TG3_PHY_ID_BCM5401)
10657 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10658 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
10659 mac_mode |= MAC_MODE_LINK_POLARITY;
10660 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10661 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10663 tw32(MAC_MODE, mac_mode);
10671 skb = netdev_alloc_skb(tp->dev, tx_len);
10675 tx_data = skb_put(skb, tx_len);
10676 memcpy(tx_data, tp->dev->dev_addr, 6);
10677 memset(tx_data + 6, 0x0, 8);
10679 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10681 for (i = 14; i < tx_len; i++)
10682 tx_data[i] = (u8) (i & 0xff);
10684 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10685 if (pci_dma_mapping_error(tp->pdev, map)) {
10686 dev_kfree_skb(skb);
10690 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10695 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
10699 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
10704 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
10705 tr32_mailbox(tnapi->prodmbox);
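/* Note (editorial): reading the producer mailbox back immediately after
 * writing it forces the posted PCI write out to the device before the
 * polling loop below starts timing.
 */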
10709 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
10710 for (i = 0; i < 35; i++) {
10711 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10716 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
10717 rx_idx = rnapi->hw_status->idx[0].rx_producer;
10718 if ((tx_idx == tnapi->tx_prod) &&
10719 (rx_idx == (rx_start_idx + num_pkts)))
10723 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10724 dev_kfree_skb(skb);
10726 if (tx_idx != tnapi->tx_prod)
10729 if (rx_idx != rx_start_idx + num_pkts)
10732 desc = &rnapi->rx_rcb[rx_start_idx];
10733 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10734 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10735 if (opaque_key != RXD_OPAQUE_RING_STD)
10738 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10739 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10742 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10743 if (rx_len != tx_len)
10746 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
10748 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
10749 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10751 for (i = 14; i < tx_len; i++) {
10752 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10757 /* tg3_free_rings will unmap and free the rx_skb */
10762 #define TG3_MAC_LOOPBACK_FAILED 1
10763 #define TG3_PHY_LOOPBACK_FAILED 2
10764 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10765 TG3_PHY_LOOPBACK_FAILED)
10767 static int tg3_test_loopback(struct tg3 *tp)
10772 if (!netif_running(tp->dev))
10773 return TG3_LOOPBACK_FAILED;
10775 err = tg3_reset_hw(tp, 1);
10777 return TG3_LOOPBACK_FAILED;
10779 /* Turn off gphy autopowerdown. */
10780 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10781 tg3_phy_toggle_apd(tp, false);
10783 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10787 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10789 /* Wait for up to 40 microseconds to acquire lock. */
10790 for (i = 0; i < 4; i++) {
10791 status = tr32(TG3_CPMU_MUTEX_GNT);
10792 if (status == CPMU_MUTEX_GNT_DRIVER)
10797 if (status != CPMU_MUTEX_GNT_DRIVER)
10798 return TG3_LOOPBACK_FAILED;
10800 /* Turn off link-based power management. */
10801 cpmuctrl = tr32(TG3_CPMU_CTRL);
10802 tw32(TG3_CPMU_CTRL,
10803 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10804 CPMU_CTRL_LINK_AWARE_MODE));
10807 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10808 err |= TG3_MAC_LOOPBACK_FAILED;
10810 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10811 tw32(TG3_CPMU_CTRL, cpmuctrl);
10813 /* Release the mutex */
10814 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10817 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10818 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10819 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10820 err |= TG3_PHY_LOOPBACK_FAILED;
10823 /* Re-enable gphy autopowerdown. */
10824 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10825 tg3_phy_toggle_apd(tp, true);
10830 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10833 struct tg3 *tp = netdev_priv(dev);
10835 if (tp->link_config.phy_is_low_power)
10836 tg3_set_power_state(tp, PCI_D0);
10838 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10840 if (tg3_test_nvram(tp) != 0) {
10841 etest->flags |= ETH_TEST_FL_FAILED;
10844 if (tg3_test_link(tp) != 0) {
10845 etest->flags |= ETH_TEST_FL_FAILED;
10848 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10849 int err, err2 = 0, irq_sync = 0;
10851 if (netif_running(dev)) {
10853 tg3_netif_stop(tp);
10857 tg3_full_lock(tp, irq_sync);
10859 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10860 err = tg3_nvram_lock(tp);
10861 tg3_halt_cpu(tp, RX_CPU_BASE);
10862 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10863 tg3_halt_cpu(tp, TX_CPU_BASE);
10865 tg3_nvram_unlock(tp);
10867 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10870 if (tg3_test_registers(tp) != 0) {
10871 etest->flags |= ETH_TEST_FL_FAILED;
10874 if (tg3_test_memory(tp) != 0) {
10875 etest->flags |= ETH_TEST_FL_FAILED;
10878 if ((data[4] = tg3_test_loopback(tp)) != 0)
10879 etest->flags |= ETH_TEST_FL_FAILED;
10881 tg3_full_unlock(tp);
10883 if (tg3_test_interrupt(tp) != 0) {
10884 etest->flags |= ETH_TEST_FL_FAILED;
10888 tg3_full_lock(tp, 0);
10890 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10891 if (netif_running(dev)) {
10892 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10893 err2 = tg3_restart_hw(tp, 1);
10895 tg3_netif_start(tp);
10898 tg3_full_unlock(tp);
10900 if (irq_sync && !err2)
10903 if (tp->link_config.phy_is_low_power)
10904 tg3_set_power_state(tp, PCI_D3hot);
10908 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10910 struct mii_ioctl_data *data = if_mii(ifr);
10911 struct tg3 *tp = netdev_priv(dev);
10914 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10915 struct phy_device *phydev;
10916 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10918 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10919 return phy_mii_ioctl(phydev, ifr, cmd);
10924 data->phy_id = tp->phy_addr;
10927 case SIOCGMIIREG: {
10930 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10931 break; /* We have no PHY */
10933 if (tp->link_config.phy_is_low_power)
10936 spin_lock_bh(&tp->lock);
10937 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10938 spin_unlock_bh(&tp->lock);
10940 data->val_out = mii_regval;
10946 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10947 break; /* We have no PHY */
10949 if (tp->link_config.phy_is_low_power)
10952 spin_lock_bh(&tp->lock);
10953 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10954 spin_unlock_bh(&tp->lock);
10962 return -EOPNOTSUPP;
10965 #if TG3_VLAN_TAG_USED
10966 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10968 struct tg3 *tp = netdev_priv(dev);
10970 if (!netif_running(dev)) {
10975 tg3_netif_stop(tp);
10977 tg3_full_lock(tp, 0);
10981 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10982 __tg3_set_rx_mode(dev);
10984 tg3_netif_start(tp);
10986 tg3_full_unlock(tp);
10990 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10992 struct tg3 *tp = netdev_priv(dev);
10994 memcpy(ec, &tp->coal, sizeof(*ec));
10998 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11000 struct tg3 *tp = netdev_priv(dev);
11001 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11002 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11004 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
11005 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11006 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11007 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11008 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11011 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11012 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11013 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11014 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11015 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11016 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11017 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11018 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11019 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11020 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11023 /* No rx interrupts will be generated if both are zero */
11024 if ((ec->rx_coalesce_usecs == 0) &&
11025 (ec->rx_max_coalesced_frames == 0))
11028 /* No tx interrupts will be generated if both are zero */
11029 if ((ec->tx_coalesce_usecs == 0) &&
11030 (ec->tx_max_coalesced_frames == 0))
11033 /* Only copy relevant parameters, ignore all others. */
11034 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11035 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11036 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11037 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11038 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11039 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11040 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11041 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11042 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11044 if (netif_running(dev)) {
11045 tg3_full_lock(tp, 0);
11046 __tg3_set_coalesce(tp, &tp->coal);
11047 tg3_full_unlock(tp);
11052 static const struct ethtool_ops tg3_ethtool_ops = {
11053 .get_settings = tg3_get_settings,
11054 .set_settings = tg3_set_settings,
11055 .get_drvinfo = tg3_get_drvinfo,
11056 .get_regs_len = tg3_get_regs_len,
11057 .get_regs = tg3_get_regs,
11058 .get_wol = tg3_get_wol,
11059 .set_wol = tg3_set_wol,
11060 .get_msglevel = tg3_get_msglevel,
11061 .set_msglevel = tg3_set_msglevel,
11062 .nway_reset = tg3_nway_reset,
11063 .get_link = ethtool_op_get_link,
11064 .get_eeprom_len = tg3_get_eeprom_len,
11065 .get_eeprom = tg3_get_eeprom,
11066 .set_eeprom = tg3_set_eeprom,
11067 .get_ringparam = tg3_get_ringparam,
11068 .set_ringparam = tg3_set_ringparam,
11069 .get_pauseparam = tg3_get_pauseparam,
11070 .set_pauseparam = tg3_set_pauseparam,
11071 .get_rx_csum = tg3_get_rx_csum,
11072 .set_rx_csum = tg3_set_rx_csum,
11073 .set_tx_csum = tg3_set_tx_csum,
11074 .set_sg = ethtool_op_set_sg,
11075 .set_tso = tg3_set_tso,
11076 .self_test = tg3_self_test,
11077 .get_strings = tg3_get_strings,
11078 .phys_id = tg3_phys_id,
11079 .get_ethtool_stats = tg3_get_ethtool_stats,
11080 .get_coalesce = tg3_get_coalesce,
11081 .set_coalesce = tg3_set_coalesce,
11082 .get_sset_count = tg3_get_sset_count,
11085 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11087 u32 cursize, val, magic;
11089 tp->nvram_size = EEPROM_CHIP_SIZE;
11091 if (tg3_nvram_read(tp, 0, &magic) != 0)
11094 if ((magic != TG3_EEPROM_MAGIC) &&
11095 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11096 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11100 * Size the chip by reading offsets at increasing powers of two.
11101 * When we encounter our validation signature, we know the addressing
11102 * has wrapped around, and thus have our chip size.
11106 while (cursize < tp->nvram_size) {
11107 if (tg3_nvram_read(tp, cursize, &val) != 0)
11116 tp->nvram_size = cursize;
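/* Hedged reconstruction (editorial sketch) of the probe loop above, parts
 * of which are not visible in this excerpt: keep doubling the offset until
 * a read either fails or returns the magic value seen at offset 0, which
 * means the addressing wrapped around.  A 128 KB part, for example, wraps
 * at 0x20000, so that is the size recorded.
 */
static u32 eeprom_size_probe_sketch(struct tg3 *tp, u32 magic, u32 limit)
{
	u32 cursize = 0x10;
	u32 val;

	while (cursize < limit) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			break;
		if (val == magic)		/* wrapped back to offset 0 */
			break;
		cursize <<= 1;
	}
	return cursize;
}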
11119 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11123 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
11124 tg3_nvram_read(tp, 0, &val) != 0)
11127 /* Selfboot format */
11128 if (val != TG3_EEPROM_MAGIC) {
11129 tg3_get_eeprom_size(tp);
11133 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11135 /* This is confusing. We want to operate on the
11136 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11137 * call will read from NVRAM and byteswap the data
11138 * according to the byteswapping settings for all
11139 * other register accesses. This ensures the data we
11140 * want will always reside in the lower 16-bits.
11141 * However, the data in NVRAM is in LE format, which
11142 * means the data from the NVRAM read will always be
11143 * opposite the endianness of the CPU. The 16-bit
11144 * byteswap then brings the data to CPU endianness.
11146 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11150 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11153 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11157 nvcfg1 = tr32(NVRAM_CFG1);
11158 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11159 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11161 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11162 tw32(NVRAM_CFG1, nvcfg1);
11165 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11166 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11167 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11168 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11169 tp->nvram_jedecnum = JEDEC_ATMEL;
11170 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11171 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11173 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11174 tp->nvram_jedecnum = JEDEC_ATMEL;
11175 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11177 case FLASH_VENDOR_ATMEL_EEPROM:
11178 tp->nvram_jedecnum = JEDEC_ATMEL;
11179 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11180 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11182 case FLASH_VENDOR_ST:
11183 tp->nvram_jedecnum = JEDEC_ST;
11184 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11185 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11187 case FLASH_VENDOR_SAIFUN:
11188 tp->nvram_jedecnum = JEDEC_SAIFUN;
11189 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11191 case FLASH_VENDOR_SST_SMALL:
11192 case FLASH_VENDOR_SST_LARGE:
11193 tp->nvram_jedecnum = JEDEC_SST;
11194 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11198 tp->nvram_jedecnum = JEDEC_ATMEL;
11199 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11200 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11204 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11206 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11207 case FLASH_5752PAGE_SIZE_256:
11208 tp->nvram_pagesize = 256;
11210 case FLASH_5752PAGE_SIZE_512:
11211 tp->nvram_pagesize = 512;
11213 case FLASH_5752PAGE_SIZE_1K:
11214 tp->nvram_pagesize = 1024;
11216 case FLASH_5752PAGE_SIZE_2K:
11217 tp->nvram_pagesize = 2048;
11219 case FLASH_5752PAGE_SIZE_4K:
11220 tp->nvram_pagesize = 4096;
11222 case FLASH_5752PAGE_SIZE_264:
11223 tp->nvram_pagesize = 264;
11225 case FLASH_5752PAGE_SIZE_528:
11226 tp->nvram_pagesize = 528;
11231 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11235 nvcfg1 = tr32(NVRAM_CFG1);
11237 /* NVRAM protection for TPM */
11238 if (nvcfg1 & (1 << 27))
11239 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11241 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11242 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11243 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11244 tp->nvram_jedecnum = JEDEC_ATMEL;
11245 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11247 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11248 tp->nvram_jedecnum = JEDEC_ATMEL;
11249 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11250 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11252 case FLASH_5752VENDOR_ST_M45PE10:
11253 case FLASH_5752VENDOR_ST_M45PE20:
11254 case FLASH_5752VENDOR_ST_M45PE40:
11255 tp->nvram_jedecnum = JEDEC_ST;
11256 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11257 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11261 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
11262 tg3_nvram_get_pagesize(tp, nvcfg1);
11264 /* For eeprom, set pagesize to maximum eeprom size */
11265 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11267 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11268 tw32(NVRAM_CFG1, nvcfg1);
11272 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11274 u32 nvcfg1, protect = 0;
11276 nvcfg1 = tr32(NVRAM_CFG1);
11278 /* NVRAM protection for TPM */
11279 if (nvcfg1 & (1 << 27)) {
11280 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11284 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11286 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11287 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11288 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11289 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11290 tp->nvram_jedecnum = JEDEC_ATMEL;
11291 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11292 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11293 tp->nvram_pagesize = 264;
11294 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11295 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11296 tp->nvram_size = (protect ? 0x3e200 :
11297 TG3_NVRAM_SIZE_512KB);
11298 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11299 tp->nvram_size = (protect ? 0x1f200 :
11300 TG3_NVRAM_SIZE_256KB);
11302 tp->nvram_size = (protect ? 0x1f200 :
11303 TG3_NVRAM_SIZE_128KB);
11305 case FLASH_5752VENDOR_ST_M45PE10:
11306 case FLASH_5752VENDOR_ST_M45PE20:
11307 case FLASH_5752VENDOR_ST_M45PE40:
11308 tp->nvram_jedecnum = JEDEC_ST;
11309 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11310 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11311 tp->nvram_pagesize = 256;
11312 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11313 tp->nvram_size = (protect ?
11314 TG3_NVRAM_SIZE_64KB :
11315 TG3_NVRAM_SIZE_128KB);
11316 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11317 tp->nvram_size = (protect ?
11318 TG3_NVRAM_SIZE_64KB :
11319 TG3_NVRAM_SIZE_256KB);
11321 tp->nvram_size = (protect ?
11322 TG3_NVRAM_SIZE_128KB :
11323 TG3_NVRAM_SIZE_512KB);
11328 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11332 nvcfg1 = tr32(NVRAM_CFG1);
11334 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11335 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11336 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11337 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11338 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11339 tp->nvram_jedecnum = JEDEC_ATMEL;
11340 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11341 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11343 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11344 tw32(NVRAM_CFG1, nvcfg1);
11346 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11347 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11348 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11349 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11350 tp->nvram_jedecnum = JEDEC_ATMEL;
11351 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11352 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11353 tp->nvram_pagesize = 264;
11355 case FLASH_5752VENDOR_ST_M45PE10:
11356 case FLASH_5752VENDOR_ST_M45PE20:
11357 case FLASH_5752VENDOR_ST_M45PE40:
11358 tp->nvram_jedecnum = JEDEC_ST;
11359 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11360 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11361 tp->nvram_pagesize = 256;
11366 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11368 u32 nvcfg1, protect = 0;
11370 nvcfg1 = tr32(NVRAM_CFG1);
11372 /* NVRAM protection for TPM */
11373 if (nvcfg1 & (1 << 27)) {
11374 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11378 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11380 case FLASH_5761VENDOR_ATMEL_ADB021D:
11381 case FLASH_5761VENDOR_ATMEL_ADB041D:
11382 case FLASH_5761VENDOR_ATMEL_ADB081D:
11383 case FLASH_5761VENDOR_ATMEL_ADB161D:
11384 case FLASH_5761VENDOR_ATMEL_MDB021D:
11385 case FLASH_5761VENDOR_ATMEL_MDB041D:
11386 case FLASH_5761VENDOR_ATMEL_MDB081D:
11387 case FLASH_5761VENDOR_ATMEL_MDB161D:
11388 tp->nvram_jedecnum = JEDEC_ATMEL;
11389 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11390 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11391 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11392 tp->nvram_pagesize = 256;
11394 case FLASH_5761VENDOR_ST_A_M45PE20:
11395 case FLASH_5761VENDOR_ST_A_M45PE40:
11396 case FLASH_5761VENDOR_ST_A_M45PE80:
11397 case FLASH_5761VENDOR_ST_A_M45PE16:
11398 case FLASH_5761VENDOR_ST_M_M45PE20:
11399 case FLASH_5761VENDOR_ST_M_M45PE40:
11400 case FLASH_5761VENDOR_ST_M_M45PE80:
11401 case FLASH_5761VENDOR_ST_M_M45PE16:
11402 tp->nvram_jedecnum = JEDEC_ST;
11403 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11404 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11405 tp->nvram_pagesize = 256;
11410 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11413 case FLASH_5761VENDOR_ATMEL_ADB161D:
11414 case FLASH_5761VENDOR_ATMEL_MDB161D:
11415 case FLASH_5761VENDOR_ST_A_M45PE16:
11416 case FLASH_5761VENDOR_ST_M_M45PE16:
11417 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11419 case FLASH_5761VENDOR_ATMEL_ADB081D:
11420 case FLASH_5761VENDOR_ATMEL_MDB081D:
11421 case FLASH_5761VENDOR_ST_A_M45PE80:
11422 case FLASH_5761VENDOR_ST_M_M45PE80:
11423 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11425 case FLASH_5761VENDOR_ATMEL_ADB041D:
11426 case FLASH_5761VENDOR_ATMEL_MDB041D:
11427 case FLASH_5761VENDOR_ST_A_M45PE40:
11428 case FLASH_5761VENDOR_ST_M_M45PE40:
11429 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11431 case FLASH_5761VENDOR_ATMEL_ADB021D:
11432 case FLASH_5761VENDOR_ATMEL_MDB021D:
11433 case FLASH_5761VENDOR_ST_A_M45PE20:
11434 case FLASH_5761VENDOR_ST_M_M45PE20:
11435 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11441 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11443 tp->nvram_jedecnum = JEDEC_ATMEL;
11444 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11445 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11448 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11452 nvcfg1 = tr32(NVRAM_CFG1);
11454 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11455 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11456 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11457 tp->nvram_jedecnum = JEDEC_ATMEL;
11458 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11459 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11461 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11462 tw32(NVRAM_CFG1, nvcfg1);
11464 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11465 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11466 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11467 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11468 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11469 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11470 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11471 tp->nvram_jedecnum = JEDEC_ATMEL;
11472 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11473 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11475 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11476 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11477 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11478 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11479 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11481 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11482 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11483 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11485 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11486 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11487 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11491 case FLASH_5752VENDOR_ST_M45PE10:
11492 case FLASH_5752VENDOR_ST_M45PE20:
11493 case FLASH_5752VENDOR_ST_M45PE40:
11494 tp->nvram_jedecnum = JEDEC_ST;
11495 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11496 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11498 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11499 case FLASH_5752VENDOR_ST_M45PE10:
11500 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11502 case FLASH_5752VENDOR_ST_M45PE20:
11503 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11505 case FLASH_5752VENDOR_ST_M45PE40:
11506 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11511 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11515 tg3_nvram_get_pagesize(tp, nvcfg1);
11516 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11517 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11521 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11525 nvcfg1 = tr32(NVRAM_CFG1);
11527 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11528 case FLASH_5717VENDOR_ATMEL_EEPROM:
11529 case FLASH_5717VENDOR_MICRO_EEPROM:
11530 tp->nvram_jedecnum = JEDEC_ATMEL;
11531 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11532 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11534 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11535 tw32(NVRAM_CFG1, nvcfg1);
11537 case FLASH_5717VENDOR_ATMEL_MDB011D:
11538 case FLASH_5717VENDOR_ATMEL_ADB011B:
11539 case FLASH_5717VENDOR_ATMEL_ADB011D:
11540 case FLASH_5717VENDOR_ATMEL_MDB021D:
11541 case FLASH_5717VENDOR_ATMEL_ADB021B:
11542 case FLASH_5717VENDOR_ATMEL_ADB021D:
11543 case FLASH_5717VENDOR_ATMEL_45USPT:
11544 tp->nvram_jedecnum = JEDEC_ATMEL;
11545 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11546 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11548 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11549 case FLASH_5717VENDOR_ATMEL_MDB021D:
11550 case FLASH_5717VENDOR_ATMEL_ADB021B:
11551 case FLASH_5717VENDOR_ATMEL_ADB021D:
11552 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11555 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11559 case FLASH_5717VENDOR_ST_M_M25PE10:
11560 case FLASH_5717VENDOR_ST_A_M25PE10:
11561 case FLASH_5717VENDOR_ST_M_M45PE10:
11562 case FLASH_5717VENDOR_ST_A_M45PE10:
11563 case FLASH_5717VENDOR_ST_M_M25PE20:
11564 case FLASH_5717VENDOR_ST_A_M25PE20:
11565 case FLASH_5717VENDOR_ST_M_M45PE20:
11566 case FLASH_5717VENDOR_ST_A_M45PE20:
11567 case FLASH_5717VENDOR_ST_25USPT:
11568 case FLASH_5717VENDOR_ST_45USPT:
11569 tp->nvram_jedecnum = JEDEC_ST;
11570 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11571 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11573 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11574 case FLASH_5717VENDOR_ST_M_M25PE20:
11575 case FLASH_5717VENDOR_ST_A_M25PE20:
11576 case FLASH_5717VENDOR_ST_M_M45PE20:
11577 case FLASH_5717VENDOR_ST_A_M45PE20:
11578 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11581 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11586 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11590 tg3_nvram_get_pagesize(tp, nvcfg1);
11591 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11592 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11595 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
11596 static void __devinit tg3_nvram_init(struct tg3 *tp)
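/* Reset the serial EEPROM state machine and program the default clock
 * period before any EEPROM or NVRAM access is attempted.
 */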
11598 tw32_f(GRC_EEPROM_ADDR,
11599 (EEPROM_ADDR_FSM_RESET |
11600 (EEPROM_DEFAULT_CLOCK_PERIOD <<
11601 EEPROM_ADDR_CLKPERD_SHIFT)));
11605 /* Enable seeprom accesses. */
11606 tw32_f(GRC_LOCAL_CTRL,
11607 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
11610 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11611 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
11612 tp->tg3_flags |= TG3_FLAG_NVRAM;
11614 if (tg3_nvram_lock(tp)) {
11615 netdev_warn(tp->dev,
11616 "Cannot get nvram lock, %s failed\n",
11620 tg3_enable_nvram_access(tp);
11622 tp->nvram_size = 0;
11624 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11625 tg3_get_5752_nvram_info(tp);
11626 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11627 tg3_get_5755_nvram_info(tp);
11628 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11629 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11630 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11631 tg3_get_5787_nvram_info(tp);
11632 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11633 tg3_get_5761_nvram_info(tp);
11634 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11635 tg3_get_5906_nvram_info(tp);
11636 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
11637 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11638 tg3_get_57780_nvram_info(tp);
11639 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
11640 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
11641 tg3_get_5717_nvram_info(tp);
11643 tg3_get_nvram_info(tp);
11645 if (tp->nvram_size == 0)
11646 tg3_get_nvram_size(tp);
11648 tg3_disable_nvram_access(tp);
11649 tg3_nvram_unlock(tp);
11652 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
11654 tg3_get_eeprom_size(tp);
11658 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11659 u32 offset, u32 len, u8 *buf)
11664 for (i = 0; i < len; i += 4) {
11670 memcpy(&data, buf + i, 4);
11673 * The SEEPROM interface expects the data to always be in the
11674 * opposite of the native endian format. We accomplish this by
11675 * reversing all the operations that would have been performed
11676 * on the data by a call to tg3_nvram_read_be32().
11678 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
11680 val = tr32(GRC_EEPROM_ADDR);
11681 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11683 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11685 tw32(GRC_EEPROM_ADDR, val |
11686 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11687 (addr & EEPROM_ADDR_ADDR_MASK) |
11688 EEPROM_ADDR_START |
11689 EEPROM_ADDR_WRITE);
11691 for (j = 0; j < 1000; j++) {
11692 val = tr32(GRC_EEPROM_ADDR);
11694 if (val & EEPROM_ADDR_COMPLETE)
11698 if (!(val & EEPROM_ADDR_COMPLETE)) {
11707 /* offset and length are dword aligned */
11708 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11712 u32 pagesize = tp->nvram_pagesize;
11713 u32 pagemask = pagesize - 1;
11717 tmp = kmalloc(pagesize, GFP_KERNEL);
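/* Unbuffered flash cannot be updated a word at a time: for every page
 * touched by the request, read the whole page into tmp, merge in the
 * caller's data, erase the page, and then write it back.
 */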
11723 u32 phy_addr, page_off, size;
11725 phy_addr = offset & ~pagemask;
11727 for (j = 0; j < pagesize; j += 4) {
11728 ret = tg3_nvram_read_be32(tp, phy_addr + j,
11729 (__be32 *) (tmp + j));
11736 page_off = offset & pagemask;
11743 memcpy(tmp + page_off, buf, size);
11745 offset = offset + (pagesize - page_off);
11747 tg3_enable_nvram_access(tp);
11750 * Before we can erase the flash page, we need
11751 * to issue a special "write enable" command.
11753 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11755 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11758 /* Erase the target page */
11759 tw32(NVRAM_ADDR, phy_addr);
11761 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11762 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11764 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11767 /* Issue another write enable to start the write. */
11768 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11770 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11773 for (j = 0; j < pagesize; j += 4) {
11776 data = *((__be32 *) (tmp + j));
11778 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11780 tw32(NVRAM_ADDR, phy_addr + j);
11782 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11786 nvram_cmd |= NVRAM_CMD_FIRST;
11787 else if (j == (pagesize - 4))
11788 nvram_cmd |= NVRAM_CMD_LAST;
11790 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11797 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11798 tg3_nvram_exec_cmd(tp, nvram_cmd);
11805 /* offset and length are dword aligned */
11806 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11811 for (i = 0; i < len; i += 4, offset += 4) {
11812 u32 page_off, phy_addr, nvram_cmd;
11815 memcpy(&data, buf + i, 4);
11816 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11818 page_off = offset % tp->nvram_pagesize;
11820 phy_addr = tg3_nvram_phys_addr(tp, offset);
11822 tw32(NVRAM_ADDR, phy_addr);
11824 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
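/* Mark the first word of a page (or of the whole transfer) with
 * NVRAM_CMD_FIRST, and the final word of a page or of the buffer with
 * NVRAM_CMD_LAST.
 */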
11826 if (page_off == 0 || i == 0)
11827 nvram_cmd |= NVRAM_CMD_FIRST;
11828 if (page_off == (tp->nvram_pagesize - 4))
11829 nvram_cmd |= NVRAM_CMD_LAST;
11831 if (i == (len - 4))
11832 nvram_cmd |= NVRAM_CMD_LAST;
11834 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11835 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
11836 (tp->nvram_jedecnum == JEDEC_ST) &&
11837 (nvram_cmd & NVRAM_CMD_FIRST)) {
11839 if ((ret = tg3_nvram_exec_cmd(tp,
11840 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11845 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11846 /* We always do complete word writes to eeprom. */
11847 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11850 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11856 /* offset and length are dword aligned */
11857 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11861 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11862 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11863 ~GRC_LCLCTRL_GPIO_OUTPUT1);
11867 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11868 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11872 ret = tg3_nvram_lock(tp);
11876 tg3_enable_nvram_access(tp);
11877 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11878 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
11879 tw32(NVRAM_WRITE1, 0x406);
11881 grc_mode = tr32(GRC_MODE);
11882 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11884 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11885 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11887 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11890 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11894 grc_mode = tr32(GRC_MODE);
11895 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11897 tg3_disable_nvram_access(tp);
11898 tg3_nvram_unlock(tp);
11901 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11902 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
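/* Per-board PHY overrides: map a PCI subsystem vendor/device pair to the
 * PHY fitted on that board. Used by tg3_phy_probe() when neither the PHY
 * registers nor the eeprom yield a usable PHY ID.
 */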
11909 struct subsys_tbl_ent {
11910 u16 subsys_vendor, subsys_devid;
11914 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
11915 /* Broadcom boards. */
11916 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11917 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
11918 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11919 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
11920 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11921 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
11922 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11923 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
11924 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11925 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
11926 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11927 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
11928 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11929 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
11930 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11931 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
11932 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11933 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
11934 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11935 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
11936 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11937 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
11940 { TG3PCI_SUBVENDOR_ID_3COM,
11941 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
11942 { TG3PCI_SUBVENDOR_ID_3COM,
11943 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
11944 { TG3PCI_SUBVENDOR_ID_3COM,
11945 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
11946 { TG3PCI_SUBVENDOR_ID_3COM,
11947 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
11948 { TG3PCI_SUBVENDOR_ID_3COM,
11949 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
11952 { TG3PCI_SUBVENDOR_ID_DELL,
11953 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
11954 { TG3PCI_SUBVENDOR_ID_DELL,
11955 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
11956 { TG3PCI_SUBVENDOR_ID_DELL,
11957 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
11958 { TG3PCI_SUBVENDOR_ID_DELL,
11959 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
11961 /* Compaq boards. */
11962 { TG3PCI_SUBVENDOR_ID_COMPAQ,
11963 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
11964 { TG3PCI_SUBVENDOR_ID_COMPAQ,
11965 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
11966 { TG3PCI_SUBVENDOR_ID_COMPAQ,
11967 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
11968 { TG3PCI_SUBVENDOR_ID_COMPAQ,
11969 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
11970 { TG3PCI_SUBVENDOR_ID_COMPAQ,
11971 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
11974 { TG3PCI_SUBVENDOR_ID_IBM,
11975 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
11978 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
11982 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11983 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11984 tp->pdev->subsystem_vendor) &&
11985 (subsys_id_to_phy_id[i].subsys_devid ==
11986 tp->pdev->subsystem_device))
11987 return &subsys_id_to_phy_id[i];
11992 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11997 /* On some early chips the SRAM cannot be accessed in D3hot state,
11998 * so we need to make sure we're in D0.
12000 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12001 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12002 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12005 /* Make sure register accesses (indirect or otherwise)
12006 * will function correctly.
12008 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12009 tp->misc_host_ctrl);
12011 /* The memory arbiter has to be enabled in order for SRAM accesses
12012 * to succeed. Normally on powerup the tg3 chip firmware will make
12013 * sure it is enabled, but other entities such as system netboot
12014 * code might disable it.
12016 val = tr32(MEMARB_MODE);
12017 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12019 tp->phy_id = TG3_PHY_ID_INVALID;
12020 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12022 /* Assume an onboard device and WOL capable by default. */
12023 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
12025 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12026 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12027 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12028 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12030 val = tr32(VCPU_CFGSHDW);
12031 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12032 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12033 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12034 (val & VCPU_CFGSHDW_WOL_MAGPKT))
12035 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
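/* Only trust the SRAM configuration block below if the expected signature
 * value is present at NIC_SRAM_DATA_SIG.
 */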
12039 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12040 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12041 u32 nic_cfg, led_cfg;
12042 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12043 int eeprom_phy_serdes = 0;
12045 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12046 tp->nic_sram_data_cfg = nic_cfg;
12048 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12049 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12050 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12051 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12052 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12053 (ver > 0) && (ver < 0x100))
12054 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12056 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12057 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12059 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12060 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12061 eeprom_phy_serdes = 1;
12063 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12064 if (nic_phy_id != 0) {
12065 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12066 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
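/* Repack the two 16-bit SRAM words into the driver's internal 32-bit
 * PHY ID layout, the same layout tg3_phy_probe() builds from
 * MII_PHYSID1/MII_PHYSID2.
 */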
12068 eeprom_phy_id = (id1 >> 16) << 10;
12069 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12070 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12074 tp->phy_id = eeprom_phy_id;
12075 if (eeprom_phy_serdes) {
12076 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12077 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12079 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
12082 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12083 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12084 SHASTA_EXT_LED_MODE_MASK);
12086 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12090 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12091 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12094 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12095 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12098 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12099 tp->led_ctrl = LED_CTRL_MODE_MAC;
12101 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12102 * read from some older 5700/5701 bootcode.
12104 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12106 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12108 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12112 case SHASTA_EXT_LED_SHARED:
12113 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12114 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12115 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12116 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12117 LED_CTRL_MODE_PHY_2);
12120 case SHASTA_EXT_LED_MAC:
12121 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12124 case SHASTA_EXT_LED_COMBO:
12125 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12126 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12127 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12128 LED_CTRL_MODE_PHY_2);
12133 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12134 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12135 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12136 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12138 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12139 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12141 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12142 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
12143 if ((tp->pdev->subsystem_vendor ==
12144 PCI_VENDOR_ID_ARIMA) &&
12145 (tp->pdev->subsystem_device == 0x205a ||
12146 tp->pdev->subsystem_device == 0x2063))
12147 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12149 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12150 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12153 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12154 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
12155 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12156 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
12159 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12160 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12161 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
12163 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
12164 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12165 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
12167 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
12168 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
12169 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12171 if (cfg2 & (1 << 17))
12172 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
12174 /* SerDes signal pre-emphasis in register 0x590 is set by
12175 * the bootcode if bit 18 is set. */
12176 if (cfg2 & (1 << 18))
12177 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
12179 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12180 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12181 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12182 tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
12184 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12187 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12188 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12189 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12192 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12193 tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE;
12194 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12195 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
12196 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12197 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
12200 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
12201 device_set_wakeup_enable(&tp->pdev->dev,
12202 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
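/* Start an OTP controller command and busy-wait for completion; returns 0
 * on success or -EBUSY if the command does not finish in time.
 */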
12205 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12210 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12211 tw32(OTP_CTRL, cmd);
12213 /* Wait for up to 1 ms for command to execute. */
12214 for (i = 0; i < 100; i++) {
12215 val = tr32(OTP_STATUS);
12216 if (val & OTP_STATUS_CMD_DONE)
12221 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12224 /* Read the gphy configuration from the OTP region of the chip. The gphy
12225 * configuration is a 32-bit value that straddles the alignment boundary.
12226 * We do two 32-bit reads and then shift and merge the results.
12228 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12230 u32 bhalf_otp, thalf_otp;
12232 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12234 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12237 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12239 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12242 thalf_otp = tr32(OTP_READ_DATA);
12244 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12246 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12249 bhalf_otp = tr32(OTP_READ_DATA);
12251 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12254 static int __devinit tg3_phy_probe(struct tg3 *tp)
12256 u32 hw_phy_id_1, hw_phy_id_2;
12257 u32 hw_phy_id, hw_phy_id_masked;
12260 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
12261 return tg3_phy_init(tp);
12263 /* Reading the PHY ID register can conflict with ASF
12264 * firmware access to the PHY hardware.
12267 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12268 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
12269 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12271 /* Now read the physical PHY_ID from the chip and verify
12272 * that it is sane. If it doesn't look good, we fall back
12273 * to the PHY_ID found in the eeprom area and, failing that,
12274 * to the hard-coded subsystem ID table.
12276 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12277 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12279 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12280 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12281 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
12283 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12286 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12287 tp->phy_id = hw_phy_id;
12288 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12289 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12291 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
12293 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12294 /* Do nothing, phy ID already set up in
12295 * tg3_get_eeprom_hw_cfg().
12298 struct subsys_tbl_ent *p;
12300 /* No eeprom signature? Try the hardcoded
12301 * subsys device table.
12303 p = tg3_lookup_by_subsys(tp);
12307 tp->phy_id = p->phy_id;
12308 if (!tp->phy_id ||
12309 tp->phy_id == TG3_PHY_ID_BCM8002)
12310 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12314 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
12315 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
12316 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
12317 u32 bmsr, adv_reg, tg3_ctrl, mask;
12319 tg3_readphy(tp, MII_BMSR, &bmsr);
12320 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12321 (bmsr & BMSR_LSTATUS))
12322 goto skip_phy_reset;
12324 err = tg3_phy_reset(tp);
12328 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
12329 ADVERTISE_100HALF | ADVERTISE_100FULL |
12330 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
12332 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
12333 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
12334 MII_TG3_CTRL_ADV_1000_FULL);
12335 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12336 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
12337 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
12338 MII_TG3_CTRL_ENABLE_AS_MASTER);
12341 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12342 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12343 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12344 if (!tg3_copper_is_advertising_all(tp, mask)) {
12345 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12347 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
12348 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12350 tg3_writephy(tp, MII_BMCR,
12351 BMCR_ANENABLE | BMCR_ANRESTART);
12353 tg3_phy_set_wirespeed(tp);
12355 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12356 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
12357 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12361 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
12362 err = tg3_init_5401phy_dsp(tp);
12366 err = tg3_init_5401phy_dsp(tp);
12369 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
12370 tp->link_config.advertising =
12371 (ADVERTISED_1000baseT_Half |
12372 ADVERTISED_1000baseT_Full |
12373 ADVERTISED_Autoneg |
12375 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
12376 tp->link_config.advertising &=
12377 ~(ADVERTISED_1000baseT_Half |
12378 ADVERTISED_1000baseT_Full);
12383 static void __devinit tg3_read_vpd(struct tg3 *tp)
12385 u8 vpd_data[TG3_NVM_VPD_LEN];
12386 unsigned int block_end, rosize, len;
12390 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
12391 tg3_nvram_read(tp, 0x0, &magic))
12392 goto out_not_found;
12394 if (magic == TG3_EEPROM_MAGIC) {
12395 for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
12398 /* The data is in little-endian format in NVRAM.
12399 * Use the big-endian read routines to preserve
12400 * the byte order as it exists in NVRAM.
12402 if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp))
12403 goto out_not_found;
12405 memcpy(&vpd_data[i], &tmp, sizeof(tmp));
12409 unsigned int pos = 0;
12411 for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
12412 cnt = pci_read_vpd(tp->pdev, pos,
12413 TG3_NVM_VPD_LEN - pos,
12415 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12418 goto out_not_found;
12420 if (pos != TG3_NVM_VPD_LEN)
12421 goto out_not_found;
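/* Locate the read-only VPD tag and restrict all keyword lookups below to
 * that block.
 */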
12424 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
12425 PCI_VPD_LRDT_RO_DATA);
12427 goto out_not_found;
12429 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
12430 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
12431 i += PCI_VPD_LRDT_TAG_SIZE;
12433 if (block_end > TG3_NVM_VPD_LEN)
12434 goto out_not_found;
12436 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12437 PCI_VPD_RO_KEYWORD_MFR_ID);
12439 len = pci_vpd_info_field_size(&vpd_data[j]);
12441 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12442 if (j + len > block_end || len != 4 ||
12443 memcmp(&vpd_data[j], "1028", 4))
12446 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12447 PCI_VPD_RO_KEYWORD_VENDOR0);
12451 len = pci_vpd_info_field_size(&vpd_data[j]);
12453 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12454 if (j + len > block_end)
12457 memcpy(tp->fw_ver, &vpd_data[j], len);
12458 strncat(tp->fw_ver, " bc ", TG3_VER_SIZE - len - 1);
12462 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12463 PCI_VPD_RO_KEYWORD_PARTNO);
12465 goto out_not_found;
12467 len = pci_vpd_info_field_size(&vpd_data[i]);
12469 i += PCI_VPD_INFO_FLD_HDR_SIZE;
12470 if (len > TG3_BPN_SIZE ||
12471 (len + i) > TG3_NVM_VPD_LEN)
12472 goto out_not_found;
12474 memcpy(tp->board_part_number, &vpd_data[i], len);
12479 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12480 strcpy(tp->board_part_number, "BCM95906");
12481 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12482 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
12483 strcpy(tp->board_part_number, "BCM57780");
12484 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12485 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
12486 strcpy(tp->board_part_number, "BCM57760");
12487 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12488 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
12489 strcpy(tp->board_part_number, "BCM57790");
12490 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12491 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
12492 strcpy(tp->board_part_number, "BCM57788");
12493 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12494 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
12495 strcpy(tp->board_part_number, "BCM57761");
12496 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12497 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
12498 strcpy(tp->board_part_number, "BCM57765");
12499 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12500 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
12501 strcpy(tp->board_part_number, "BCM57781");
12502 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12503 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
12504 strcpy(tp->board_part_number, "BCM57785");
12505 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12506 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
12507 strcpy(tp->board_part_number, "BCM57791");
12508 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12509 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12510 strcpy(tp->board_part_number, "BCM57795");
12512 strcpy(tp->board_part_number, "none");
12515 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
12519 if (tg3_nvram_read(tp, offset, &val) ||
12520 (val & 0xfc000000) != 0x0c000000 ||
12521 tg3_nvram_read(tp, offset + 4, &val) ||
12528 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12530 u32 val, offset, start, ver_offset;
12532 bool newver = false;
12534 if (tg3_nvram_read(tp, 0xc, &offset) ||
12535 tg3_nvram_read(tp, 0x4, &start))
12538 offset = tg3_nvram_logical_addr(tp, offset);
12540 if (tg3_nvram_read(tp, offset, &val))
12543 if ((val & 0xfc000000) == 0x0c000000) {
12544 if (tg3_nvram_read(tp, offset + 4, &val))
12551 dst_off = strlen(tp->fw_ver);
12554 if (TG3_VER_SIZE - dst_off < 16 ||
12555 tg3_nvram_read(tp, offset + 8, &ver_offset))
12558 offset = offset + ver_offset - start;
12559 for (i = 0; i < 16; i += 4) {
12561 if (tg3_nvram_read_be32(tp, offset + i, &v))
12564 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
12569 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
12572 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
12573 TG3_NVM_BCVER_MAJSFT;
12574 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
12575 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
12576 "v%d.%02d", major, minor);
12580 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
12582 u32 val, major, minor;
12584 /* Use native endian representation */
12585 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
12588 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
12589 TG3_NVM_HWSB_CFG1_MAJSFT;
12590 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
12591 TG3_NVM_HWSB_CFG1_MINSFT;
12593 snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
12596 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12598 u32 offset, major, minor, build;
12600 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
12602 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
12605 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
12606 case TG3_EEPROM_SB_REVISION_0:
12607 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
12609 case TG3_EEPROM_SB_REVISION_2:
12610 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
12612 case TG3_EEPROM_SB_REVISION_3:
12613 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
12615 case TG3_EEPROM_SB_REVISION_4:
12616 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
12618 case TG3_EEPROM_SB_REVISION_5:
12619 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
12625 if (tg3_nvram_read(tp, offset, &val))
12628 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
12629 TG3_EEPROM_SB_EDH_BLD_SHFT;
12630 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
12631 TG3_EEPROM_SB_EDH_MAJ_SHFT;
12632 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
12634 if (minor > 99 || build > 26)
12637 offset = strlen(tp->fw_ver);
12638 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
12639 " v%d.%02d", major, minor);
12642 offset = strlen(tp->fw_ver);
12643 if (offset < TG3_VER_SIZE - 1)
12644 tp->fw_ver[offset] = 'a' + build - 1;
12648 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
12650 u32 val, offset, start;
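/* Walk the NVRAM directory looking for the ASF/management firmware entry;
 * if none is found there is no version string to report.
 */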
12653 for (offset = TG3_NVM_DIR_START;
12654 offset < TG3_NVM_DIR_END;
12655 offset += TG3_NVM_DIRENT_SIZE) {
12656 if (tg3_nvram_read(tp, offset, &val))
12659 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
12663 if (offset == TG3_NVM_DIR_END)
12666 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12667 start = 0x08000000;
12668 else if (tg3_nvram_read(tp, offset - 4, &start))
12671 if (tg3_nvram_read(tp, offset + 4, &offset) ||
12672 !tg3_fw_img_is_valid(tp, offset) ||
12673 tg3_nvram_read(tp, offset + 8, &val))
12676 offset += val - start;
12678 vlen = strlen(tp->fw_ver);
12680 tp->fw_ver[vlen++] = ',';
12681 tp->fw_ver[vlen++] = ' ';
12683 for (i = 0; i < 4; i++) {
12685 if (tg3_nvram_read_be32(tp, offset, &v))
12688 offset += sizeof(v);
12690 if (vlen > TG3_VER_SIZE - sizeof(v)) {
12691 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
12695 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
12700 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12705 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
12706 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
12709 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
12710 if (apedata != APE_SEG_SIG_MAGIC)
12713 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
12714 if (!(apedata & APE_FW_STATUS_READY))
12717 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
12719 vlen = strlen(tp->fw_ver);
12721 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d",
12722 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
12723 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
12724 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
12725 (apedata & APE_FW_VERSION_BLDMSK));
12728 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
12731 bool vpd_vers = false;
12733 if (tp->fw_ver[0] != 0)
12736 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
12737 strcat(tp->fw_ver, "sb");
12741 if (tg3_nvram_read(tp, 0, &val))
12744 if (val == TG3_EEPROM_MAGIC)
12745 tg3_read_bc_ver(tp);
12746 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
12747 tg3_read_sb_ver(tp, val);
12748 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12749 tg3_read_hwsb_ver(tp);
12753 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12754 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
12757 tg3_read_mgmtfw_ver(tp);
12760 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
12763 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
12765 static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
12767 #if TG3_VLAN_TAG_USED
12768 dev->vlan_features |= flags;
12772 static int __devinit tg3_get_invariants(struct tg3 *tp)
12774 static struct pci_device_id write_reorder_chipsets[] = {
12775 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12776 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12777 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12778 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
12779 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
12780 PCI_DEVICE_ID_VIA_8385_0) },
12784 u32 pci_state_reg, grc_misc_cfg;
12789 /* Force memory write invalidate off. If we leave it on,
12790 * then on 5700_BX chips we have to enable a workaround.
12791 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
12792 * to match the cacheline size. The Broadcom driver has this
12793 * workaround but turns MWI off all the time, so it never uses
12794 * it. This seems to suggest that the workaround is insufficient.
12796 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12797 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
12798 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12800 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
12801 * has the register indirect write enable bit set before
12802 * we try to access any of the MMIO registers. It is also
12803 * critical that the PCI-X hw workaround situation is decided
12804 * before that as well.
12806 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12809 tp->pci_chip_rev_id = (misc_ctrl_reg >>
12810 MISC_HOST_CTRL_CHIPREV_SHIFT);
12811 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12812 u32 prod_id_asic_rev;
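/* Newer devices report the ASIC revision through a separate product ID
 * register instead of MISC_HOST_CTRL; pick the right config register
 * based on the PCI device ID.
 */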
12814 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
12815 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
12816 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724 ||
12817 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719)
12818 pci_read_config_dword(tp->pdev,
12819 TG3PCI_GEN2_PRODID_ASICREV,
12820 &prod_id_asic_rev);
12821 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
12822 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
12823 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
12824 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
12825 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
12826 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12827 pci_read_config_dword(tp->pdev,
12828 TG3PCI_GEN15_PRODID_ASICREV,
12829 &prod_id_asic_rev);
12831 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
12832 &prod_id_asic_rev);
12834 tp->pci_chip_rev_id = prod_id_asic_rev;
12837 /* Wrong chip ID in 5752 A0. This code can be removed later
12838 * as A0 is not in production.
12840 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12841 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12843 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12844 * we need to disable memory and use config. cycles
12845 * only to access all registers. The 5702/03 chips
12846 * can mistakenly decode the special cycles from the
12847 * ICH chipsets as memory write cycles, causing corruption
12848 * of register and memory space. Only certain ICH bridges
12849 * will drive special cycles with non-zero data during the
12850 * address phase which can fall within the 5703's address
12851 * range. This is not an ICH bug as the PCI spec allows
12852 * non-zero address during special cycles. However, only
12853 * these ICH bridges are known to drive non-zero addresses
12854 * during special cycles.
12856 * Since special cycles do not cross PCI bridges, we only
12857 * enable this workaround if the 5703 is on the secondary
12858 * bus of these ICH bridges.
12860 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12861 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12862 static struct tg3_dev_id {
12866 } ich_chipsets[] = {
12867 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12869 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12871 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12873 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12877 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12878 struct pci_dev *bridge = NULL;
12880 while (pci_id->vendor != 0) {
12881 bridge = pci_get_device(pci_id->vendor, pci_id->device,
12887 if (pci_id->rev != PCI_ANY_ID) {
12888 if (bridge->revision > pci_id->rev)
12891 if (bridge->subordinate &&
12892 (bridge->subordinate->number ==
12893 tp->pdev->bus->number)) {
12895 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12896 pci_dev_put(bridge);
12902 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12903 static struct tg3_dev_id {
12906 } bridge_chipsets[] = {
12907 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12908 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12911 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12912 struct pci_dev *bridge = NULL;
12914 while (pci_id->vendor != 0) {
12915 bridge = pci_get_device(pci_id->vendor,
12922 if (bridge->subordinate &&
12923 (bridge->subordinate->number <=
12924 tp->pdev->bus->number) &&
12925 (bridge->subordinate->subordinate >=
12926 tp->pdev->bus->number)) {
12927 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12928 pci_dev_put(bridge);
12934 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12935 * DMA addresses > 40-bit. This bridge may have other additional
12936 * 57xx devices behind it in some 4-port NIC designs for example.
12937 * Any tg3 device found behind the bridge will also need the 40-bit
12940 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12941 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12942 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12943 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12944 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12946 struct pci_dev *bridge = NULL;
12949 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12950 PCI_DEVICE_ID_SERVERWORKS_EPB,
12952 if (bridge && bridge->subordinate &&
12953 (bridge->subordinate->number <=
12954 tp->pdev->bus->number) &&
12955 (bridge->subordinate->subordinate >=
12956 tp->pdev->bus->number)) {
12957 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12958 pci_dev_put(bridge);
12964 /* Initialize misc host control in PCI block. */
12965 tp->misc_host_ctrl |= (misc_ctrl_reg &
12966 MISC_HOST_CTRL_CHIPREV);
12967 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12968 tp->misc_host_ctrl);
12970 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
12971 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
12972 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12973 tp->pdev_peer = tg3_find_peer(tp);
12975 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12976 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
12977 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12978 tp->tg3_flags3 |= TG3_FLG3_5717_PLUS;
12980 /* Intentionally exclude ASIC_REV_5906 */
12981 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12982 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12983 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12984 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12985 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12986 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12987 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
12988 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
12990 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12991 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12992 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12993 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12994 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12995 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12997 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12998 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12999 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
13001 /* 5700 B0 chips do not support checksumming correctly due
13002 * to hardware bugs.
13004 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
13005 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
13007 unsigned long features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
13009 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13010 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
13011 features |= NETIF_F_IPV6_CSUM;
13012 tp->dev->features |= features;
13013 vlan_features_add(tp->dev, features);
13016 /* Determine TSO capabilities */
13017 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13018 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
13019 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13020 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13021 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
13022 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13023 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
13024 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13025 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13026 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
13027 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13028 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13029 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13030 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
13031 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13032 tp->fw_needed = FIRMWARE_TG3TSO5;
13034 tp->fw_needed = FIRMWARE_TG3TSO;
13039 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13040 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
13041 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13042 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13043 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13044 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13045 tp->pdev_peer == tp->pdev))
13046 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
13048 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13049 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13050 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
13053 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
13054 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
13055 tp->irq_max = TG3_IRQ_MAX_VECS;
13059 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13060 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13061 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13062 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
13063 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
13064 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
13065 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13068 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13069 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13071 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13072 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
13073 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
13074 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
13076 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13079 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13080 if (tp->pcie_cap != 0) {
13083 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13085 pcie_set_readrq(tp->pdev, 4096);
13087 pci_read_config_word(tp->pdev,
13088 tp->pcie_cap + PCI_EXP_LNKCTL,
13090 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13091 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13092 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
13093 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13094 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13095 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13096 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13097 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
13098 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13099 tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
13101 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13102 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13103 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13104 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13105 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13106 if (!tp->pcix_cap) {
13107 dev_err(&tp->pdev->dev,
13108 "Cannot find PCI-X capability, aborting\n");
13112 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13113 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
13116 /* If we have an AMD 762 or VIA K8T800 chipset, write
13117 * reordering to the mailbox registers done by the host
13118 * controller can cause major troubles. We read back from
13119 * every mailbox register write to force the writes to be
13120 * posted to the chip in order.
13122 if (pci_dev_present(write_reorder_chipsets) &&
13123 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13124 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
13126 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13127 &tp->pci_cacheline_sz);
13128 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13129 &tp->pci_lat_timer);
13130 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13131 tp->pci_lat_timer < 64) {
13132 tp->pci_lat_timer = 64;
13133 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13134 tp->pci_lat_timer);
13137 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13138 /* 5700 BX chips need to have their TX producer index
13139 * mailboxes written twice to workaround a bug.
13141 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
13143 /* If we are in PCI-X mode, enable register write workaround.
13145 * The workaround is to use indirect register accesses
13146 * for all chip writes not to mailbox registers.
13148 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13151 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13153 /* The chip can have its power management PCI config
13154 * space registers clobbered due to this bug.
13155 * So explicitly force the chip into D0 here.
13157 pci_read_config_dword(tp->pdev,
13158 tp->pm_cap + PCI_PM_CTRL,
13160 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13161 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13162 pci_write_config_dword(tp->pdev,
13163 tp->pm_cap + PCI_PM_CTRL,
13166 /* Also, force SERR#/PERR# in PCI command. */
13167 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13168 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13169 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13173 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13174 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
13175 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13176 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
13178 /* Chip-specific fixup from Broadcom driver */
13179 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13180 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13181 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13182 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13185 /* Default fast path register access methods */
13186 tp->read32 = tg3_read32;
13187 tp->write32 = tg3_write32;
13188 tp->read32_mbox = tg3_read32;
13189 tp->write32_mbox = tg3_write32;
13190 tp->write32_tx_mbox = tg3_write32;
13191 tp->write32_rx_mbox = tg3_write32;
13193 /* Various workaround register access methods */
13194 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
13195 tp->write32 = tg3_write_indirect_reg32;
13196 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13197 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
13198 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13200 * Back-to-back register writes can cause problems on these
13201 * chips; the workaround is to read back all register writes
13202 * except those to mailbox registers.
13204 * See tg3_write_indirect_reg32().
13206 tp->write32 = tg3_write_flush_reg32;
13209 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
13210 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
13211 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13212 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
13213 tp->write32_rx_mbox = tg3_write_flush_reg32;
13216 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
13217 tp->read32 = tg3_read_indirect_reg32;
13218 tp->write32 = tg3_write_indirect_reg32;
13219 tp->read32_mbox = tg3_read_indirect_mbox;
13220 tp->write32_mbox = tg3_write_indirect_mbox;
13221 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13222 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13227 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13228 pci_cmd &= ~PCI_COMMAND_MEMORY;
13229 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13231 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13232 tp->read32_mbox = tg3_read32_mbox_5906;
13233 tp->write32_mbox = tg3_write32_mbox_5906;
13234 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13235 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13238 if (tp->write32 == tg3_write_indirect_reg32 ||
13239 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13240 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13241 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13242 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
13244 /* Get eeprom hw config before calling tg3_set_power_state().
13245 * In particular, the TG3_FLG2_IS_NIC flag must be
13246 * determined before calling tg3_set_power_state() so that
13247 * we know whether or not to switch out of Vaux power.
13248 * When the flag is set, it means that GPIO1 is used for eeprom
13249 * write protect and also implies that it is a LOM where GPIOs
13250 * are not used to switch power.
13252 tg3_get_eeprom_hw_cfg(tp);
13254 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13255 /* Allow reads and writes to the
13256 * APE register and memory space.
13258 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13259 PCISTATE_ALLOW_APE_SHMEM_WR |
13260 PCISTATE_ALLOW_APE_PSPACE_WR;
13261 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13265 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13266 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13267 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13268 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13269 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
13270 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
13272 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
13273 * GPIO1 driven high will bring 5700's external PHY out of reset.
13274 * It is also used as eeprom write protect on LOMs.
13276 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13277 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13278 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
13279 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13280 GRC_LCLCTRL_GPIO_OUTPUT1);
13281 /* Unused GPIO3 must be driven as output on 5752 because there
13282 * are no pull-up resistors on unused GPIO pins.
13284 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13285 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13287 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13288 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13289 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13290 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13292 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13293 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13294 /* Turn off the debug UART. */
13295 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13296 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
13297 /* Keep VMain power. */
13298 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13299 GRC_LCLCTRL_GPIO_OUTPUT0;
13302 /* Force the chip into D0. */
13303 err = tg3_set_power_state(tp, PCI_D0);
13305 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13309 /* Derive initial jumbo mode from MTU assigned in
13310 * ether_setup() via the alloc_etherdev() call
13312 if (tp->dev->mtu > ETH_DATA_LEN &&
13313 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13314 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
13316 /* Determine WakeOnLan speed to use. */
13317 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13318 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13319 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13320 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13321 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
13323 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
13326 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13327 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
13329 /* A few boards don't want the Ethernet@WireSpeed PHY feature */
13330 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13331 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
13332 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13333 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13334 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) ||
13335 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
13336 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
13338 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13339 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13340 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
13341 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13342 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
13344 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
13345 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
13346 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13347 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13348 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
13349 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13350 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13351 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13352 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13353 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13354 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13355 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
13356 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13357 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
13359 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
13362 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13363 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13364 tp->phy_otp = tg3_read_otp_phycfg(tp);
13365 if (tp->phy_otp == 0)
13366 tp->phy_otp = TG3_OTP_DEFAULT;
13369 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
13370 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13372 tp->mi_mode = MAC_MI_MODE_BASE;
13374 tp->coalesce_mode = 0;
13375 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
13376 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13377 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
13379 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13380 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13381 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
13383 err = tg3_mdio_init(tp);
13387 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
13388 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
13391 /* Initialize data/descriptor byte/word swapping. */
13392 val = tr32(GRC_MODE);
13393 val &= GRC_MODE_HOST_STACKUP;
13394 tw32(GRC_MODE, val | tp->grc_mode);
13396 tg3_switch_clocks(tp);
13398 /* Clear this out for sanity. */
13399 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
13401 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13403 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
13404 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
13405 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
13407 if (chiprevid == CHIPREV_ID_5701_A0 ||
13408 chiprevid == CHIPREV_ID_5701_B0 ||
13409 chiprevid == CHIPREV_ID_5701_B2 ||
13410 chiprevid == CHIPREV_ID_5701_B5) {
13411 void __iomem *sram_base;
13413 /* Write some dummy words into the SRAM status block
13414 * area and see if they read back correctly. If the value
13415 * read back is bad, force-enable the PCI-X workaround.
13417 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
13419 writel(0x00000000, sram_base);
13420 writel(0x00000000, sram_base + 4);
13421 writel(0xffffffff, sram_base + 4);
13422 if (readl(sram_base) != 0x00000000)
13423 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
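/*
 * Illustrative sketch (not part of the original driver): the probe above
 * boils down to "park a known value in one SRAM word, disturb the word
 * next to it, then check that the first word survived":
 *
 *	writel(0x00000000, sram_base);		word under test
 *	writel(0x00000000, sram_base + 4);	neighbouring word
 *	writel(0xffffffff, sram_base + 4);	disturb the neighbour
 *	buggy = (readl(sram_base) != 0x00000000);
 *
 * On healthy hardware the first word is untouched; otherwise the PCI-X
 * target workaround flag is forced on.
 */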
13428 tg3_nvram_init(tp);
13430 grc_misc_cfg = tr32(GRC_MISC_CFG);
13431 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
13433 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13434 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
13435 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
13436 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
13438 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
13439 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
13440 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
13441 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
13442 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
13443 HOSTCC_MODE_CLRTICK_TXBD);
13445 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
13446 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13447 tp->misc_host_ctrl);
13450 /* Preserve the APE MAC_MODE bits */
13451 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
13452 tp->mac_mode = tr32(MAC_MODE) |
13453 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13455 tp->mac_mode = TG3_DEF_MAC_MODE;
13457 /* These devices are limited to 10/100 only. */
13458 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13459 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
13460 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13461 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13462 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
13463 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
13464 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
13465 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13466 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
13467 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
13468 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
13469 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
13470 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13471 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13472 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
13473 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
13475 err = tg3_phy_probe(tp);
13477 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
13478 /* ... but do not return immediately ... */
13483 tg3_read_fw_ver(tp);
13485 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
13486 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
13488 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13489 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
13491 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
13494 /* 5700 {AX,BX} chips have a broken status block link
13495 * change bit implementation, so we must use the
13496 * status register in those cases.
13498 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13499 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
13501 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
13503 /* The led_ctrl value is set during tg3_phy_probe(); here we might
13504 * have to force the link status polling mechanism based
13505 * upon subsystem IDs.
13507 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
13508 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13509 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
13510 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
13511 TG3_FLAG_USE_LINKCHG_REG);
13514 /* For all SERDES we poll the MAC status register. */
13515 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
13516 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
13518 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
13520 tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM;
13521 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
13522 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13523 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
13524 tp->rx_offset -= NET_IP_ALIGN;
13525 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13526 tp->rx_copy_thresh = ~(u16)0;
13530 tp->rx_std_max_post = TG3_RX_RING_SIZE;
13532 /* Increment the rx prod index on the rx std ring by at most
13533 * 8 for these chips to work around hw errata.
13535 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13536 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13537 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13538 tp->rx_std_max_post = 8;
13540 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
13541 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
13542 PCIE_PWR_MGMT_L1_THRESH_MSK;
13547 #ifdef CONFIG_SPARC
13548 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
13550 struct net_device *dev = tp->dev;
13551 struct pci_dev *pdev = tp->pdev;
13552 struct device_node *dp = pci_device_to_OF_node(pdev);
13553 const unsigned char *addr;
13556 addr = of_get_property(dp, "local-mac-address", &len);
13557 if (addr && len == 6) {
13558 memcpy(dev->dev_addr, addr, 6);
13559 memcpy(dev->perm_addr, dev->dev_addr, 6);
13565 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
13567 struct net_device *dev = tp->dev;
13569 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
13570 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
13575 static int __devinit tg3_get_device_address(struct tg3 *tp)
13577 struct net_device *dev = tp->dev;
13578 u32 hi, lo, mac_offset;
13581 #ifdef CONFIG_SPARC
13582 if (!tg3_get_macaddr_sparc(tp))
13587 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
13588 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13589 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
13591 if (tg3_nvram_lock(tp))
13592 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
13594 tg3_nvram_unlock(tp);
13595 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13596 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
13597 if (PCI_FUNC(tp->pdev->devfn) & 1)
13599 if (PCI_FUNC(tp->pdev->devfn) > 1)
13600 mac_offset += 0x18c;
13601 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13604 /* First try to get it from MAC address mailbox. */
13605 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
13606 if ((hi >> 16) == 0x484b) {
13607 dev->dev_addr[0] = (hi >> 8) & 0xff;
13608 dev->dev_addr[1] = (hi >> 0) & 0xff;
13610 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
13611 dev->dev_addr[2] = (lo >> 24) & 0xff;
13612 dev->dev_addr[3] = (lo >> 16) & 0xff;
13613 dev->dev_addr[4] = (lo >> 8) & 0xff;
13614 dev->dev_addr[5] = (lo >> 0) & 0xff;
13616 /* Some old bootcode may report a 0 MAC address in SRAM */
13617 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
13620 /* Next, try NVRAM. */
13621 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
13622 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
13623 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
13624 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
13625 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
13627 /* Finally just fetch it out of the MAC control regs. */
13629 hi = tr32(MAC_ADDR_0_HIGH);
13630 lo = tr32(MAC_ADDR_0_LOW);
13632 dev->dev_addr[5] = lo & 0xff;
13633 dev->dev_addr[4] = (lo >> 8) & 0xff;
13634 dev->dev_addr[3] = (lo >> 16) & 0xff;
13635 dev->dev_addr[2] = (lo >> 24) & 0xff;
13636 dev->dev_addr[1] = hi & 0xff;
13637 dev->dev_addr[0] = (hi >> 8) & 0xff;
13641 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
13642 #ifdef CONFIG_SPARC
13643 if (!tg3_get_default_macaddr_sparc(tp))
13648 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
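/*
 * Illustrative sketch (not part of the original driver): the MAC address
 * mailbox used above packs the station address into two 32-bit words.
 * The high word carries the 0x484b ("HK") signature in its upper 16 bits
 * and the first two address octets below it; the low word carries the
 * remaining four octets, most significant first.  For example, the
 * address 00:10:18:aa:bb:cc would be stored as hi = 0x484b0010 and
 * lo = 0x18aabbcc.  A hypothetical helper unpacking the two words:
 */
static void tg3_unpack_mac_mbox_sketch(u32 hi, u32 lo, u8 *addr)
{
	addr[0] = (hi >>  8) & 0xff;
	addr[1] = (hi >>  0) & 0xff;
	addr[2] = (lo >> 24) & 0xff;
	addr[3] = (lo >> 16) & 0xff;
	addr[4] = (lo >>  8) & 0xff;
	addr[5] = (lo >>  0) & 0xff;
}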
13652 #define BOUNDARY_SINGLE_CACHELINE 1
13653 #define BOUNDARY_MULTI_CACHELINE 2
13655 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
13657 int cacheline_size;
13661 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
13663 cacheline_size = 1024;
13665 cacheline_size = (int) byte * 4;
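/*
 * Illustrative note (not part of the original driver): PCI_CACHE_LINE_SIZE
 * is expressed in 32-bit words, so multiplying by 4 converts it to bytes;
 * a register value of 0x10 (16 words) therefore describes a 64-byte cache
 * line.  A value of 0 means the register was never programmed, in which
 * case the code above falls back to a conservative 1024 bytes.
 */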
13667 /* On 5703 and later chips, the boundary bits have no effect. */
13670 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13671 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13672 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13675 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
13676 goal = BOUNDARY_MULTI_CACHELINE;
13678 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
13679 goal = BOUNDARY_SINGLE_CACHELINE;
13685 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
13686 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
13693 /* PCI controllers on most RISC systems tend to disconnect
13694 * when a device tries to burst across a cache-line boundary.
13695 * Therefore, letting tg3 do so just wastes PCI bandwidth.
13697 * Unfortunately, for PCI-E there are only limited
13698 * write-side controls for this, and thus for reads
13699 * we will still get the disconnects. We'll also waste
13700 * these PCI cycles for both read and write for chips
13701 * other than 5700 and 5701, which do not implement the boundary bits.
13704 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13705 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
13706 switch (cacheline_size) {
13711 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13712 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
13713 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
13715 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13716 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13721 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
13722 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
13726 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13727 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13730 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13731 switch (cacheline_size) {
13735 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13736 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13737 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
13743 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13744 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
13748 switch (cacheline_size) {
13750 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13751 val |= (DMA_RWCTRL_READ_BNDRY_16 |
13752 DMA_RWCTRL_WRITE_BNDRY_16);
13757 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13758 val |= (DMA_RWCTRL_READ_BNDRY_32 |
13759 DMA_RWCTRL_WRITE_BNDRY_32);
13764 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13765 val |= (DMA_RWCTRL_READ_BNDRY_64 |
13766 DMA_RWCTRL_WRITE_BNDRY_64);
13771 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13772 val |= (DMA_RWCTRL_READ_BNDRY_128 |
13773 DMA_RWCTRL_WRITE_BNDRY_128);
13778 val |= (DMA_RWCTRL_READ_BNDRY_256 |
13779 DMA_RWCTRL_WRITE_BNDRY_256);
13782 val |= (DMA_RWCTRL_READ_BNDRY_512 |
13783 DMA_RWCTRL_WRITE_BNDRY_512);
13787 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
13788 DMA_RWCTRL_WRITE_BNDRY_1024);
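/*
 * Illustrative note (not part of the original driver): the switch above
 * picks DMA_RWCTRL read/write boundary fields that match the host cache
 * line, so a single burst never straddles it.  For example, with a
 * 64-byte cache line and goal == BOUNDARY_SINGLE_CACHELINE on conventional
 * PCI, the result is DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64.
 */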
13797 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
13799 struct tg3_internal_buffer_desc test_desc;
13800 u32 sram_dma_descs;
13803 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
13805 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
13806 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
13807 tw32(RDMAC_STATUS, 0);
13808 tw32(WDMAC_STATUS, 0);
13810 tw32(BUFMGR_MODE, 0);
13811 tw32(FTQ_RESET, 0);
13813 test_desc.addr_hi = ((u64) buf_dma) >> 32;
13814 test_desc.addr_lo = buf_dma & 0xffffffff;
13815 test_desc.nic_mbuf = 0x00002100;
13816 test_desc.len = size;
13819 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
13820 * the *second* time the tg3 driver was loaded after an initial scan.
13823 * Broadcom tells me:
13824 * ...the DMA engine is connected to the GRC block and a DMA
13825 * reset may affect the GRC block in some unpredictable way...
13826 * The behavior of resets to individual blocks has not been tested.
13828 * Broadcom noted the GRC reset will also reset all sub-components.
13831 test_desc.cqid_sqid = (13 << 8) | 2;
13833 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
13836 test_desc.cqid_sqid = (16 << 8) | 7;
13838 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
13841 test_desc.flags = 0x00000005;
13843 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
13846 val = *(((u32 *)&test_desc) + i);
13847 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
13848 sram_dma_descs + (i * sizeof(u32)));
13849 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
13851 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
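/*
 * Illustrative sketch (not part of the original driver): the loop above
 * uses the config-space memory window to poke one 32-bit word at a time
 * into NIC SRAM; a hypothetical single-word write would be:
 *
 *	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, sram_off);
 *	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
 *	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
 *
 * leaving the window pointed back at offset 0, as the code above does.
 */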
13854 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
13856 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
13859 for (i = 0; i < 40; i++) {
13863 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
13865 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
13866 if ((val & 0xffff) == sram_dma_descs) {
13877 #define TEST_BUFFER_SIZE 0x2000
13879 static int __devinit tg3_test_dma(struct tg3 *tp)
13881 dma_addr_t buf_dma;
13882 u32 *buf, saved_dma_rwctrl;
13885 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
13891 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
13892 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
13894 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
13896 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13899 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13900 /* DMA read watermark not used on PCIE */
13901 tp->dma_rwctrl |= 0x00180000;
13902 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
13903 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13904 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
13905 tp->dma_rwctrl |= 0x003f0000;
13907 tp->dma_rwctrl |= 0x003f000f;
13909 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13910 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
13911 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
13912 u32 read_water = 0x7;
13914 /* If the 5704 is behind the EPB bridge, we can
13915 * do the less restrictive ONE_DMA workaround for
13916 * better performance.
13918 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
13919 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13920 tp->dma_rwctrl |= 0x8000;
13921 else if (ccval == 0x6 || ccval == 0x7)
13922 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
13924 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
13926 /* Set bit 23 to enable PCIX hw bug fix */
13928 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
13929 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
13931 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
13932 /* 5780 always in PCIX mode */
13933 tp->dma_rwctrl |= 0x00144000;
13934 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13935 /* 5714 always in PCIX mode */
13936 tp->dma_rwctrl |= 0x00148000;
13938 tp->dma_rwctrl |= 0x001b000f;
13942 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13943 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13944 tp->dma_rwctrl &= 0xfffffff0;
13946 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13947 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13948 /* Remove this if it causes problems for some boards. */
13949 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
13951 /* On 5700/5701 chips, we need to set this bit.
13952 * Otherwise the chip will issue cacheline transactions
13953 * to streamable DMA memory without all of the byte
13954 * enables turned on. This is an error on several
13955 * RISC PCI controllers, in particular sparc64.
13957 * On 5703/5704 chips, this bit has been reassigned
13958 * a different meaning. In particular, it is used
13959 * on those chips to enable a PCI-X workaround.
13961 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
13964 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13967 /* Unneeded, already done by tg3_get_invariants. */
13968 tg3_switch_clocks(tp);
13971 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13972 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13975 /* It is best to perform the DMA test with the maximum write burst size
13976 * to expose the 5700/5701 write DMA bug.
13978 saved_dma_rwctrl = tp->dma_rwctrl;
13979 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13980 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13985 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
13988 /* Send the buffer to the chip. */
13989 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13991 dev_err(&tp->pdev->dev,
13992 "%s: Buffer write failed. err = %d\n",
13998 /* Validate that the data reached card RAM correctly. */
13999 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14001 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14002 if (le32_to_cpu(val) != p[i]) {
14003 dev_err(&tp->pdev->dev,
14004 "%s: Buffer corrupted on device! "
14005 "(%d != %d)\n", __func__, val, i);
14006 /* ret = -ENODEV here? */
14011 /* Now read it back. */
14012 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14014 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14015 "err = %d\n", __func__, ret);
14020 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14024 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14025 DMA_RWCTRL_WRITE_BNDRY_16) {
14026 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14027 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14028 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14031 dev_err(&tp->pdev->dev,
14032 "%s: Buffer corrupted on read back! "
14033 "(%d != %d)\n", __func__, p[i], i);
14039 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14045 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14046 DMA_RWCTRL_WRITE_BNDRY_16) {
14047 static struct pci_device_id dma_wait_state_chipsets[] = {
14048 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
14049 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14053 /* The DMA test passed without adjusting the DMA boundary;
14054 * now look for chipsets that are known to expose the
14055 * DMA bug without failing the test.
14057 if (pci_dev_present(dma_wait_state_chipsets)) {
14058 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14059 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14061 /* Safe to use the calculated DMA boundary. */
14062 tp->dma_rwctrl = saved_dma_rwctrl;
14065 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14069 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
14074 static void __devinit tg3_init_link_config(struct tg3 *tp)
14076 tp->link_config.advertising =
14077 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
14078 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
14079 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
14080 ADVERTISED_Autoneg | ADVERTISED_MII);
14081 tp->link_config.speed = SPEED_INVALID;
14082 tp->link_config.duplex = DUPLEX_INVALID;
14083 tp->link_config.autoneg = AUTONEG_ENABLE;
14084 tp->link_config.active_speed = SPEED_INVALID;
14085 tp->link_config.active_duplex = DUPLEX_INVALID;
14086 tp->link_config.phy_is_low_power = 0;
14087 tp->link_config.orig_speed = SPEED_INVALID;
14088 tp->link_config.orig_duplex = DUPLEX_INVALID;
14089 tp->link_config.orig_autoneg = AUTONEG_INVALID;
14092 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14094 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
14095 tp->bufmgr_config.mbuf_read_dma_low_water =
14096 DEFAULT_MB_RDMA_LOW_WATER_5705;
14097 tp->bufmgr_config.mbuf_mac_rx_low_water =
14098 DEFAULT_MB_MACRX_LOW_WATER_57765;
14099 tp->bufmgr_config.mbuf_high_water =
14100 DEFAULT_MB_HIGH_WATER_57765;
14102 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14103 DEFAULT_MB_RDMA_LOW_WATER_5705;
14104 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14105 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14106 tp->bufmgr_config.mbuf_high_water_jumbo =
14107 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14108 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14109 tp->bufmgr_config.mbuf_read_dma_low_water =
14110 DEFAULT_MB_RDMA_LOW_WATER_5705;
14111 tp->bufmgr_config.mbuf_mac_rx_low_water =
14112 DEFAULT_MB_MACRX_LOW_WATER_5705;
14113 tp->bufmgr_config.mbuf_high_water =
14114 DEFAULT_MB_HIGH_WATER_5705;
14115 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14116 tp->bufmgr_config.mbuf_mac_rx_low_water =
14117 DEFAULT_MB_MACRX_LOW_WATER_5906;
14118 tp->bufmgr_config.mbuf_high_water =
14119 DEFAULT_MB_HIGH_WATER_5906;
14122 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14123 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14124 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14125 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14126 tp->bufmgr_config.mbuf_high_water_jumbo =
14127 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14129 tp->bufmgr_config.mbuf_read_dma_low_water =
14130 DEFAULT_MB_RDMA_LOW_WATER;
14131 tp->bufmgr_config.mbuf_mac_rx_low_water =
14132 DEFAULT_MB_MACRX_LOW_WATER;
14133 tp->bufmgr_config.mbuf_high_water =
14134 DEFAULT_MB_HIGH_WATER;
14136 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14137 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14138 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14139 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14140 tp->bufmgr_config.mbuf_high_water_jumbo =
14141 DEFAULT_MB_HIGH_WATER_JUMBO;
14144 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14145 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14148 static char * __devinit tg3_phy_string(struct tg3 *tp)
14150 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14151 case TG3_PHY_ID_BCM5400: return "5400";
14152 case TG3_PHY_ID_BCM5401: return "5401";
14153 case TG3_PHY_ID_BCM5411: return "5411";
14154 case TG3_PHY_ID_BCM5701: return "5701";
14155 case TG3_PHY_ID_BCM5703: return "5703";
14156 case TG3_PHY_ID_BCM5704: return "5704";
14157 case TG3_PHY_ID_BCM5705: return "5705";
14158 case TG3_PHY_ID_BCM5750: return "5750";
14159 case TG3_PHY_ID_BCM5752: return "5752";
14160 case TG3_PHY_ID_BCM5714: return "5714";
14161 case TG3_PHY_ID_BCM5780: return "5780";
14162 case TG3_PHY_ID_BCM5755: return "5755";
14163 case TG3_PHY_ID_BCM5787: return "5787";
14164 case TG3_PHY_ID_BCM5784: return "5784";
14165 case TG3_PHY_ID_BCM5756: return "5722/5756";
14166 case TG3_PHY_ID_BCM5906: return "5906";
14167 case TG3_PHY_ID_BCM5761: return "5761";
14168 case TG3_PHY_ID_BCM5718C: return "5718C";
14169 case TG3_PHY_ID_BCM5718S: return "5718S";
14170 case TG3_PHY_ID_BCM57765: return "57765";
14171 case TG3_PHY_ID_BCM5719C: return "5719C";
14172 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14173 case 0: return "serdes";
14174 default: return "unknown";
14178 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14180 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14181 strcpy(str, "PCI Express");
14183 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
14184 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14186 strcpy(str, "PCIX:");
14188 if ((clock_ctrl == 7) ||
14189 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14190 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14191 strcat(str, "133MHz");
14192 else if (clock_ctrl == 0)
14193 strcat(str, "33MHz");
14194 else if (clock_ctrl == 2)
14195 strcat(str, "50MHz");
14196 else if (clock_ctrl == 4)
14197 strcat(str, "66MHz");
14198 else if (clock_ctrl == 6)
14199 strcat(str, "100MHz");
14201 strcpy(str, "PCI:");
14202 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
14203 strcat(str, "66MHz");
14205 strcat(str, "33MHz");
14207 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
14208 strcat(str, ":32-bit");
14210 strcat(str, ":64-bit");
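/*
 * Illustrative note (not part of the original driver): for the PCI-X and
 * conventional-PCI cases the string built above comes out looking like
 * "PCIX:133MHz:64-bit" or "PCI:33MHz:32-bit", depending on the detected
 * clock and bus width.
 */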
14214 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14216 struct pci_dev *peer;
14217 unsigned int func, devnr = tp->pdev->devfn & ~7;
14219 for (func = 0; func < 8; func++) {
14220 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14221 if (peer && peer != tp->pdev)
14225 /* The 5704 can be configured in single-port mode; set peer to
14226 * tp->pdev in that case.
14234 * We don't need to keep the refcount elevated; there's no way
14235 * to remove one half of this device without removing the other
14242 static void __devinit tg3_init_coal(struct tg3 *tp)
14244 struct ethtool_coalesce *ec = &tp->coal;
14246 memset(ec, 0, sizeof(*ec));
14247 ec->cmd = ETHTOOL_GCOALESCE;
14248 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14249 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14250 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14251 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14252 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14253 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14254 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14255 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14256 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14258 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14259 HOSTCC_MODE_CLRTICK_TXBD)) {
14260 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14261 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14262 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14263 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14266 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14267 ec->rx_coalesce_usecs_irq = 0;
14268 ec->tx_coalesce_usecs_irq = 0;
14269 ec->stats_block_coalesce_usecs = 0;
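/*
 * Illustrative note (not part of the original driver): these are only the
 * defaults reported through ETHTOOL_GCOALESCE; they are what
 * "ethtool -c <iface>" shows until the user changes them with
 * "ethtool -C <iface> ...".
 */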
14273 static const struct net_device_ops tg3_netdev_ops = {
14274 .ndo_open = tg3_open,
14275 .ndo_stop = tg3_close,
14276 .ndo_start_xmit = tg3_start_xmit,
14277 .ndo_get_stats64 = tg3_get_stats64,
14278 .ndo_validate_addr = eth_validate_addr,
14279 .ndo_set_multicast_list = tg3_set_rx_mode,
14280 .ndo_set_mac_address = tg3_set_mac_addr,
14281 .ndo_do_ioctl = tg3_ioctl,
14282 .ndo_tx_timeout = tg3_tx_timeout,
14283 .ndo_change_mtu = tg3_change_mtu,
14284 #if TG3_VLAN_TAG_USED
14285 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14287 #ifdef CONFIG_NET_POLL_CONTROLLER
14288 .ndo_poll_controller = tg3_poll_controller,
14292 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
14293 .ndo_open = tg3_open,
14294 .ndo_stop = tg3_close,
14295 .ndo_start_xmit = tg3_start_xmit_dma_bug,
14296 .ndo_get_stats64 = tg3_get_stats64,
14297 .ndo_validate_addr = eth_validate_addr,
14298 .ndo_set_multicast_list = tg3_set_rx_mode,
14299 .ndo_set_mac_address = tg3_set_mac_addr,
14300 .ndo_do_ioctl = tg3_ioctl,
14301 .ndo_tx_timeout = tg3_tx_timeout,
14302 .ndo_change_mtu = tg3_change_mtu,
14303 #if TG3_VLAN_TAG_USED
14304 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14306 #ifdef CONFIG_NET_POLL_CONTROLLER
14307 .ndo_poll_controller = tg3_poll_controller,
14311 static int __devinit tg3_init_one(struct pci_dev *pdev,
14312 const struct pci_device_id *ent)
14314 struct net_device *dev;
14316 int i, err, pm_cap;
14317 u32 sndmbx, rcvmbx, intmbx;
14319 u64 dma_mask, persist_dma_mask;
14321 printk_once(KERN_INFO "%s\n", version);
14323 err = pci_enable_device(pdev);
14325 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14329 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14331 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14332 goto err_out_disable_pdev;
14335 pci_set_master(pdev);
14337 /* Find power-management capability. */
14338 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14340 dev_err(&pdev->dev,
14341 "Cannot find Power Management capability, aborting\n");
14343 goto err_out_free_res;
14346 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14348 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14350 goto err_out_free_res;
14353 SET_NETDEV_DEV(dev, &pdev->dev);
14355 #if TG3_VLAN_TAG_USED
14356 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
14359 tp = netdev_priv(dev);
14362 tp->pm_cap = pm_cap;
14363 tp->rx_mode = TG3_DEF_RX_MODE;
14364 tp->tx_mode = TG3_DEF_TX_MODE;
14367 tp->msg_enable = tg3_debug;
14369 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14371 /* The word/byte swap controls here control register access byte
14372 * swapping. DMA data byte swapping is controlled in the GRC_MODE setting below.
14375 tp->misc_host_ctrl =
14376 MISC_HOST_CTRL_MASK_PCI_INT |
14377 MISC_HOST_CTRL_WORD_SWAP |
14378 MISC_HOST_CTRL_INDIR_ACCESS |
14379 MISC_HOST_CTRL_PCISTATE_RW;
14381 /* The NONFRM (non-frame) byte/word swap controls take effect
14382 * on descriptor entries, anything which isn't packet data.
14384 * The StrongARM chips on the board (one for tx, one for rx)
14385 * are running in big-endian mode.
14387 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14388 GRC_MODE_WSWAP_NONFRM_DATA);
14389 #ifdef __BIG_ENDIAN
14390 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14392 spin_lock_init(&tp->lock);
14393 spin_lock_init(&tp->indirect_lock);
14394 INIT_WORK(&tp->reset_task, tg3_reset_task);
14396 tp->regs = pci_ioremap_bar(pdev, BAR_0);
14398 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
14400 goto err_out_free_dev;
14403 tg3_init_link_config(tp);
14405 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
14406 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
14408 dev->ethtool_ops = &tg3_ethtool_ops;
14409 dev->watchdog_timeo = TG3_TX_TIMEOUT;
14410 dev->irq = pdev->irq;
14412 err = tg3_get_invariants(tp);
14414 dev_err(&pdev->dev,
14415 "Problem fetching invariants of chip, aborting\n");
14416 goto err_out_iounmap;
14419 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
14420 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 &&
14421 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
14422 dev->netdev_ops = &tg3_netdev_ops;
14424 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
14427 /* The EPB bridge inside 5714, 5715, and 5780 and any
14428 * device behind the EPB cannot support DMA addresses > 40-bit.
14429 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
14430 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
14431 * do DMA address check in tg3_start_xmit().
14433 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
14434 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
14435 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
14436 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
14437 #ifdef CONFIG_HIGHMEM
14438 dma_mask = DMA_BIT_MASK(64);
14441 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
14443 /* Configure DMA attributes. */
14444 if (dma_mask > DMA_BIT_MASK(32)) {
14445 err = pci_set_dma_mask(pdev, dma_mask);
14447 dev->features |= NETIF_F_HIGHDMA;
14448 err = pci_set_consistent_dma_mask(pdev,
14451 dev_err(&pdev->dev, "Unable to obtain 64 bit "
14452 "DMA for consistent allocations\n");
14453 goto err_out_iounmap;
14457 if (err || dma_mask == DMA_BIT_MASK(32)) {
14458 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
14460 dev_err(&pdev->dev,
14461 "No usable DMA configuration, aborting\n");
14462 goto err_out_iounmap;
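/*
 * Illustrative note (not part of the original driver): dma_mask constrains
 * streaming (per-packet) DMA mappings, while persist_dma_mask is applied
 * to coherent allocations via pci_set_consistent_dma_mask().  On
 * CONFIG_HIGHMEM systems the 40-bit-limited parts are allowed a 64-bit
 * streaming mask because tg3_start_xmit() re-checks each mapped address,
 * but the coherent mask stays at 40 bits.
 */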
14466 tg3_init_bufmgr_config(tp);
14468 /* Selectively allow TSO based on operating conditions */
14469 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
14470 (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
14471 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
14473 tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
14474 tp->fw_needed = NULL;
14477 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14478 tp->fw_needed = FIRMWARE_TG3;
14480 /* TSO is on by default on chips that support hardware TSO.
14481 * Firmware TSO on older chips gives lower performance, so it
14482 * is off by default, but can be enabled using ethtool.
14484 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
14485 (dev->features & NETIF_F_IP_CSUM)) {
14486 dev->features |= NETIF_F_TSO;
14487 vlan_features_add(dev, NETIF_F_TSO);
14489 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
14490 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
14491 if (dev->features & NETIF_F_IPV6_CSUM) {
14492 dev->features |= NETIF_F_TSO6;
14493 vlan_features_add(dev, NETIF_F_TSO6);
14495 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
14496 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14497 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14498 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
14499 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14500 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14501 dev->features |= NETIF_F_TSO_ECN;
14502 vlan_features_add(dev, NETIF_F_TSO_ECN);
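/*
 * Illustrative note (not part of the original driver): on chips that only
 * have firmware TSO, the feature is left disabled here, as noted above,
 * but can typically be turned on from user space with e.g.
 * "ethtool -K eth0 tso on" (interface name is just an example).
 */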
14506 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
14507 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
14508 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
14509 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
14510 tp->rx_pending = 63;
14513 err = tg3_get_device_address(tp);
14515 dev_err(&pdev->dev,
14516 "Could not obtain valid ethernet address, aborting\n");
14517 goto err_out_iounmap;
14520 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
14521 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
14522 if (!tp->aperegs) {
14523 dev_err(&pdev->dev,
14524 "Cannot map APE registers, aborting\n");
14526 goto err_out_iounmap;
14529 tg3_ape_lock_init(tp);
14531 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
14532 tg3_read_dash_ver(tp);
14536 * Reset the chip in case a UNDI or EFI driver did not shut it down.
14537 * The DMA self test will enable WDMAC and we'll see (spurious)
14538 * pending DMA on the PCI bus at that point.
14540 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
14541 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
14542 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
14543 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14546 err = tg3_test_dma(tp);
14548 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
14549 goto err_out_apeunmap;
14552 /* Flow control autonegotiation is the default behavior. */
14553 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
14554 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14556 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14557 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14558 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
14559 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
14560 struct tg3_napi *tnapi = &tp->napi[i];
14563 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
14565 tnapi->int_mbox = intmbx;
14571 tnapi->consmbox = rcvmbx;
14572 tnapi->prodmbox = sndmbx;
14575 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
14576 netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64);
14578 tnapi->coal_now = HOSTCC_MODE_NOW;
14579 netif_napi_add(dev, &tnapi->napi, tg3_poll, 64);
14582 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
14586 * If we support MSI-X, we'll be using RSS. If we're using
14587 * RSS, the first vector only handles link interrupts and the
14588 * remaining vectors handle rx and tx interrupts. Reuse the
14589 * mailbox values for the next iteration. The values we set up
14590 * above are still useful for the single-vector mode.
14605 pci_set_drvdata(pdev, dev);
14607 err = register_netdev(dev);
14609 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
14610 goto err_out_apeunmap;
14613 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
14614 tp->board_part_number,
14615 tp->pci_chip_rev_id,
14616 tg3_bus_string(tp, str),
14619 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
14620 struct phy_device *phydev;
14621 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
14623 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
14624 phydev->drv->name, dev_name(&phydev->dev));
14626 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
14627 "(WireSpeed[%d])\n", tg3_phy_string(tp),
14628 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
14629 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
14630 "10/100/1000Base-T")),
14631 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
14633 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
14634 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
14635 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
14636 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
14637 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
14638 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
14639 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
14641 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
14642 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
14648 iounmap(tp->aperegs);
14649 tp->aperegs = NULL;
14662 pci_release_regions(pdev);
14664 err_out_disable_pdev:
14665 pci_disable_device(pdev);
14666 pci_set_drvdata(pdev, NULL);
14670 static void __devexit tg3_remove_one(struct pci_dev *pdev)
14672 struct net_device *dev = pci_get_drvdata(pdev);
14675 struct tg3 *tp = netdev_priv(dev);
14678 release_firmware(tp->fw);
14680 flush_scheduled_work();
14682 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
14687 unregister_netdev(dev);
14689 iounmap(tp->aperegs);
14690 tp->aperegs = NULL;
14697 pci_release_regions(pdev);
14698 pci_disable_device(pdev);
14699 pci_set_drvdata(pdev, NULL);
14703 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
14705 struct net_device *dev = pci_get_drvdata(pdev);
14706 struct tg3 *tp = netdev_priv(dev);
14707 pci_power_t target_state;
14710 /* PCI register 4 needs to be saved whether netif_running() or not.
14711 * MSI address and data need to be saved if using MSI and netif_running().
14714 pci_save_state(pdev);
14716 if (!netif_running(dev))
14719 flush_scheduled_work();
14721 tg3_netif_stop(tp);
14723 del_timer_sync(&tp->timer);
14725 tg3_full_lock(tp, 1);
14726 tg3_disable_ints(tp);
14727 tg3_full_unlock(tp);
14729 netif_device_detach(dev);
14731 tg3_full_lock(tp, 0);
14732 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14733 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
14734 tg3_full_unlock(tp);
14736 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
14738 err = tg3_set_power_state(tp, target_state);
14742 tg3_full_lock(tp, 0);
14744 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14745 err2 = tg3_restart_hw(tp, 1);
14749 tp->timer.expires = jiffies + tp->timer_offset;
14750 add_timer(&tp->timer);
14752 netif_device_attach(dev);
14753 tg3_netif_start(tp);
14756 tg3_full_unlock(tp);
14765 static int tg3_resume(struct pci_dev *pdev)
14767 struct net_device *dev = pci_get_drvdata(pdev);
14768 struct tg3 *tp = netdev_priv(dev);
14771 pci_restore_state(tp->pdev);
14773 if (!netif_running(dev))
14776 err = tg3_set_power_state(tp, PCI_D0);
14780 netif_device_attach(dev);
14782 tg3_full_lock(tp, 0);
14784 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14785 err = tg3_restart_hw(tp, 1);
14789 tp->timer.expires = jiffies + tp->timer_offset;
14790 add_timer(&tp->timer);
14792 tg3_netif_start(tp);
14795 tg3_full_unlock(tp);
14803 static struct pci_driver tg3_driver = {
14804 .name = DRV_MODULE_NAME,
14805 .id_table = tg3_pci_tbl,
14806 .probe = tg3_init_one,
14807 .remove = __devexit_p(tg3_remove_one),
14808 .suspend = tg3_suspend,
14809 .resume = tg3_resume
14812 static int __init tg3_init(void)
14814 return pci_register_driver(&tg3_driver);
14817 static void __exit tg3_cleanup(void)
14819 pci_unregister_driver(&tg3_driver);
14822 module_init(tg3_init);
14823 module_exit(tg3_cleanup);