/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2010 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		114
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"September 30, 2010"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \
	  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \
	 RX_STD_MAX_SIZE_5717 : 512)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \
	  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \
	 RX_JMB_MAX_SIZE_5717 : 256)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
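
/* For example, with TG3_TX_RING_SIZE == 512, NEXT_TX(511) is
 * (511 + 1) & 511 == 0: the same result as (511 + 1) % 512, but the
 * mask form compiles to a single AND instead of a hardware divide.
 */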

#define TG3_RX_DMA_ALIGN		16
#define TG3_RX_HEADROOM			ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif
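
/* Illustrative sketch: on architectures where NET_IP_ALIGN is 0 (x86,
 * for instance), a receive-path length check such as
 *
 *	if (len < TG3_RX_COPY_THRESH(tp))
 *		... copy into a fresh skb instead of remapping the buffer ...
 *
 * folds to a compile-time comparison against 256, avoiding the
 * tp->rx_copy_thresh dereference mentioned above.
 */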

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
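
/* Example: with the default TG3_DEF_TX_RING_PENDING of 511, the TX queue
 * is woken once at least 511 / 4 = 127 descriptors are free again.
 */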

#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
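
/* The readl() in tg3_write_flush_reg32() above flushes the posted PCI
 * write: the read cannot complete until the preceding writel() has
 * actually reached the device, so the store is guaranteed to have
 * landed by the time the function returns.
 */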

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
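
/* Typical usage of these accessors elsewhere in the driver (sketch;
 * SOME_BIT stands in for any real flag of the register being touched):
 *
 *	val = tr32(GRC_MISC_CFG);		   // plain register read
 *	tw32(GRC_MISC_CFG, val | SOME_BIT);	   // posted write
 *	tw32_f(MAC_MODE, tp->mac_mode);		   // write + read-back flush
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40);   // write, flush, 40 usec wait
 */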

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}
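
/* Callers are expected to bracket accesses to APE-shared resources with
 * this pair, e.g. (sketch):
 *
 *	if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *		... touch state shared with the APE firmware ...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 */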

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
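
/* These three callbacks adapt tg3's MAC-driven MII access to the phylib
 * mii_bus interface; tg3_mdio_init() below wires them up via
 * tp->mdio_bus->read/write/reset, and they serialize against the rest
 * of the driver with tp->lock.
 */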

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
		u32 is_serdes;

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;
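
	/* Poll roughly once per 8 usec; the +1 guarantees at least one
	 * iteration (e.g. a full 2500 usec budget yields 313 polls of
	 * udelay(8) below).
	 */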
	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");
		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
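
/* The cases above implement the IEEE 802.3 Annex 28B pause resolution;
 * in brief (local advert / link partner advert -> resolved flow control):
 *
 *	PAUSE        / PAUSE           -> TX | RX
 *	PAUSE | ASYM / ASYM only       -> RX
 *	ASYM only    / PAUSE | ASYM    -> TX
 *	anything else                  -> none
 */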

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	} else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if (tp->phy_flags & TG3_PHYFLG_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}
	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_phydsp_write(tp, 0x000a, 0x310b);
		tg3_phydsp_write(tp, 0x201f, 0x9506);
		tg3_phydsp_write(tp, 0x401f, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}

static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* The GPIOs do something completely different on 57765. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}

static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);

static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
			     MII_TG3_AUXCTL_PCTL_100TX_LPWR |
			     MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
			     MII_TG3_AUXCTL_PCTL_VREG_11V);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
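/* NVRAM access goes through the chip's software arbitration register
 * (NVRAM_SWARB): the lock helper below requests the driver's grant bit
 * by writing SWARB_REQ_SET1 and polling for SWARB_GNT1, while
 * nvram_lock_cnt lets nested callers share a single hardware grant.
 */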
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}

/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}

#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
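/* Worked example for the Atmel translation below, assuming the
 * AT45DB0x1B's usual 264-byte page (tp->nvram_pagesize == 264) and
 * ATMEL_AT45DB0X1B_PAGE_POS == 9: a linear offset of 530 falls in
 * page 530 / 264 = 2 at byte 530 % 264 = 2, so the physical address
 * becomes (2 << 9) + 2 = 1026.  tg3_nvram_logical_addr() performs
 * the exact inverse mapping.
 */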
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
	    !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
	    !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}

/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}

/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		    TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
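/* tg3_set_power_state() below handles both directions: the D0 case
 * restores Vaux/GPIO state and returns early, while the D1-D3hot paths
 * save the current link configuration, optionally renegotiate the link
 * down to 10/100 for WOL, gate the core clocks, power the PHY down when
 * neither wake-up nor ASF needs it, and finally hand auxiliary power
 * over to the peer port via tg3_frob_aux_power().
 */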
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	switch (state) {
	case PCI_D0:
		pci_enable_wake(tp->pdev, state, false);
		pci_set_power_state(tp->pdev, PCI_D0);

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
	case PCI_D2:
	case PCI_D3hot:
		break;

	default:
		netdev_err(tp->dev, "Invalid power state (D%d) requested\n",
			   state);
		return -EINVAL;
	}

	/* Restore the CLKREQ setting. */
	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = pci_pme_capable(tp->pdev, state) &&
			     device_may_wakeup(&tp->pdev->dev) &&
			     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
			    device_should_wake) {
				if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power) {
				tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
		     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
			mac_mode |= tp->mac_mode &
				    (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
			if (mac_mode & MAC_MODE_APE_TX_EN)
				mac_mode |= MAC_MODE_TDE_ENABLE;
		}

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	if (device_should_wake)
		pci_enable_wake(tp->pdev, state, true);

	/* Finally, set the new power state. */
	pci_set_power_state(tp->pdev, state);

	return 0;
}
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	}
}
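/* tg3_phy_copper_begin() below programs MII_ADVERTISE/MII_TG3_CTRL
 * from tp->link_config and then either restarts autonegotiation or
 * forces BMCR directly.  The BMCR_LOOPBACK write in the forced path
 * appears to be there to drop the link cleanly before the new forced
 * mode takes effect.
 */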
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}

static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
{
	u32 adv_reg, all_mask = 0;

	if (mask & ADVERTISED_10baseT_Half)
		all_mask |= ADVERTISE_10HALF;
	if (mask & ADVERTISED_10baseT_Full)
		all_mask |= ADVERTISE_10FULL;
	if (mask & ADVERTISED_100baseT_Half)
		all_mask |= ADVERTISE_100HALF;
	if (mask & ADVERTISED_100baseT_Full)
		all_mask |= ADVERTISE_100FULL;

	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
		return 0;

	if ((adv_reg & all_mask) != all_mask)
		return 0;
	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		all_mask = 0;
		if (mask & ADVERTISED_1000baseT_Half)
			all_mask |= ADVERTISE_1000HALF;
		if (mask & ADVERTISED_1000baseT_Full)
			all_mask |= ADVERTISE_1000FULL;

		if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
			return 0;

		if ((tg3_ctrl & all_mask) != all_mask)
			return 0;
	}
	return 1;
}
static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
{
	u32 curadv, reqadv;

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return 1;

	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		if (curadv != reqadv)
			return 0;

		if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
			tg3_readphy(tp, MII_LPA, rmtadv);
	} else {
		/* Reprogram the advertisement register, even if it
		 * does not affect the current link.  If the link
		 * gets renegotiated in the future, we can save an
		 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
		if (curadv != reqadv) {
			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
				     ADVERTISE_PAUSE_ASYM);
			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
		}
	}

	return 1;
}
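/* Copper link bring-up below follows the usual MII sequence: quiesce
 * MAC events, optionally reset third-party PHYs known to wedge on link
 * drop, read BMSR twice (the link-status bit is latched low and only
 * clears on read), resolve speed/duplex from the vendor AUX_STAT
 * register, and only then reconcile flow control and the MAC_MODE
 * port setting with the negotiated result.
 */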
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       (MAC_STATUS_SYNC_CHANGED |
		MAC_STATUS_CFG_CHANGED |
		MAC_STATUS_MI_COMPLETION |
		MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
							      &rmt_adv))
					current_link_up = 1;
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			current_link_up = 1;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		       (MAC_STATUS_SYNC_CHANGED |
			MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
		u16 oldlnkctl, newlnkctl;

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      tp->pcie_cap + PCI_EXP_LNKCTL,
					      newlnkctl);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};

#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
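/* Software 1000BASE-X autonegotiation state machine, modeled on the
 * arbitration state diagram of IEEE 802.3 clause 37.  Each invocation
 * advances the state by at most one step; a return of ANEG_TIMER_ENAB
 * asks the caller to keep ticking (fiber_autoneg() below polls it in a
 * roughly 1 usec loop, with ap->cur_time counting the ticks).
 */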
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
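/* The BCM8002 init below pokes vendor-specific SERDES PHY registers
 * (0x10, 0x11, 0x13, 0x16, 0x18) that are not part of the standard MII
 * register set; the magic values presumably come straight from
 * Broadcom reference code and are best treated as opaque constants.
 */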
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       (MAC_STATUS_SYNC_CHANGED |
		MAC_STATUS_CFG_CHANGED |
		MAC_STATUS_MI_COMPLETION |
		MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			} else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}
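/* tg3_setup_phy() is the single entry point for link (re)configuration;
 * it dispatches to the fiber, fiber-MII or copper variant above and then
 * applies the MAC-side fixups (clock prescaler on 5784_AX, IPG/slot-time
 * for half-duplex gigabit, statistics coalescing, and the ASPM L1
 * threshold workaround) that are common to all media types.
 */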
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 val, scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in tg3_timer.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}

static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
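/* Example of the ring arithmetic above, with TG3_TX_RING_SIZE == 512:
 * tx_prod = 5 after a wrap and tx_cons = 510 gives (5 - 510) & 511 = 7
 * descriptors still in flight, so tg3_tx_avail() reports tx_pending - 7.
 * The mask trick relies on the ring size being a power of two (see the
 * comment near the TG3_TX_RING_SIZE definition).
 */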
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);
			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
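/* The smp_mb() in tg3_tx() above is one half of a classic producer/
 * consumer handshake: the xmit path is expected to stop the queue and
 * then re-check tx availability with matching ordering, so whichever
 * side runs last observes the other's update and the queue cannot
 * stall with free descriptors outstanding.
 */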
static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	if (!ri->skb)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(ri->skb);
	ri->skb = NULL;
}

/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		skb_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		skb_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	map->skb = skb;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
4496 /* We only need to move over in the address because the other
4497 * members of the RX descriptor are invariant. See notes above
4498 * tg3_alloc_rx_skb for full details.
4500 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4501 struct tg3_rx_prodring_set *dpr,
4502 u32 opaque_key, int src_idx,
4503 u32 dest_idx_unmasked)
4505 struct tg3 *tp = tnapi->tp;
4506 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4507 struct ring_info *src_map, *dest_map;
4508 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4511 switch (opaque_key) {
4512 case RXD_OPAQUE_RING_STD:
4513 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4514 dest_desc = &dpr->rx_std[dest_idx];
4515 dest_map = &dpr->rx_std_buffers[dest_idx];
4516 src_desc = &spr->rx_std[src_idx];
4517 src_map = &spr->rx_std_buffers[src_idx];
4520 case RXD_OPAQUE_RING_JUMBO:
4521 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4522 dest_desc = &dpr->rx_jmb[dest_idx].std;
4523 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4524 src_desc = &spr->rx_jmb[src_idx].std;
4525 src_map = &spr->rx_jmb_buffers[src_idx];
4532 dest_map->skb = src_map->skb;
4533 dma_unmap_addr_set(dest_map, mapping,
4534 dma_unmap_addr(src_map, mapping));
4535 dest_desc->addr_hi = src_desc->addr_hi;
4536 dest_desc->addr_lo = src_desc->addr_lo;
4538 /* Ensure that the update to the skb happens after the physical
4539 * addresses have been transferred to the new BD location.
4543 src_map->skb = NULL;
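/* Illustrative sketch, not from the original source: the rule
 * implemented above is "publish the descriptor before the skb".  The
 * writer copies the DMA address and BD fields, issues a write barrier,
 * and only then clears src_map->skb; the reader in
 * tg3_rx_prodring_xfer() tests the skb pointer first and uses the
 * matching read barrier before trusting the rest of the entry:
 *
 *	writer				reader
 *	------				------
 *	dest_desc->addr_* = ...;	if (buffers[i].skb)
 *	smp_wmb();				stop;
 *	src_map->skb = NULL;		smp_rmb();
 *					memcpy(...);
 */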
4546 /* The RX ring scheme is composed of multiple rings which post fresh
4547 * buffers to the chip, and one special ring the chip uses to report
4548 * status back to the host.
4550 * The special ring reports the status of received packets to the
4551 * host. The chip does not write into the original descriptor the
4552 * RX buffer was obtained from. The chip simply takes the original
4553 * descriptor as provided by the host, updates the status and length
4554 * field, then writes this into the next status ring entry.
4556 * Each ring the host uses to post buffers to the chip is described
4557 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4558 * it is first placed into the on-chip ram. When the packet's length
4559 * is known, it walks down the TG3_BDINFO entries to select the ring.
4560 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4561 * which is within the range of the new packet's length is chosen.
4563 * The "separate ring for rx status" scheme may sound queer, but it makes
4564 * sense from a cache coherency perspective. If only the host writes
4565 * to the buffer post rings, and only the chip writes to the rx status
4566 * rings, then cache lines never move beyond shared-modified state.
4567 * If both the host and chip were to write into the same ring, cache line
4568 * eviction could occur since both entities want it in an exclusive state.
4570 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4572 struct tg3 *tp = tnapi->tp;
4573 u32 work_mask, rx_std_posted = 0;
4574 u32 std_prod_idx, jmb_prod_idx;
4575 u32 sw_idx = tnapi->rx_rcb_ptr;
4578 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4580 hw_idx = *(tnapi->rx_rcb_prod_idx);
4582 * We need to order the read of hw_idx and the read of
4583 * the opaque cookie.
4588 std_prod_idx = tpr->rx_std_prod_idx;
4589 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4590 while (sw_idx != hw_idx && budget > 0) {
4591 struct ring_info *ri;
4592 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4594 struct sk_buff *skb;
4595 dma_addr_t dma_addr;
4596 u32 opaque_key, desc_idx, *post_ptr;
4597 bool hw_vlan __maybe_unused = false;
4598 u16 vtag __maybe_unused = 0;
4600 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4601 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4602 if (opaque_key == RXD_OPAQUE_RING_STD) {
4603 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4604 dma_addr = dma_unmap_addr(ri, mapping);
4606 post_ptr = &std_prod_idx;
4608 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4609 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4610 dma_addr = dma_unmap_addr(ri, mapping);
4612 post_ptr = &jmb_prod_idx;
4614 goto next_pkt_nopost;
4616 work_mask |= opaque_key;
4618 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4619 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4621 tg3_recycle_rx(tnapi, tpr, opaque_key,
4622 desc_idx, *post_ptr);
4624 /* Other statistics are tracked by the card. */
4625 tp->net_stats.rx_dropped++;
4629 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4632 if (len > TG3_RX_COPY_THRESH(tp)) {
4635 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4640 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4641 PCI_DMA_FROMDEVICE);
4643 /* Ensure that the update to the skb happens
4644 * after the usage of the old DMA mapping.
4652 struct sk_buff *copy_skb;
4654 tg3_recycle_rx(tnapi, tpr, opaque_key,
4655 desc_idx, *post_ptr);
4657 copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN +
4659 if (copy_skb == NULL)
4660 goto drop_it_no_recycle;
4662 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
4663 skb_put(copy_skb, len);
4664 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4665 skb_copy_from_linear_data(skb, copy_skb->data, len);
4666 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4668 /* We'll reuse the original ring buffer. */
4672 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4673 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4674 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4675 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4676 skb->ip_summed = CHECKSUM_UNNECESSARY;
4678 skb_checksum_none_assert(skb);
4680 skb->protocol = eth_type_trans(skb, tp->dev);
4682 if (len > (tp->dev->mtu + ETH_HLEN) &&
4683 skb->protocol != htons(ETH_P_8021Q)) {
4688 if (desc->type_flags & RXD_FLAG_VLAN &&
4689 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
4690 vtag = desc->err_vlan & RXD_VLAN_MASK;
4691 #if TG3_VLAN_TAG_USED
4697 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
4698 __skb_push(skb, VLAN_HLEN);
4700 memmove(ve, skb->data + VLAN_HLEN,
4702 ve->h_vlan_proto = htons(ETH_P_8021Q);
4703 ve->h_vlan_TCI = htons(vtag);
4707 #if TG3_VLAN_TAG_USED
4709 vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
4712 napi_gro_receive(&tnapi->napi, skb);
4720 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4721 tpr->rx_std_prod_idx = std_prod_idx &
4722 tp->rx_std_ring_mask;
4723 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4724 tpr->rx_std_prod_idx);
4725 work_mask &= ~RXD_OPAQUE_RING_STD;
4730 sw_idx &= tp->rx_ret_ring_mask;
4732 /* Refresh hw_idx to see if there is new work */
4733 if (sw_idx == hw_idx) {
4734 hw_idx = *(tnapi->rx_rcb_prod_idx);
4739 /* ACK the status ring. */
4740 tnapi->rx_rcb_ptr = sw_idx;
4741 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4743 /* Refill RX ring(s). */
4744 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
4745 if (work_mask & RXD_OPAQUE_RING_STD) {
4746 tpr->rx_std_prod_idx = std_prod_idx &
4747 tp->rx_std_ring_mask;
4748 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4749 tpr->rx_std_prod_idx);
4751 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4752 tpr->rx_jmb_prod_idx = jmb_prod_idx &
4753 tp->rx_jmb_ring_mask;
4754 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4755 tpr->rx_jmb_prod_idx);
4758 } else if (work_mask) {
4759 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4760 * updated before the producer indices can be updated.
4764 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
4765 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
4767 if (tnapi != &tp->napi[1])
4768 napi_schedule(&tp->napi[1].napi);
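/* Illustrative sketch (hypothetical helper, simplified from tg3_rx()
 * above): the canonical tg3 status-ring walk.  The chip advances the
 * producer index in the status block; the driver consumes entries
 * until it catches up, masks the index at the ring boundary, and
 * acknowledges progress through the consumer mailbox:
 */
static void __maybe_unused tg3_example_status_ring_walk(struct tg3_napi *tnapi,
							int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u32 hw_idx = *(tnapi->rx_rcb_prod_idx);	/* written by the chip */

	rmb();		/* order hw_idx read before opaque cookie reads */

	while (sw_idx != hw_idx && budget > 0) {
		/* ... process tnapi->rx_rcb[sw_idx] here ... */
		budget--;
		sw_idx = (sw_idx + 1) & tp->rx_ret_ring_mask;
	}

	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);	/* ACK the status ring */
}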
4774 static void tg3_poll_link(struct tg3 *tp)
4776 /* handle link change and other phy events */
4777 if (!(tp->tg3_flags &
4778 (TG3_FLAG_USE_LINKCHG_REG |
4779 TG3_FLAG_POLL_SERDES))) {
4780 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4782 if (sblk->status & SD_STATUS_LINK_CHG) {
4783 sblk->status = SD_STATUS_UPDATED |
4784 (sblk->status & ~SD_STATUS_LINK_CHG);
4785 spin_lock(&tp->lock);
4786 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4788 (MAC_STATUS_SYNC_CHANGED |
4789 MAC_STATUS_CFG_CHANGED |
4790 MAC_STATUS_MI_COMPLETION |
4791 MAC_STATUS_LNKSTATE_CHANGED));
4794 tg3_setup_phy(tp, 0);
4795 spin_unlock(&tp->lock);
4800 static int tg3_rx_prodring_xfer(struct tg3 *tp,
4801 struct tg3_rx_prodring_set *dpr,
4802 struct tg3_rx_prodring_set *spr)
4804 u32 si, di, cpycnt, src_prod_idx;
4808 src_prod_idx = spr->rx_std_prod_idx;
4810 /* Make sure updates to the rx_std_buffers[] entries and the
4811 * standard producer index are seen in the correct order.
4815 if (spr->rx_std_cons_idx == src_prod_idx)
4818 if (spr->rx_std_cons_idx < src_prod_idx)
4819 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
4821 cpycnt = tp->rx_std_ring_mask + 1 -
4822 spr->rx_std_cons_idx;
4824 cpycnt = min(cpycnt,
4825 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
4827 si = spr->rx_std_cons_idx;
4828 di = dpr->rx_std_prod_idx;
4830 for (i = di; i < di + cpycnt; i++) {
4831 if (dpr->rx_std_buffers[i].skb) {
4841 /* Ensure that updates to the rx_std_buffers ring and the
4842 * shadowed hardware producer ring from tg3_recycle_rx() are
4843 * ordered correctly WRT the skb check above.
4847 memcpy(&dpr->rx_std_buffers[di],
4848 &spr->rx_std_buffers[si],
4849 cpycnt * sizeof(struct ring_info));
4851 for (i = 0; i < cpycnt; i++, di++, si++) {
4852 struct tg3_rx_buffer_desc *sbd, *dbd;
4853 sbd = &spr->rx_std[si];
4854 dbd = &dpr->rx_std[di];
4855 dbd->addr_hi = sbd->addr_hi;
4856 dbd->addr_lo = sbd->addr_lo;
4859 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
4860 tp->rx_std_ring_mask;
4861 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
4862 tp->rx_std_ring_mask;
4866 src_prod_idx = spr->rx_jmb_prod_idx;
4868 /* Make sure updates to the rx_jmb_buffers[] entries and
4869 * the jumbo producer index are seen in the correct order.
4873 if (spr->rx_jmb_cons_idx == src_prod_idx)
4876 if (spr->rx_jmb_cons_idx < src_prod_idx)
4877 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
4879 cpycnt = tp->rx_jmb_ring_mask + 1 -
4880 spr->rx_jmb_cons_idx;
4882 cpycnt = min(cpycnt,
4883 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
4885 si = spr->rx_jmb_cons_idx;
4886 di = dpr->rx_jmb_prod_idx;
4888 for (i = di; i < di + cpycnt; i++) {
4889 if (dpr->rx_jmb_buffers[i].skb) {
4899 /* Ensure that updates to the rx_jmb_buffers ring and the
4900 * shadowed hardware producer ring from tg3_recycle_rx() are
4901 * ordered correctly WRT the skb check above.
4905 memcpy(&dpr->rx_jmb_buffers[di],
4906 &spr->rx_jmb_buffers[si],
4907 cpycnt * sizeof(struct ring_info));
4909 for (i = 0; i < cpycnt; i++, di++, si++) {
4910 struct tg3_rx_buffer_desc *sbd, *dbd;
4911 sbd = &spr->rx_jmb[si].std;
4912 dbd = &dpr->rx_jmb[di].std;
4913 dbd->addr_hi = sbd->addr_hi;
4914 dbd->addr_lo = sbd->addr_lo;
4917 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
4918 tp->rx_jmb_ring_mask;
4919 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
4920 tp->rx_jmb_ring_mask;
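/* Worked example for the cpycnt arithmetic above (illustrative
 * numbers).  With a 512-entry ring (mask 0x1ff), suppose the source
 * ring has cons = 500 and prod = 20, i.e. the producer has wrapped.
 * Since cons >= prod, the copy is clamped to the end of the ring:
 *
 *	cpycnt = mask + 1 - cons = 512 - 500 = 12 entries
 *
 * and the remaining 20 entries are picked up on a later pass, once
 * cons has wrapped to 0.  The min() against the destination ring's
 * free tail likewise keeps the destination from wrapping mid-memcpy.
 */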
4926 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4928 struct tg3 *tp = tnapi->tp;
4930 /* run TX completion thread */
4931 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
4933 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4937 /* run RX thread, within the bounds set by NAPI.
4938 * All RX "locking" is done by ensuring outside
4939 * code synchronizes with tg3->napi.poll()
4941 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4942 work_done += tg3_rx(tnapi, budget - work_done);
4944 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
4945 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
4947 u32 std_prod_idx = dpr->rx_std_prod_idx;
4948 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
4950 for (i = 1; i < tp->irq_cnt; i++)
4951 err |= tg3_rx_prodring_xfer(tp, dpr,
4952 &tp->napi[i].prodring);
4956 if (std_prod_idx != dpr->rx_std_prod_idx)
4957 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4958 dpr->rx_std_prod_idx);
4960 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
4961 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4962 dpr->rx_jmb_prod_idx);
4967 tw32_f(HOSTCC_MODE, tp->coal_now);
4973 static int tg3_poll_msix(struct napi_struct *napi, int budget)
4975 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4976 struct tg3 *tp = tnapi->tp;
4978 struct tg3_hw_status *sblk = tnapi->hw_status;
4981 work_done = tg3_poll_work(tnapi, work_done, budget);
4983 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4986 if (unlikely(work_done >= budget))
4989 /* tp->last_tag is used in tg3_int_reenable() below
4990 * to tell the hw how much work has been processed,
4991 * so we must read it before checking for more work.
4993 tnapi->last_tag = sblk->status_tag;
4994 tnapi->last_irq_tag = tnapi->last_tag;
4997 /* check for RX/TX work to do */
4998 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
4999 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5000 napi_complete(napi);
5001 /* Reenable interrupts. */
5002 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5011 /* work_done is guaranteed to be less than budget. */
5012 napi_complete(napi);
5013 schedule_work(&tp->reset_task);
5017 static int tg3_poll(struct napi_struct *napi, int budget)
5019 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5020 struct tg3 *tp = tnapi->tp;
5022 struct tg3_hw_status *sblk = tnapi->hw_status;
5027 work_done = tg3_poll_work(tnapi, work_done, budget);
5029 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5032 if (unlikely(work_done >= budget))
5035 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
5036 /* tp->last_tag is used in tg3_int_reenable() below
5037 * to tell the hw how much work has been processed,
5038 * so we must read it before checking for more work.
5040 tnapi->last_tag = sblk->status_tag;
5041 tnapi->last_irq_tag = tnapi->last_tag;
5044 sblk->status &= ~SD_STATUS_UPDATED;
5046 if (likely(!tg3_has_work(tnapi))) {
5047 napi_complete(napi);
5048 tg3_int_reenable(tnapi);
5056 /* work_done is guaranteed to be less than budget. */
5057 napi_complete(napi);
5058 schedule_work(&tp->reset_task);
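/* Illustrative sketch, not part of the driver: the shape shared by
 * tg3_poll() and tg3_poll_msix() above, reduced to the bare NAPI
 * contract.  Work is done in slices; only when a slice completes under
 * budget and no work remains may the driver call napi_complete() and
 * re-enable its interrupt source:
 */
static int __maybe_unused tg3_example_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	int work_done = 0;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(work_done >= budget))
			break;			/* stay scheduled */

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);	/* must precede irq enable */
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;
}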
5062 static void tg3_napi_disable(struct tg3 *tp)
5066 for (i = tp->irq_cnt - 1; i >= 0; i--)
5067 napi_disable(&tp->napi[i].napi);
5070 static void tg3_napi_enable(struct tg3 *tp)
5074 for (i = 0; i < tp->irq_cnt; i++)
5075 napi_enable(&tp->napi[i].napi);
5078 static void tg3_napi_init(struct tg3 *tp)
5082 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5083 for (i = 1; i < tp->irq_cnt; i++)
5084 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5087 static void tg3_napi_fini(struct tg3 *tp)
5091 for (i = 0; i < tp->irq_cnt; i++)
5092 netif_napi_del(&tp->napi[i].napi);
5095 static inline void tg3_netif_stop(struct tg3 *tp)
5097 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5098 tg3_napi_disable(tp);
5099 netif_tx_disable(tp->dev);
5102 static inline void tg3_netif_start(struct tg3 *tp)
5104 /* NOTE: unconditional netif_tx_wake_all_queues is only
5105 * appropriate so long as all callers are assured to
5106 * have free tx slots (such as after tg3_init_hw)
5108 netif_tx_wake_all_queues(tp->dev);
5110 tg3_napi_enable(tp);
5111 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5112 tg3_enable_ints(tp);
5115 static void tg3_irq_quiesce(struct tg3 *tp)
5119 BUG_ON(tp->irq_sync);
5124 for (i = 0; i < tp->irq_cnt; i++)
5125 synchronize_irq(tp->napi[i].irq_vec);
5128 /* Fully shut down all tg3 driver activity elsewhere in the system.
5129 * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
5130 * Most of the time this is not necessary, except when shutting
5131 * down the device.
5133 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5135 spin_lock_bh(&tp->lock);
5137 tg3_irq_quiesce(tp);
5140 static inline void tg3_full_unlock(struct tg3 *tp)
5142 spin_unlock_bh(&tp->lock);
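/* Illustrative usage of the pair above: configuration paths pass
 * irq_sync=1 so that no ISR or NAPI poll can touch the hardware while
 * it is being reprogrammed (compare tg3_change_mtu() below):
 */
#if 0	/* example only */
	tg3_full_lock(tp, 1);	/* spin_lock_bh + tg3_irq_quiesce() */
	/* ... no interrupt handler can race with us here ... */
	tg3_full_unlock(tp);
#endif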
5145 /* One-shot MSI handler - Chip automatically disables interrupt
5146 * after sending MSI so driver doesn't have to do it.
5148 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5150 struct tg3_napi *tnapi = dev_id;
5151 struct tg3 *tp = tnapi->tp;
5153 prefetch(tnapi->hw_status);
5155 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5157 if (likely(!tg3_irq_sync(tp)))
5158 napi_schedule(&tnapi->napi);
5163 /* MSI ISR - No need to check for interrupt sharing and no need to
5164 * flush status block and interrupt mailbox. PCI ordering rules
5165 * guarantee that MSI will arrive after the status block.
5167 static irqreturn_t tg3_msi(int irq, void *dev_id)
5169 struct tg3_napi *tnapi = dev_id;
5170 struct tg3 *tp = tnapi->tp;
5172 prefetch(tnapi->hw_status);
5174 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5176 * Writing any value to intr-mbox-0 clears PCI INTA# and
5177 * chip-internal interrupt pending events.
5178 * Writing non-zero to intr-mbox-0 additionally tells the
5179 * NIC to stop sending us irqs, engaging "in-intr-handler"
5182 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5183 if (likely(!tg3_irq_sync(tp)))
5184 napi_schedule(&tnapi->napi);
5186 return IRQ_RETVAL(1);
5189 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5191 struct tg3_napi *tnapi = dev_id;
5192 struct tg3 *tp = tnapi->tp;
5193 struct tg3_hw_status *sblk = tnapi->hw_status;
5194 unsigned int handled = 1;
5196 /* In INTx mode, it is possible for the interrupt to arrive at
5197 * the CPU before the status block posted prior to the interrupt.
5198 * Reading the PCI State register will confirm whether the
5199 * interrupt is ours and will flush the status block.
5201 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5202 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5203 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5210 * Writing any value to intr-mbox-0 clears PCI INTA# and
5211 * chip-internal interrupt pending events.
5212 * Writing non-zero to intr-mbox-0 additionally tells the
5213 * NIC to stop sending us irqs, engaging "in-intr-handler"
5216 * Flush the mailbox to de-assert the IRQ immediately to prevent
5217 * spurious interrupts. The flush impacts performance but
5218 * excessive spurious interrupts can be worse in some cases.
5220 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5221 if (tg3_irq_sync(tp))
5223 sblk->status &= ~SD_STATUS_UPDATED;
5224 if (likely(tg3_has_work(tnapi))) {
5225 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5226 napi_schedule(&tnapi->napi);
5228 /* No work, shared interrupt perhaps? re-enable
5229 * interrupts, and flush that PCI write
5231 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5235 return IRQ_RETVAL(handled);
5238 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5240 struct tg3_napi *tnapi = dev_id;
5241 struct tg3 *tp = tnapi->tp;
5242 struct tg3_hw_status *sblk = tnapi->hw_status;
5243 unsigned int handled = 1;
5245 /* In INTx mode, it is possible for the interrupt to arrive at
5246 * the CPU before the status block posted prior to the interrupt.
5247 * Reading the PCI State register will confirm whether the
5248 * interrupt is ours and will flush the status block.
5250 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5251 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5252 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5259 * writing any value to intr-mbox-0 clears PCI INTA# and
5260 * chip-internal interrupt pending events.
5261 * writing non-zero to intr-mbox-0 additionally tells the
5262 * NIC to stop sending us irqs, engaging "in-intr-handler"
5265 * Flush the mailbox to de-assert the IRQ immediately to prevent
5266 * spurious interrupts. The flush impacts performance but
5267 * excessive spurious interrupts can be worse in some cases.
5269 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5272 * In a shared interrupt configuration, sometimes other devices'
5273 * interrupts will scream. We record the current status tag here
5274 * so that the above check can report that the screaming interrupts
5275 * are unhandled. Eventually they will be silenced.
5277 tnapi->last_irq_tag = sblk->status_tag;
5279 if (tg3_irq_sync(tp))
5282 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5284 napi_schedule(&tnapi->napi);
5287 return IRQ_RETVAL(handled);
5290 /* ISR for interrupt test */
5291 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5293 struct tg3_napi *tnapi = dev_id;
5294 struct tg3 *tp = tnapi->tp;
5295 struct tg3_hw_status *sblk = tnapi->hw_status;
5297 if ((sblk->status & SD_STATUS_UPDATED) ||
5298 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5299 tg3_disable_ints(tp);
5300 return IRQ_RETVAL(1);
5302 return IRQ_RETVAL(0);
5305 static int tg3_init_hw(struct tg3 *, int);
5306 static int tg3_halt(struct tg3 *, int, int);
5308 /* Restart hardware after configuration changes, self-test, etc.
5309 * Invoked with tp->lock held.
5311 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5312 __releases(tp->lock)
5313 __acquires(tp->lock)
5317 err = tg3_init_hw(tp, reset_phy);
5320 "Failed to re-initialize device, aborting\n");
5321 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5322 tg3_full_unlock(tp);
5323 del_timer_sync(&tp->timer);
5325 tg3_napi_enable(tp);
5327 tg3_full_lock(tp, 0);
5332 #ifdef CONFIG_NET_POLL_CONTROLLER
5333 static void tg3_poll_controller(struct net_device *dev)
5336 struct tg3 *tp = netdev_priv(dev);
5338 for (i = 0; i < tp->irq_cnt; i++)
5339 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5343 static void tg3_reset_task(struct work_struct *work)
5345 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5347 unsigned int restart_timer;
5349 tg3_full_lock(tp, 0);
5351 if (!netif_running(tp->dev)) {
5352 tg3_full_unlock(tp);
5356 tg3_full_unlock(tp);
5362 tg3_full_lock(tp, 1);
5364 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5365 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5367 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5368 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5369 tp->write32_rx_mbox = tg3_write_flush_reg32;
5370 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5371 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5374 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5375 err = tg3_init_hw(tp, 1);
5379 tg3_netif_start(tp);
5382 mod_timer(&tp->timer, jiffies + 1);
5385 tg3_full_unlock(tp);
5391 static void tg3_dump_short_state(struct tg3 *tp)
5393 netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5394 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5395 netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5396 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5399 static void tg3_tx_timeout(struct net_device *dev)
5401 struct tg3 *tp = netdev_priv(dev);
5403 if (netif_msg_tx_err(tp)) {
5404 netdev_err(dev, "transmit timed out, resetting\n");
5405 tg3_dump_short_state(tp);
5408 schedule_work(&tp->reset_task);
5411 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5412 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5414 u32 base = (u32) mapping & 0xffffffff;
5416 return (base > 0xffffdcc0) && (base + len + 8 < base);
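/* Worked example for the test above (illustrative numbers).  Suppose
 * mapping = 0x1fffff000 and len = 0x2000:
 *
 *	base = 0xfffff000, which is > 0xffffdcc0
 *	base + len + 8 = 0x100001008, truncated to u32 = 0x00001008
 *	0x00001008 < base, so the 32-bit sum wrapped -> boundary hit
 *
 * The 0xffffdcc0 threshold leaves 0x2340 (9024) bytes of headroom
 * below the boundary, enough for a maximum-size jumbo frame, so the
 * cheap first test filters out buffers that cannot possibly wrap.
 */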
5419 /* Test for DMA addresses > 40-bit */
5420 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5423 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5424 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5425 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5432 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5434 /* Work around 4GB and 40-bit hardware DMA bugs. */
5435 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5436 struct sk_buff *skb, u32 last_plus_one,
5437 u32 *start, u32 base_flags, u32 mss)
5439 struct tg3 *tp = tnapi->tp;
5440 struct sk_buff *new_skb;
5441 dma_addr_t new_addr = 0;
5445 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5446 new_skb = skb_copy(skb, GFP_ATOMIC);
5448 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5450 new_skb = skb_copy_expand(skb,
5451 skb_headroom(skb) + more_headroom,
5452 skb_tailroom(skb), GFP_ATOMIC);
5458 /* New SKB is guaranteed to be linear. */
5460 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5462 /* Make sure the mapping succeeded */
5463 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5465 dev_kfree_skb(new_skb);
5468 /* Make sure new skb does not cross any 4G boundaries.
5469 * Drop the packet if it does.
5471 } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5472 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5473 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5476 dev_kfree_skb(new_skb);
5479 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5480 base_flags, 1 | (mss << 1));
5481 *start = NEXT_TX(entry);
5485 /* Now clean up the sw ring entries. */
5487 while (entry != last_plus_one) {
5491 len = skb_headlen(skb);
5493 len = skb_shinfo(skb)->frags[i-1].size;
5495 pci_unmap_single(tp->pdev,
5496 dma_unmap_addr(&tnapi->tx_buffers[entry],
5498 len, PCI_DMA_TODEVICE);
5500 tnapi->tx_buffers[entry].skb = new_skb;
5501 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5504 tnapi->tx_buffers[entry].skb = NULL;
5506 entry = NEXT_TX(entry);
5515 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5516 dma_addr_t mapping, int len, u32 flags,
5519 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5520 int is_end = (mss_and_is_end & 0x1);
5521 u32 mss = (mss_and_is_end >> 1);
5525 flags |= TXD_FLAG_END;
5526 if (flags & TXD_FLAG_VLAN) {
5527 vlan_tag = flags >> 16;
5530 vlan_tag |= (mss << TXD_MSS_SHIFT);
5532 txd->addr_hi = ((u64) mapping >> 32);
5533 txd->addr_lo = ((u64) mapping & 0xffffffff);
5534 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5535 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5538 /* hard_start_xmit for devices that don't have any bugs and
5539 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5541 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5542 struct net_device *dev)
5544 struct tg3 *tp = netdev_priv(dev);
5545 u32 len, entry, base_flags, mss;
5547 struct tg3_napi *tnapi;
5548 struct netdev_queue *txq;
5549 unsigned int i, last;
5551 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5552 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5553 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5556 /* We are running in BH disabled context with netif_tx_lock
5557 * and TX reclaim runs via tp->napi.poll inside of a software
5558 * interrupt. Furthermore, IRQ processing runs lockless so we have
5559 * no IRQ context deadlocks to worry about either. Rejoice!
5561 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5562 if (!netif_tx_queue_stopped(txq)) {
5563 netif_tx_stop_queue(txq);
5565 /* This is a hard error, log it. */
5567 "BUG! Tx Ring full when queue awake!\n");
5569 return NETDEV_TX_BUSY;
5572 entry = tnapi->tx_prod;
5574 mss = skb_shinfo(skb)->gso_size;
5576 int tcp_opt_len, ip_tcp_len;
5579 if (skb_header_cloned(skb) &&
5580 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5585 if (skb_is_gso_v6(skb)) {
5586 hdrlen = skb_headlen(skb) - ETH_HLEN;
5588 struct iphdr *iph = ip_hdr(skb);
5590 tcp_opt_len = tcp_optlen(skb);
5591 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5594 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5595 hdrlen = ip_tcp_len + tcp_opt_len;
5598 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5599 mss |= (hdrlen & 0xc) << 12;
5601 base_flags |= 0x00000010;
5602 base_flags |= (hdrlen & 0x3e0) << 5;
5606 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5607 TXD_FLAG_CPU_POST_DMA);
5609 tcp_hdr(skb)->check = 0;
5611 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5612 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5615 #if TG3_VLAN_TAG_USED
5616 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5617 base_flags |= (TXD_FLAG_VLAN |
5618 (vlan_tx_tag_get(skb) << 16));
5621 len = skb_headlen(skb);
5623 /* Queue skb data, a.k.a. the main skb fragment. */
5624 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5625 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5630 tnapi->tx_buffers[entry].skb = skb;
5631 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5633 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5634 !mss && skb->len > ETH_DATA_LEN)
5635 base_flags |= TXD_FLAG_JMB_PKT;
5637 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5638 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5640 entry = NEXT_TX(entry);
5642 /* Now loop through additional data fragments, and queue them. */
5643 if (skb_shinfo(skb)->nr_frags > 0) {
5644 last = skb_shinfo(skb)->nr_frags - 1;
5645 for (i = 0; i <= last; i++) {
5646 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5649 mapping = pci_map_page(tp->pdev,
5652 len, PCI_DMA_TODEVICE);
5653 if (pci_dma_mapping_error(tp->pdev, mapping))
5656 tnapi->tx_buffers[entry].skb = NULL;
5657 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5660 tg3_set_txd(tnapi, entry, mapping, len,
5661 base_flags, (i == last) | (mss << 1));
5663 entry = NEXT_TX(entry);
5667 /* Packets are ready, update Tx producer idx locally and on card. */
5668 tw32_tx_mbox(tnapi->prodmbox, entry);
5670 tnapi->tx_prod = entry;
5671 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5672 netif_tx_stop_queue(txq);
5674 /* netif_tx_stop_queue() must be done before checking
5675 * tx index in tg3_tx_avail() below, because in
5676 * tg3_tx(), we update tx index before checking for
5677 * netif_tx_queue_stopped().
5680 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5681 netif_tx_wake_queue(txq);
5687 return NETDEV_TX_OK;
5691 entry = tnapi->tx_prod;
5692 tnapi->tx_buffers[entry].skb = NULL;
5693 pci_unmap_single(tp->pdev,
5694 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5697 for (i = 0; i <= last; i++) {
5698 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5699 entry = NEXT_TX(entry);
5701 pci_unmap_page(tp->pdev,
5702 dma_unmap_addr(&tnapi->tx_buffers[entry],
5704 frag->size, PCI_DMA_TODEVICE);
5708 return NETDEV_TX_OK;
5711 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5712 struct net_device *);
5714 /* Use GSO to work around a rare TSO bug that may be triggered when the
5715 * TSO header is greater than 80 bytes.
5717 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5719 struct sk_buff *segs, *nskb;
5720 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5722 /* Estimate the number of fragments in the worst case */
5723 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5724 netif_stop_queue(tp->dev);
5726 /* netif_tx_stop_queue() must be done before checking
5727 * tx index in tg3_tx_avail() below, because in
5728 * tg3_tx(), we update tx index before checking for
5729 * netif_tx_queue_stopped().
5732 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5733 return NETDEV_TX_BUSY;
5735 netif_wake_queue(tp->dev);
5738 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5740 goto tg3_tso_bug_end;
5746 tg3_start_xmit_dma_bug(nskb, tp->dev);
5752 return NETDEV_TX_OK;
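/* Illustrative sketch (hypothetical helper) of the segmentation
 * fallback used above: GSO splits the oversized-header TSO skb into
 * MTU-sized skbs in software, and each segment is fed back through the
 * normal transmit routine.  A simplified rendering of the loop:
 */
static void __maybe_unused tg3_example_gso_fallback(struct tg3 *tp,
						    struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Re-segment with TSO masked out of the feature set. */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto out;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;		/* detach before sending */
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

out:
	dev_kfree_skb(skb);	/* the original skb is now redundant */
}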
5755 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5756 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5758 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5759 struct net_device *dev)
5761 struct tg3 *tp = netdev_priv(dev);
5762 u32 len, entry, base_flags, mss;
5763 int would_hit_hwbug;
5765 struct tg3_napi *tnapi;
5766 struct netdev_queue *txq;
5767 unsigned int i, last;
5769 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5770 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5771 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5774 /* We are running in BH disabled context with netif_tx_lock
5775 * and TX reclaim runs via tp->napi.poll inside of a software
5776 * interrupt. Furthermore, IRQ processing runs lockless so we have
5777 * no IRQ context deadlocks to worry about either. Rejoice!
5779 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5780 if (!netif_tx_queue_stopped(txq)) {
5781 netif_tx_stop_queue(txq);
5783 /* This is a hard error, log it. */
5785 "BUG! Tx Ring full when queue awake!\n");
5787 return NETDEV_TX_BUSY;
5790 entry = tnapi->tx_prod;
5792 if (skb->ip_summed == CHECKSUM_PARTIAL)
5793 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5795 mss = skb_shinfo(skb)->gso_size;
5798 u32 tcp_opt_len, hdr_len;
5800 if (skb_header_cloned(skb) &&
5801 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5807 tcp_opt_len = tcp_optlen(skb);
5809 if (skb_is_gso_v6(skb)) {
5810 hdr_len = skb_headlen(skb) - ETH_HLEN;
5814 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5815 hdr_len = ip_tcp_len + tcp_opt_len;
5818 iph->tot_len = htons(mss + hdr_len);
5821 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5822 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5823 return tg3_tso_bug(tp, skb);
5825 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5826 TXD_FLAG_CPU_POST_DMA);
5828 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5829 tcp_hdr(skb)->check = 0;
5830 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5832 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5837 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5838 mss |= (hdr_len & 0xc) << 12;
5840 base_flags |= 0x00000010;
5841 base_flags |= (hdr_len & 0x3e0) << 5;
5842 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5843 mss |= hdr_len << 9;
5844 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5845 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5846 if (tcp_opt_len || iph->ihl > 5) {
5849 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5850 mss |= (tsflags << 11);
5853 if (tcp_opt_len || iph->ihl > 5) {
5856 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5857 base_flags |= tsflags << 12;
5861 #if TG3_VLAN_TAG_USED
5862 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5863 base_flags |= (TXD_FLAG_VLAN |
5864 (vlan_tx_tag_get(skb) << 16));
5867 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5868 !mss && skb->len > ETH_DATA_LEN)
5869 base_flags |= TXD_FLAG_JMB_PKT;
5871 len = skb_headlen(skb);
5873 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5874 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5879 tnapi->tx_buffers[entry].skb = skb;
5880 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5882 would_hit_hwbug = 0;
5884 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
5885 would_hit_hwbug = 1;
5887 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5888 tg3_4g_overflow_test(mapping, len))
5889 would_hit_hwbug = 1;
5891 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5892 tg3_40bit_overflow_test(tp, mapping, len))
5893 would_hit_hwbug = 1;
5895 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5896 would_hit_hwbug = 1;
5898 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5899 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5901 entry = NEXT_TX(entry);
5903 /* Now loop through additional data fragments, and queue them. */
5904 if (skb_shinfo(skb)->nr_frags > 0) {
5905 last = skb_shinfo(skb)->nr_frags - 1;
5906 for (i = 0; i <= last; i++) {
5907 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5910 mapping = pci_map_page(tp->pdev,
5913 len, PCI_DMA_TODEVICE);
5915 tnapi->tx_buffers[entry].skb = NULL;
5916 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5918 if (pci_dma_mapping_error(tp->pdev, mapping))
5921 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
5923 would_hit_hwbug = 1;
5925 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5926 tg3_4g_overflow_test(mapping, len))
5927 would_hit_hwbug = 1;
5929 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5930 tg3_40bit_overflow_test(tp, mapping, len))
5931 would_hit_hwbug = 1;
5933 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5934 tg3_set_txd(tnapi, entry, mapping, len,
5935 base_flags, (i == last)|(mss << 1));
5937 tg3_set_txd(tnapi, entry, mapping, len,
5938 base_flags, (i == last));
5940 entry = NEXT_TX(entry);
5944 if (would_hit_hwbug) {
5945 u32 last_plus_one = entry;
5948 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5949 start &= (TG3_TX_RING_SIZE - 1);
5951 /* If the workaround fails due to memory/mapping
5952 * failure, silently drop this packet.
5954 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
5955 &start, base_flags, mss))
5961 /* Packets are ready, update Tx producer idx locally and on card. */
5962 tw32_tx_mbox(tnapi->prodmbox, entry);
5964 tnapi->tx_prod = entry;
5965 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5966 netif_tx_stop_queue(txq);
5968 /* netif_tx_stop_queue() must be done before checking
5969 * tx index in tg3_tx_avail() below, because in
5970 * tg3_tx(), we update tx index before checking for
5971 * netif_tx_queue_stopped().
5974 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5975 netif_tx_wake_queue(txq);
5981 return NETDEV_TX_OK;
5985 entry = tnapi->tx_prod;
5986 tnapi->tx_buffers[entry].skb = NULL;
5987 pci_unmap_single(tp->pdev,
5988 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5991 for (i = 0; i <= last; i++) {
5992 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5993 entry = NEXT_TX(entry);
5995 pci_unmap_page(tp->pdev,
5996 dma_unmap_addr(&tnapi->tx_buffers[entry],
5998 frag->size, PCI_DMA_TODEVICE);
6002 return NETDEV_TX_OK;
6005 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6010 if (new_mtu > ETH_DATA_LEN) {
6011 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6012 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
6013 ethtool_op_set_tso(dev, 0);
6015 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
6018 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6019 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
6020 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
6024 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6026 struct tg3 *tp = netdev_priv(dev);
6029 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6032 if (!netif_running(dev)) {
6033 /* We'll just catch it later when the
6036 tg3_set_mtu(dev, tp, new_mtu);
6044 tg3_full_lock(tp, 1);
6046 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6048 tg3_set_mtu(dev, tp, new_mtu);
6050 err = tg3_restart_hw(tp, 0);
6053 tg3_netif_start(tp);
6055 tg3_full_unlock(tp);
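/* Recap of the resize sequence above as a linear sketch (a restart is
 * only needed while the interface is running; otherwise the new MTU is
 * recorded and picked up at the next open):
 */
#if 0	/* example only */
	tg3_netif_stop(tp);		/* freeze NAPI and tx queues */
	tg3_full_lock(tp, 1);		/* quiesce all irq vectors */
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_set_mtu(dev, tp, new_mtu);	/* flips jumbo/TSO flags */
	err = tg3_restart_hw(tp, 0);	/* re-init rings and hardware */
	if (!err)
		tg3_netif_start(tp);
	tg3_full_unlock(tp);
#endif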
6063 static void tg3_rx_prodring_free(struct tg3 *tp,
6064 struct tg3_rx_prodring_set *tpr)
6068 if (tpr != &tp->napi[0].prodring) {
6069 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6070 i = (i + 1) & tp->rx_std_ring_mask)
6071 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6074 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6075 for (i = tpr->rx_jmb_cons_idx;
6076 i != tpr->rx_jmb_prod_idx;
6077 i = (i + 1) & tp->rx_jmb_ring_mask) {
6078 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6086 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6087 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6090 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6091 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6092 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6097 /* Initialize rx rings for packet processing.
6099 * The chip has been shut down and the driver detached from
6100 * the networking, so no interrupts or new tx packets will
6101 * end up in the driver. tp->{tx,}lock are held and thus
6104 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6105 struct tg3_rx_prodring_set *tpr)
6107 u32 i, rx_pkt_dma_sz;
6109 tpr->rx_std_cons_idx = 0;
6110 tpr->rx_std_prod_idx = 0;
6111 tpr->rx_jmb_cons_idx = 0;
6112 tpr->rx_jmb_prod_idx = 0;
6114 if (tpr != &tp->napi[0].prodring) {
6115 memset(&tpr->rx_std_buffers[0], 0,
6116 TG3_RX_STD_BUFF_RING_SIZE(tp));
6117 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
6118 memset(&tpr->rx_jmb_buffers[0], 0,
6119 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6123 /* Zero out all descriptors. */
6124 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6126 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6127 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
6128 tp->dev->mtu > ETH_DATA_LEN)
6129 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6130 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6132 /* Initialize invariants of the rings; we only set this
6133 * stuff once. This works because the card does not
6134 * write into the rx buffer posting rings.
6136 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6137 struct tg3_rx_buffer_desc *rxd;
6139 rxd = &tpr->rx_std[i];
6140 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6141 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6142 rxd->opaque = (RXD_OPAQUE_RING_STD |
6143 (i << RXD_OPAQUE_INDEX_SHIFT));
6146 /* Now allocate fresh SKBs for each rx ring. */
6147 for (i = 0; i < tp->rx_pending; i++) {
6148 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6149 netdev_warn(tp->dev,
6150 "Using a smaller RX standard ring. Only "
6151 "%d out of %d buffers were allocated "
6152 "successfully\n", i, tp->rx_pending);
6160 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
6163 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6165 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
6168 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6169 struct tg3_rx_buffer_desc *rxd;
6171 rxd = &tpr->rx_jmb[i].std;
6172 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6173 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6175 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6176 (i << RXD_OPAQUE_INDEX_SHIFT));
6179 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6180 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6181 netdev_warn(tp->dev,
6182 "Using a smaller RX jumbo ring. Only %d "
6183 "out of %d buffers were allocated "
6184 "successfully\n", i, tp->rx_jumbo_pending);
6187 tp->rx_jumbo_pending = i;
6196 tg3_rx_prodring_free(tp, tpr);
6200 static void tg3_rx_prodring_fini(struct tg3 *tp,
6201 struct tg3_rx_prodring_set *tpr)
6203 kfree(tpr->rx_std_buffers);
6204 tpr->rx_std_buffers = NULL;
6205 kfree(tpr->rx_jmb_buffers);
6206 tpr->rx_jmb_buffers = NULL;
6208 pci_free_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp),
6209 tpr->rx_std, tpr->rx_std_mapping);
6213 pci_free_consistent(tp->pdev, TG3_RX_JMB_RING_BYTES(tp),
6214 tpr->rx_jmb, tpr->rx_jmb_mapping);
6219 static int tg3_rx_prodring_init(struct tg3 *tp,
6220 struct tg3_rx_prodring_set *tpr)
6222 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6224 if (!tpr->rx_std_buffers)
6227 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp),
6228 &tpr->rx_std_mapping);
6232 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6233 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6235 if (!tpr->rx_jmb_buffers)
6238 tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
6239 TG3_RX_JMB_RING_BYTES(tp),
6240 &tpr->rx_jmb_mapping);
6248 tg3_rx_prodring_fini(tp, tpr);
6252 /* Free up pending packets in all rx/tx rings.
6254 * The chip has been shut down and the driver detached from
6255 * the networking, so no interrupts or new tx packets will
6256 * end up in the driver. tp->{tx,}lock is not held and we are not
6257 * in an interrupt context and thus may sleep.
6259 static void tg3_free_rings(struct tg3 *tp)
6263 for (j = 0; j < tp->irq_cnt; j++) {
6264 struct tg3_napi *tnapi = &tp->napi[j];
6266 tg3_rx_prodring_free(tp, &tnapi->prodring);
6268 if (!tnapi->tx_buffers)
6271 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6272 struct ring_info *txp;
6273 struct sk_buff *skb;
6276 txp = &tnapi->tx_buffers[i];
6284 pci_unmap_single(tp->pdev,
6285 dma_unmap_addr(txp, mapping),
6292 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6293 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6294 pci_unmap_page(tp->pdev,
6295 dma_unmap_addr(txp, mapping),
6296 skb_shinfo(skb)->frags[k].size,
6301 dev_kfree_skb_any(skb);
6306 /* Initialize tx/rx rings for packet processing.
6308 * The chip has been shut down and the driver detached from
6309 * the networking, so no interrupts or new tx packets will
6310 * end up in the driver. tp->{tx,}lock are held and thus
6313 static int tg3_init_rings(struct tg3 *tp)
6317 /* Free up all the SKBs. */
6320 for (i = 0; i < tp->irq_cnt; i++) {
6321 struct tg3_napi *tnapi = &tp->napi[i];
6323 tnapi->last_tag = 0;
6324 tnapi->last_irq_tag = 0;
6325 tnapi->hw_status->status = 0;
6326 tnapi->hw_status->status_tag = 0;
6327 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6332 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6334 tnapi->rx_rcb_ptr = 0;
6336 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6338 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6348 * Must not be invoked with interrupt sources disabled and
6349 * the hardware shut down.
6351 static void tg3_free_consistent(struct tg3 *tp)
6355 for (i = 0; i < tp->irq_cnt; i++) {
6356 struct tg3_napi *tnapi = &tp->napi[i];
6358 if (tnapi->tx_ring) {
6359 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
6360 tnapi->tx_ring, tnapi->tx_desc_mapping);
6361 tnapi->tx_ring = NULL;
6364 kfree(tnapi->tx_buffers);
6365 tnapi->tx_buffers = NULL;
6367 if (tnapi->rx_rcb) {
6368 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
6370 tnapi->rx_rcb_mapping);
6371 tnapi->rx_rcb = NULL;
6374 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6376 if (tnapi->hw_status) {
6377 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
6379 tnapi->status_mapping);
6380 tnapi->hw_status = NULL;
6385 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
6386 tp->hw_stats, tp->stats_mapping);
6387 tp->hw_stats = NULL;
6392 * Must not be invoked with interrupt sources disabled and
6393 * the hardware shut down. Can sleep.
6395 static int tg3_alloc_consistent(struct tg3 *tp)
6399 tp->hw_stats = pci_alloc_consistent(tp->pdev,
6400 sizeof(struct tg3_hw_stats),
6401 &tp->stats_mapping);
6405 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6407 for (i = 0; i < tp->irq_cnt; i++) {
6408 struct tg3_napi *tnapi = &tp->napi[i];
6409 struct tg3_hw_status *sblk;
6411 tnapi->hw_status = pci_alloc_consistent(tp->pdev,
6413 &tnapi->status_mapping);
6414 if (!tnapi->hw_status)
6417 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6418 sblk = tnapi->hw_status;
6420 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6423 /* If multivector TSS is enabled, vector 0 does not handle
6424 * tx interrupts. Don't allocate any resources for it.
6426 if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) ||
6427 (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) {
6428 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6431 if (!tnapi->tx_buffers)
6434 tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
6436 &tnapi->tx_desc_mapping);
6437 if (!tnapi->tx_ring)
6442 * When RSS is enabled, the status block format changes
6443 * slightly. The "rx_jumbo_consumer", "reserved",
6444 * and "rx_mini_consumer" members get mapped to the
6445 * other three rx return ring producer indexes.
6449 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6452 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6455 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6458 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6463 * If multivector RSS is enabled, vector 0 does not handle
6464 * rx or tx interrupts. Don't allocate any resources for it.
6466 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6469 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
6470 TG3_RX_RCB_RING_BYTES(tp),
6471 &tnapi->rx_rcb_mapping);
6475 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6481 tg3_free_consistent(tp);
6485 #define MAX_WAIT_CNT 1000
6487 /* To stop a block, clear the enable bit and poll till it
6488 * clears. tp->lock is held.
6490 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6495 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6502 /* We can't enable/disable these bits of the
6503 * 5705/5750, just say success.
6516 for (i = 0; i < MAX_WAIT_CNT; i++) {
6519 if ((val & enable_bit) == 0)
6523 if (i == MAX_WAIT_CNT && !silent) {
6524 dev_err(&tp->pdev->dev,
6525 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6533 /* tp->lock is held. */
6534 static int tg3_abort_hw(struct tg3 *tp, int silent)
6538 tg3_disable_ints(tp);
6540 tp->rx_mode &= ~RX_MODE_ENABLE;
6541 tw32_f(MAC_RX_MODE, tp->rx_mode);
6544 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6545 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6546 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6547 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6548 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6549 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6551 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6552 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6553 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6554 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6555 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6556 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6557 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6559 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6560 tw32_f(MAC_MODE, tp->mac_mode);
6563 tp->tx_mode &= ~TX_MODE_ENABLE;
6564 tw32_f(MAC_TX_MODE, tp->tx_mode);
6566 for (i = 0; i < MAX_WAIT_CNT; i++) {
6568 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6571 if (i >= MAX_WAIT_CNT) {
6572 dev_err(&tp->pdev->dev,
6573 "%s timed out, TX_MODE_ENABLE will not clear "
6574 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6578 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6579 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6580 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6582 tw32(FTQ_RESET, 0xffffffff);
6583 tw32(FTQ_RESET, 0x00000000);
6585 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6586 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6588 for (i = 0; i < tp->irq_cnt; i++) {
6589 struct tg3_napi *tnapi = &tp->napi[i];
6590 if (tnapi->hw_status)
6591 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6594 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
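/* Illustrative sketch of the clear-and-poll pattern tg3_stop_block()
 * applies to each engine above (simplified; special cases omitted):
 */
#if 0	/* example only */
	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);		/* ask the engine to stop */

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;		/* engine acknowledged the stop */
	}
#endif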
6599 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6604 /* NCSI does not support APE events */
6605 if (tp->tg3_flags3 & TG3_FLG3_APE_HAS_NCSI)
6608 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6609 if (apedata != APE_SEG_SIG_MAGIC)
6612 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6613 if (!(apedata & APE_FW_STATUS_READY))
6616 /* Wait for up to 1 millisecond for APE to service previous event. */
6617 for (i = 0; i < 10; i++) {
6618 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6621 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6623 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6624 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6625 event | APE_EVENT_STATUS_EVENT_PENDING);
6627 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6629 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6635 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6636 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6639 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6644 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6648 case RESET_KIND_INIT:
6649 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6650 APE_HOST_SEG_SIG_MAGIC);
6651 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6652 APE_HOST_SEG_LEN_MAGIC);
6653 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6654 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6655 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6656 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6657 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6658 APE_HOST_BEHAV_NO_PHYLOCK);
6659 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6660 TG3_APE_HOST_DRVR_STATE_START);
6662 event = APE_EVENT_STATUS_STATE_START;
6664 case RESET_KIND_SHUTDOWN:
6665 /* With the interface we are currently using,
6666 * APE does not track driver state. Wiping
6667 * out the HOST SEGMENT SIGNATURE forces
6668 * the APE to assume OS absent status.
6670 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6672 if (device_may_wakeup(&tp->pdev->dev) &&
6673 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
6674 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6675 TG3_APE_HOST_WOL_SPEED_AUTO);
6676 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6678 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6680 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6682 event = APE_EVENT_STATUS_STATE_UNLOAD;
6684 case RESET_KIND_SUSPEND:
6685 event = APE_EVENT_STATUS_STATE_SUSPEND;
6691 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6693 tg3_ape_send_event(tp, event);
6696 /* tp->lock is held. */
6697 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6699 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6700 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6702 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6704 case RESET_KIND_INIT:
6705 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6709 case RESET_KIND_SHUTDOWN:
6710 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6714 case RESET_KIND_SUSPEND:
6715 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6724 if (kind == RESET_KIND_INIT ||
6725 kind == RESET_KIND_SUSPEND)
6726 tg3_ape_driver_state_change(tp, kind);
6729 /* tp->lock is held. */
6730 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6732 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6734 case RESET_KIND_INIT:
6735 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6736 DRV_STATE_START_DONE);
6739 case RESET_KIND_SHUTDOWN:
6740 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6741 DRV_STATE_UNLOAD_DONE);
6749 if (kind == RESET_KIND_SHUTDOWN)
6750 tg3_ape_driver_state_change(tp, kind);
6753 /* tp->lock is held. */
6754 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6756 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6758 case RESET_KIND_INIT:
6759 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6763 case RESET_KIND_SHUTDOWN:
6764 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6768 case RESET_KIND_SUSPEND:
6769 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6779 static int tg3_poll_fw(struct tg3 *tp)
6784 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6785 /* Wait up to 20ms for init done. */
6786 for (i = 0; i < 200; i++) {
6787 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6794 /* Wait for firmware initialization to complete. */
6795 for (i = 0; i < 100000; i++) {
6796 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6797 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6802 /* Chip might not be fitted with firmware. Some Sun onboard
6803 * parts are configured like that. So don't signal the timeout
6804 * of the above loop as an error, but do report the lack of
6805 * running firmware once.
6808 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6809 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6811 netdev_info(tp->dev, "No firmware running\n");
6814 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6815 /* The 57765 A0 needs a little more
6816 * time to do some important work.
6824 /* Save PCI command register before chip reset */
6825 static void tg3_save_pci_state(struct tg3 *tp)
6827 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6830 /* Restore PCI state after chip reset */
6831 static void tg3_restore_pci_state(struct tg3 *tp)
6835 /* Re-enable indirect register accesses. */
6836 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6837 tp->misc_host_ctrl);
6839 /* Set MAX PCI retry to zero. */
6840 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6841 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6842 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6843 val |= PCISTATE_RETRY_SAME_DMA;
6844 /* Allow reads and writes to the APE register and memory space. */
6845 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6846 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6847 PCISTATE_ALLOW_APE_SHMEM_WR |
6848 PCISTATE_ALLOW_APE_PSPACE_WR;
6849 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6851 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6853 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6854 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6855 pcie_set_readrq(tp->pdev, 4096);
6857 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6858 tp->pci_cacheline_sz);
6859 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6864 /* Make sure PCI-X relaxed ordering bit is clear. */
6865 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6868 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6870 pcix_cmd &= ~PCI_X_CMD_ERO;
6871 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6875 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6877 /* Chip reset on 5780 will reset MSI enable bit,
6878 * so we need to restore it.
6880 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6883 pci_read_config_word(tp->pdev,
6884 tp->msi_cap + PCI_MSI_FLAGS,
6886 pci_write_config_word(tp->pdev,
6887 tp->msi_cap + PCI_MSI_FLAGS,
6888 ctrl | PCI_MSI_FLAGS_ENABLE);
6889 val = tr32(MSGINT_MODE);
6890 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
6895 static void tg3_stop_fw(struct tg3 *);
6897 /* tp->lock is held. */
6898 static int tg3_chip_reset(struct tg3 *tp)
6901 void (*write_op)(struct tg3 *, u32, u32);
6906 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6908 /* No matching tg3_nvram_unlock() after this because
6909 * chip reset below will undo the nvram lock.
6911 tp->nvram_lock_cnt = 0;
6913 /* GRC_MISC_CFG core clock reset will clear the memory
6914 * enable bit in PCI register 4 and the MSI enable bit
6915 * on some chips, so we save relevant registers here.
6917 tg3_save_pci_state(tp);
6919 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
6920 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
6921 tw32(GRC_FASTBOOT_PC, 0);
6924 * We must avoid the readl() that normally takes place.
6925 * It locks machines, causes machine checks, and other
6926 * fun things. So, temporarily disable the 5701
6927 * hardware workaround, while we do the reset.
6929 write_op = tp->write32;
6930 if (write_op == tg3_write_flush_reg32)
6931 tp->write32 = tg3_write32;
	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
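	/* A minimal sketch (not the driver's actual handler) of how an
	 * interrupt handler consumes such a guard flag.  The function name
	 * is hypothetical, and in this driver the cookie registered with
	 * request_irq() is really a struct tg3_napi pointer.
	 */
#if 0
	static irqreturn_t tg3_isr_guard_example(int irq, void *dev_id)
	{
		struct tg3 *tp = dev_id;	/* illustrative cookie choice */

		if (tp->tg3_flags & TG3_FLAG_CHIP_RESETTING)
			return IRQ_NONE;	/* PCI memory enable may be clear */
		/* ... normal mailbox/status block processing ... */
		return IRQ_HANDLED;
	}
#endif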
6939 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6940 for (i = 0; i < tp->irq_cnt; i++) {
6941 struct tg3_napi *tnapi = &tp->napi[i];
6942 if (tnapi->hw_status) {
6943 tnapi->hw_status->status = 0;
6944 tnapi->hw_status->status_tag = 0;
6946 tnapi->last_tag = 0;
6947 tnapi->last_irq_tag = 0;
6951 for (i = 0; i < tp->irq_cnt; i++)
6952 synchronize_irq(tp->napi[i].irq_vec);
6954 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6955 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
6956 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
6960 val = GRC_MISC_CFG_CORECLK_RESET;
6962 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6963 /* Force PCIe 1.0a mode */
6964 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
6965 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
6966 tr32(TG3_PCIE_PHY_TSTCTL) ==
6967 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
6968 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
6970 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6971 tw32(GRC_MISC_CFG, (1 << 29));
6976 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6977 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6978 tw32(GRC_VCPU_EXT_CTRL,
6979 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6982 /* Manage gphy power for all CPMU absent PCIe devices. */
6983 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6984 !(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
6985 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6987 tw32(GRC_MISC_CFG, val);
6989 /* restore 5701 hardware bug workaround write method */
6990 tp->write32 = write_op;
	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips will not even respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push out the
	 * write that performs the reset.
	 *
	 * For most tg3 variants the trick below works.
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time, so this is the only
	 * way to do it reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
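	/* To spell out the idiom above: a PCI configuration read is
	 * non-posted, so it cannot complete until every earlier posted
	 * write on the same path has reached the device.  The value read
	 * into 'val' is discarded; only the ordering side effect matters.
	 */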
7017 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
7033 /* Clear the "no snoop" and "relaxed ordering" bits. */
7034 pci_read_config_word(tp->pdev,
7035 tp->pcie_cap + PCI_EXP_DEVCTL,
7037 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7038 PCI_EXP_DEVCTL_NOSNOOP_EN);
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
7043 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
7044 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7045 pci_write_config_word(tp->pdev,
7046 tp->pcie_cap + PCI_EXP_DEVCTL,
7049 pcie_set_readrq(tp->pdev, 4096);
7051 /* Clear error status */
7052 pci_write_config_word(tp->pdev,
7053 tp->pcie_cap + PCI_EXP_DEVSTA,
7054 PCI_EXP_DEVSTA_CED |
7055 PCI_EXP_DEVSTA_NFED |
7056 PCI_EXP_DEVSTA_FED |
7057 PCI_EXP_DEVSTA_URD);
7060 tg3_restore_pci_state(tp);
7062 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7069 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7071 tw32(0x5000, 0x400);
7074 tw32(GRC_MODE, tp->grc_mode);
7076 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7079 tw32(0xc4, val | (1 << 15));
7082 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7083 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7084 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7085 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7086 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7087 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7090 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7091 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7092 tw32_f(MAC_MODE, tp->mac_mode);
7093 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7094 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7095 tw32_f(MAC_MODE, tp->mac_mode);
7096 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7097 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
7098 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
7099 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
7100 tw32_f(MAC_MODE, tp->mac_mode);
7102 tw32_f(MAC_MODE, 0);
7105 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7107 err = tg3_poll_fw(tp);
7113 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7114 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7115 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7116 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
7119 tw32(0x7c00, val | (1 << 25));
7122 /* Reprobe ASF enable state. */
7123 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
7124 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
7125 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7126 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7129 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7130 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7131 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7132 tp->last_event_jiffies = jiffies;
7133 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7134 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7141 /* tp->lock is held. */
7142 static void tg3_stop_fw(struct tg3 *tp)
7144 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7145 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7146 /* Wait for RX cpu to ACK the previous event. */
7147 tg3_wait_for_event_ack(tp);
7149 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7151 tg3_generate_fw_event(tp);
7153 /* Wait for RX cpu to ACK this event. */
7154 tg3_wait_for_event_ack(tp);
7158 /* tp->lock is held. */
7159 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7165 tg3_write_sig_pre_reset(tp, kind);
7167 tg3_abort_hw(tp, silent);
7168 err = tg3_chip_reset(tp);
7170 __tg3_set_mac_addr(tp, 0);
7172 tg3_write_sig_legacy(tp, kind);
7173 tg3_write_sig_post_reset(tp, kind);
7181 #define RX_CPU_SCRATCH_BASE 0x30000
7182 #define RX_CPU_SCRATCH_SIZE 0x04000
7183 #define TX_CPU_SCRATCH_BASE 0x34000
7184 #define TX_CPU_SCRATCH_SIZE 0x04000
7186 /* tp->lock is held. */
7187 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7191 BUG_ON(offset == TX_CPU_BASE &&
7192 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
7194 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7195 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7197 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7200 if (offset == RX_CPU_BASE) {
7201 for (i = 0; i < 10000; i++) {
7202 tw32(offset + CPU_STATE, 0xffffffff);
7203 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7204 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7208 tw32(offset + CPU_STATE, 0xffffffff);
7209 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7212 for (i = 0; i < 10000; i++) {
7213 tw32(offset + CPU_STATE, 0xffffffff);
7214 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7215 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7221 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7222 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7226 /* Clear firmware's nvram arbitration. */
7227 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7228 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7233 unsigned int fw_base;
7234 unsigned int fw_len;
7235 const __be32 *fw_data;
7238 /* tp->lock is held. */
7239 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7240 int cpu_scratch_size, struct fw_info *info)
7242 int err, lock_err, i;
7243 void (*write_op)(struct tg3 *, u32, u32);
	if (cpu_base == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: attempt to load TX cpu firmware on a 5705-class chip, which has no TX cpu\n",
			   __func__);
		return -EINVAL;
	}
7253 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7254 write_op = tg3_write_mem;
7256 write_op = tg3_write_indirect_reg32;
	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
7261 lock_err = tg3_nvram_lock(tp);
7262 err = tg3_halt_cpu(tp, cpu_base);
7264 tg3_nvram_unlock(tp);
7268 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7269 write_op(tp, cpu_scratch_base + i, 0);
7270 tw32(cpu_base + CPU_STATE, 0xffffffff);
7271 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7272 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7273 write_op(tp, (cpu_scratch_base +
7274 (info->fw_base & 0xffff) +
7276 be32_to_cpu(info->fw_data[i]));
7284 /* tp->lock is held. */
7285 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7287 struct fw_info info;
7288 const __be32 *fw_data;
7291 fw_data = (void *)tp->fw->data;
	/* Firmware blob starts with version numbers, followed by
	 * start address and length.  We are setting complete length.
	 * length = end_address_of_bss - start_address_of_text.
	 * Remainder is the blob to be loaded contiguously
	 * from start address.
	 */
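	/* A sketch of the header layout described above, as a hypothetical
	 * overlay struct (the driver itself just indexes fw_data[]):
	 */
#if 0
	struct tg3_fw_hdr_example {
		__be32 version;		/* fw_data[0] */
		__be32 base_addr;	/* fw_data[1]: load address in NIC SRAM */
		__be32 len;		/* fw_data[2]: end_of_bss - start_of_text */
		__be32 data[];		/* fw_data[3..]: image loaded contiguously */
	};
#endif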
7299 info.fw_base = be32_to_cpu(fw_data[1]);
7300 info.fw_len = tp->fw->size - 12;
7301 info.fw_data = &fw_data[3];
7303 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7304 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7309 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7310 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7315 /* Now startup only the RX cpu. */
7316 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7317 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7319 for (i = 0; i < 5; i++) {
7320 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7322 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7323 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7324 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
	netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
		   "should be %08x\n", __func__,
		   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7333 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7334 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7339 /* 5705 needs a special version of the TSO firmware. */
7341 /* tp->lock is held. */
7342 static int tg3_load_tso_firmware(struct tg3 *tp)
7344 struct fw_info info;
7345 const __be32 *fw_data;
7346 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7349 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7352 fw_data = (void *)tp->fw->data;
	/* Firmware blob starts with version numbers, followed by
	 * start address and length.  We are setting complete length.
	 * length = end_address_of_bss - start_address_of_text.
	 * Remainder is the blob to be loaded contiguously
	 * from start address.
	 */
7360 info.fw_base = be32_to_cpu(fw_data[1]);
7361 cpu_scratch_size = tp->fw_len;
7362 info.fw_len = tp->fw->size - 12;
7363 info.fw_data = &fw_data[3];
7365 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7366 cpu_base = RX_CPU_BASE;
7367 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7369 cpu_base = TX_CPU_BASE;
7370 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7371 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7374 err = tg3_load_firmware_cpu(tp, cpu_base,
7375 cpu_scratch_base, cpu_scratch_size,
7380 /* Now startup the cpu. */
7381 tw32(cpu_base + CPU_STATE, 0xffffffff);
7382 tw32_f(cpu_base + CPU_PC, info.fw_base);
7384 for (i = 0; i < 5; i++) {
7385 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7387 tw32(cpu_base + CPU_STATE, 0xffffffff);
7388 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7389 tw32_f(cpu_base + CPU_PC, info.fw_base);
7394 "%s fails to set CPU PC, is %08x should be %08x\n",
7395 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7398 tw32(cpu_base + CPU_STATE, 0xffffffff);
7399 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7404 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7406 struct tg3 *tp = netdev_priv(dev);
7407 struct sockaddr *addr = p;
7408 int err = 0, skip_mac_1 = 0;
	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;
7413 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (!netif_running(dev))
		return 0;
7418 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7419 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7421 addr0_high = tr32(MAC_ADDR_0_HIGH);
7422 addr0_low = tr32(MAC_ADDR_0_LOW);
7423 addr1_high = tr32(MAC_ADDR_1_HIGH);
7424 addr1_low = tr32(MAC_ADDR_1_LOW);
7426 /* Skip MAC addr 1 if ASF is using it. */
7427 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7428 !(addr1_high == 0 && addr1_low == 0))
7431 spin_lock_bh(&tp->lock);
7432 __tg3_set_mac_addr(tp, skip_mac_1);
7433 spin_unlock_bh(&tp->lock);
7438 /* tp->lock is held. */
7439 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7440 dma_addr_t mapping, u32 maxlen_flags,
7444 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7445 ((u64) mapping >> 32));
7447 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7448 ((u64) mapping & 0xffffffff));
7450 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7453 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7455 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7459 static void __tg3_set_rx_mode(struct net_device *);
7460 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7464 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) {
7465 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7466 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7467 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7469 tw32(HOSTCC_TXCOL_TICKS, 0);
7470 tw32(HOSTCC_TXMAX_FRAMES, 0);
7471 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7474 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
7475 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7476 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7477 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7479 tw32(HOSTCC_RXCOL_TICKS, 0);
7480 tw32(HOSTCC_RXMAX_FRAMES, 0);
7481 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7484 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7485 u32 val = ec->stats_block_coalesce_usecs;
7487 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7488 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
7496 for (i = 0; i < tp->irq_cnt - 1; i++) {
7499 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7500 tw32(reg, ec->rx_coalesce_usecs);
7501 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7502 tw32(reg, ec->rx_max_coalesced_frames);
7503 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7504 tw32(reg, ec->rx_max_coalesced_frames_irq);
7506 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7507 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7508 tw32(reg, ec->tx_coalesce_usecs);
7509 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7510 tw32(reg, ec->tx_max_coalesced_frames);
7511 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7512 tw32(reg, ec->tx_max_coalesced_frames_irq);
7516 for (; i < tp->irq_max - 1; i++) {
7517 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7518 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7519 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7521 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7522 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7523 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7524 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7529 /* tp->lock is held. */
7530 static void tg3_rings_reset(struct tg3 *tp)
7533 u32 stblk, txrcb, rxrcb, limit;
7534 struct tg3_napi *tnapi = &tp->napi[0];
7536 /* Disable all transmit rings but the first. */
7537 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7538 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7539 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7540 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7541 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7542 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7543 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7545 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7547 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7548 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7549 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7550 BDINFO_FLAGS_DISABLED);
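	/* Worked example: on a 5719 the limit above is NIC_SRAM_SEND_RCB +
	 * TG3_BDINFO_SIZE * 4, so this loop disables send rings 1-3 and
	 * leaves ring 0 for the driver to program below.
	 */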
7553 /* Disable all receive return rings but the first. */
7554 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7555 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7556 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7557 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7558 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7559 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7560 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7561 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7563 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7565 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7566 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7567 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7568 BDINFO_FLAGS_DISABLED);
7570 /* Disable interrupts */
7571 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7573 /* Zero mailbox registers. */
7574 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7575 for (i = 1; i < tp->irq_max; i++) {
7576 tp->napi[i].tx_prod = 0;
7577 tp->napi[i].tx_cons = 0;
7578 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
7579 tw32_mailbox(tp->napi[i].prodmbox, 0);
7580 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7581 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7583 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
7584 tw32_mailbox(tp->napi[0].prodmbox, 0);
7586 tp->napi[0].tx_prod = 0;
7587 tp->napi[0].tx_cons = 0;
7588 tw32_mailbox(tp->napi[0].prodmbox, 0);
7589 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7592 /* Make sure the NIC-based send BD rings are disabled. */
7593 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7594 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7595 for (i = 0; i < 16; i++)
7596 tw32_tx_mbox(mbox + i * 8, 0);
7599 txrcb = NIC_SRAM_SEND_RCB;
7600 rxrcb = NIC_SRAM_RCV_RET_RCB;
7602 /* Clear status block in ram. */
7603 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7605 /* Set status block DMA address */
7606 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7607 ((u64) tnapi->status_mapping >> 32));
7608 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7609 ((u64) tnapi->status_mapping & 0xffffffff));
7611 if (tnapi->tx_ring) {
7612 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7613 (TG3_TX_RING_SIZE <<
7614 BDINFO_FLAGS_MAXLEN_SHIFT),
7615 NIC_SRAM_TX_BUFFER_DESC);
7616 txrcb += TG3_BDINFO_SIZE;
7619 if (tnapi->rx_rcb) {
7620 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7621 (tp->rx_ret_ring_mask + 1) <<
7622 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7623 rxrcb += TG3_BDINFO_SIZE;
7626 stblk = HOSTCC_STATBLCK_RING1;
7628 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7629 u64 mapping = (u64)tnapi->status_mapping;
7630 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7631 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7633 /* Clear status block in ram. */
7634 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7636 if (tnapi->tx_ring) {
7637 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7638 (TG3_TX_RING_SIZE <<
7639 BDINFO_FLAGS_MAXLEN_SHIFT),
7640 NIC_SRAM_TX_BUFFER_DESC);
7641 txrcb += TG3_BDINFO_SIZE;
7644 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7645 ((tp->rx_ret_ring_mask + 1) <<
7646 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7649 rxrcb += TG3_BDINFO_SIZE;
7653 /* tp->lock is held. */
7654 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7656 u32 val, rdmac_mode;
7658 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7660 tg3_disable_ints(tp);
7664 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7666 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
7667 tg3_abort_hw(tp, 1);
7672 err = tg3_chip_reset(tp);
7676 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7678 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7679 val = tr32(TG3_CPMU_CTRL);
7680 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7681 tw32(TG3_CPMU_CTRL, val);
7683 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7684 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7685 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7686 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7688 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7689 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7690 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7691 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7693 val = tr32(TG3_CPMU_HST_ACC);
7694 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7695 val |= CPMU_HST_ACC_MACCLK_6_25;
7696 tw32(TG3_CPMU_HST_ACC, val);
7699 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7700 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7701 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7702 PCIE_PWR_MGMT_L1_THRESH_4MS;
7703 tw32(PCIE_PWR_MGMT_THRESH, val);
7705 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7706 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7708 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7710 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7711 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7714 if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
7715 u32 grc_mode = tr32(GRC_MODE);
7717 /* Access the lower 1K of PL PCIE block registers. */
7718 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7719 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7721 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7722 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7723 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7725 tw32(GRC_MODE, grc_mode);
7728 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7729 u32 grc_mode = tr32(GRC_MODE);
7731 /* Access the lower 1K of PL PCIE block registers. */
7732 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7733 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7735 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5);
7736 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7737 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7739 tw32(GRC_MODE, grc_mode);
7741 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7742 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7743 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7744 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7747 /* This works around an issue with Athlon chipsets on
7748 * B3 tigon3 silicon. This bit has no effect on any
7749 * other revision. But do not set this on PCI Express
7750 * chips and don't even touch the clocks if the CPMU is present.
7752 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7753 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7754 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7755 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7758 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7759 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7760 val = tr32(TG3PCI_PCISTATE);
7761 val |= PCISTATE_RETRY_SAME_DMA;
7762 tw32(TG3PCI_PCISTATE, val);
7765 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
7769 val = tr32(TG3PCI_PCISTATE);
7770 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7771 PCISTATE_ALLOW_APE_SHMEM_WR |
7772 PCISTATE_ALLOW_APE_PSPACE_WR;
7773 tw32(TG3PCI_PCISTATE, val);
7776 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7777 /* Enable some hw fixes. */
7778 val = tr32(TG3PCI_MSI_DATA);
7779 val |= (1 << 26) | (1 << 28) | (1 << 29);
7780 tw32(TG3PCI_MSI_DATA, val);
	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
7788 err = tg3_init_rings(tp);
7792 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
7793 val = tr32(TG3PCI_DMA_RW_CTRL) &
7794 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7795 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
7796 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
7797 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
7798 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7799 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
7803 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7806 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7807 GRC_MODE_4X_NIC_SEND_RINGS |
7808 GRC_MODE_NO_TX_PHDR_CSUM |
7809 GRC_MODE_NO_RX_PHDR_CSUM);
7810 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
7818 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
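	/* For reference, the transmit-side pseudo-header checksum mentioned
	 * above is the usual sum over addresses, protocol and length.  A
	 * sketch for an IPv4 TCP frame using the standard kernel helper
	 * (th/iph/skb are illustrative locals, not in scope here):
	 */
#if 0
	th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
				       skb->len - ip_hdrlen(skb),
				       IPPROTO_TCP, 0);
#endif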
	tw32(GRC_MODE,
	     tp->grc_mode |
	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
	/* Setup the timer prescaler register.  The clock is always 66 MHz. */
7825 val = tr32(GRC_MISC_CFG);
7827 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7828 tw32(GRC_MISC_CFG, val);
7830 /* Initialize MBUF/DESC pool. */
7831 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7833 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7834 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7835 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7836 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7838 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7839 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7840 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7841 } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7844 fw_len = tp->fw_len;
7845 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
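		/* Rounds fw_len up to the next 128-byte boundary (e.g. 0x1a34
		 * becomes 0x1a80) so the mbuf pool placed after the firmware
		 * scratch area starts on an aligned address.
		 */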
7846 tw32(BUFMGR_MB_POOL_ADDR,
7847 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7848 tw32(BUFMGR_MB_POOL_SIZE,
7849 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7852 if (tp->dev->mtu <= ETH_DATA_LEN) {
7853 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7854 tp->bufmgr_config.mbuf_read_dma_low_water);
7855 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7856 tp->bufmgr_config.mbuf_mac_rx_low_water);
7857 tw32(BUFMGR_MB_HIGH_WATER,
7858 tp->bufmgr_config.mbuf_high_water);
7860 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7861 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7862 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7863 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7864 tw32(BUFMGR_MB_HIGH_WATER,
7865 tp->bufmgr_config.mbuf_high_water_jumbo);
7867 tw32(BUFMGR_DMA_LOW_WATER,
7868 tp->bufmgr_config.dma_low_water);
7869 tw32(BUFMGR_DMA_HIGH_WATER,
7870 tp->bufmgr_config.dma_high_water);
7872 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
7873 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7874 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
7875 tw32(BUFMGR_MODE, val);
7876 for (i = 0; i < 2000; i++) {
7877 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7882 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
7886 /* Setup replenish threshold. */
	val = tp->rx_pending / 8;
	if (val == 0)
		val = 1;
	else if (val > tp->rx_std_max_post)
		val = tp->rx_std_max_post;
7892 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7893 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7894 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7896 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7897 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7900 tw32(RCVBDI_STD_THRESH, val);
	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location
	 * is configurable.
	 */
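	/* A sketch (not driver code) of one TG3_BDINFO block as an overlay
	 * struct, matching the offsets written by tg3_set_bdinfo():
	 */
#if 0
	struct tg3_bdinfo_example {
		u32 host_addr_hi;	/* TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH */
		u32 host_addr_lo;	/* TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW */
		u32 maxlen_flags;	/* (rx max buffer size << 16) | ring flags */
		u32 nic_addr;		/* TG3_BDINFO_NIC_ADDR */
	};
#endif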
7919 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7920 ((u64) tpr->rx_std_mapping >> 32));
7921 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7922 ((u64) tpr->rx_std_mapping & 0xffffffff));
7923 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
7924 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
7925 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7926 NIC_SRAM_RX_BUFFER_DESC);
7928 /* Disable the mini ring */
7929 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7930 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7931 BDINFO_FLAGS_DISABLED);
	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
7936 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
7937 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
7938 /* Setup replenish threshold. */
7939 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7941 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7942 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7943 ((u64) tpr->rx_jmb_mapping >> 32));
7944 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7945 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
7946 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7947 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7948 BDINFO_FLAGS_USE_EXT_RECV);
7949 if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) ||
7950 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7951 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7952 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7954 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7955 BDINFO_FLAGS_DISABLED);
7958 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
7959 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7960 val = RX_STD_MAX_SIZE_5705;
7962 val = RX_STD_MAX_SIZE_5717;
7963 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
7964 val |= (TG3_RX_STD_DMA_SZ << 2);
7966 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
7968 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
7970 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
7972 tpr->rx_std_prod_idx = tp->rx_pending;
7973 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
7975 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7976 tp->rx_jumbo_pending : 0;
7977 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
7979 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
7980 tw32(STD_REPLENISH_LWM, 32);
7981 tw32(JMB_REPLENISH_LWM, 16);
7984 tg3_rings_reset(tp);
7986 /* Initialize MAC address and backoff seed. */
7987 __tg3_set_mac_addr(tp, 0);
7989 /* MTU + ethernet header + FCS + optional VLAN tag */
7990 tw32(MAC_RX_MTU_SIZE,
7991 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
7996 tw32(MAC_TX_LENGTHS,
7997 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7998 (6 << TX_LENGTHS_IPG_SHIFT) |
7999 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
8001 /* Receive rules. */
8002 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8003 tw32(RCVLPC_CONFIG, 0x0181);
	/* Calculate RDMAC_MODE setting early; we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
8008 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8009 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8010 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8011 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8012 RDMAC_MODE_LNGREAD_ENAB);
8014 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8015 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8016 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8018 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8019 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8020 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8021 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8022 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8023 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8025 /* If statement applies to 5705 and 5750 PCI devices only */
8026 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8027 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8028 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
8029 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
8030 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8031 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8032 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8033 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
8034 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8038 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
8039 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8041 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8042 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8044 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
8045 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8046 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8047 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8049 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8050 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8051 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8052 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8053 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
8054 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8055 tw32(TG3_RDMA_RSRVCTRL_REG,
8056 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8059 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8060 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8061 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8062 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8063 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8066 /* Receive/send statistics. */
8067 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
8068 val = tr32(RCVLPC_STATS_ENABLE);
8069 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8070 tw32(RCVLPC_STATS_ENABLE, val);
8071 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8072 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8073 val = tr32(RCVLPC_STATS_ENABLE);
8074 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8075 tw32(RCVLPC_STATS_ENABLE, val);
8077 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8079 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8080 tw32(SNDDATAI_STATSENAB, 0xffffff);
8081 tw32(SNDDATAI_STATSCTRL,
8082 (SNDDATAI_SCTRL_ENABLE |
8083 SNDDATAI_SCTRL_FASTUPD));
8085 /* Setup host coalescing engine. */
8086 tw32(HOSTCC_MODE, 0);
8087 for (i = 0; i < 2000; i++) {
8088 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8093 __tg3_set_coalesce(tp, &tp->coal);
8095 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
8100 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8101 ((u64) tp->stats_mapping >> 32));
8102 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8103 ((u64) tp->stats_mapping & 0xffffffff));
8104 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8106 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8108 /* Clear statistics and status block memory areas */
8109 for (i = NIC_SRAM_STATS_BLK;
8110 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8112 tg3_write_mem(tp, i, 0);
8117 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8119 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8120 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8121 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8122 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8124 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8125 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8126 /* reset to prevent losing 1st rx packet intermittently */
8127 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;
8135 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8136 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8137 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8138 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8139 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8140 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8141 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLG2_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs.  The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
		u32 gpio_mask;

8153 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8154 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8155 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8157 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8158 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8159 GRC_LCLCTRL_GPIO_OUTPUT3;
8161 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8162 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8164 tp->grc_local_ctrl &= ~gpio_mask;
8165 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8167 /* GPIO1 must be driven high for eeprom write protect */
8168 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
8169 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8170 GRC_LCLCTRL_GPIO_OUTPUT1);
8172 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8175 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
8176 val = tr32(MSGINT_MODE);
8177 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8178 tw32(MSGINT_MODE, val);
8181 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8182 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8186 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8187 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8188 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8189 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8190 WDMAC_MODE_LNGREAD_ENAB);
8192 /* If statement applies to 5705 and 5750 PCI devices only */
8193 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8194 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8195 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
		if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8201 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8202 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8203 val |= WDMAC_MODE_RX_ACCEL;
8207 /* Enable host coalescing bug fix */
8208 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8209 val |= WDMAC_MODE_STATUS_TAG_FIX;
8211 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8212 val |= WDMAC_MODE_BURST_ALL_DATA;
8214 tw32_f(WDMAC_MODE, val);
8217 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
8220 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8222 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8223 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8224 pcix_cmd |= PCI_X_CMD_READ_2K;
8225 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8226 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8227 pcix_cmd |= PCI_X_CMD_READ_2K;
8229 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8233 tw32_f(RDMAC_MODE, rdmac_mode);
8236 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8237 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8238 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8240 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8242 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8244 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8246 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8247 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8248 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8249 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8250 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8251 val |= RCVDBDI_MODE_LRG_RING_SZ;
8252 tw32(RCVDBDI_MODE, val);
8253 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8254 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8255 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8256 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8257 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
8258 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8259 tw32(SNDBDI_MODE, val);
8260 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8262 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8263 err = tg3_load_5701_a0_firmware_fix(tp);
8268 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
8269 err = tg3_load_tso_firmware(tp);
8274 tp->tx_mode = TX_MODE_ENABLE;
8275 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
8276 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8277 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8278 tw32_f(MAC_TX_MODE, tp->tx_mode);
8281 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
8282 u32 reg = MAC_RSS_INDIR_TBL_0;
8283 u8 *ent = (u8 *)&val;
		/* Setup the indirection table */
		for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
			int idx = i % sizeof(val);

			ent[idx] = i % (tp->irq_cnt - 1);
			if (idx == sizeof(val) - 1) {
				tw32(reg, val);
				reg += 4;
			}
		}
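		/* Worked example of the loop above, assuming tp->irq_cnt == 5
		 * (four rx rings): the byte-wide entries cycle 0,1,2,3,0,1,...
		 * and every fourth iteration flushes one packed 32-bit word,
		 * so the 128-entry table is written as 32 register writes
		 * starting at MAC_RSS_INDIR_TBL_0.
		 */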
8296 /* Setup the "secret" hash key. */
8297 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8298 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8299 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8300 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8301 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8302 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8303 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8304 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8305 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8306 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8309 tp->rx_mode = RX_MODE_ENABLE;
8310 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8311 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8313 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
8314 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8315 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8316 RX_MODE_RSS_IPV6_HASH_EN |
8317 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8318 RX_MODE_RSS_IPV4_HASH_EN |
8319 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8321 tw32_f(MAC_RX_MODE, tp->rx_mode);
8324 tw32(MAC_LED_CTRL, tp->led_ctrl);
8326 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8327 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8328 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8331 tw32_f(MAC_RX_MODE, tp->rx_mode);
8334 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8335 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8336 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8337 /* Set drive transmission level to 1.2V */
8338 /* only if the signal pre-emphasis bit is not set */
8339 val = tr32(MAC_SERDES_CFG);
8342 tw32(MAC_SERDES_CFG, val);
8344 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8345 tw32(MAC_SERDES_CFG, 0x616000);
	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8357 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8358 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8359 /* Use hardware link auto-negotiation */
8360 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
8363 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8364 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8367 tmp = tr32(SERDES_RX_CTRL);
8368 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8369 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8370 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8371 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8374 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
8375 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8376 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8377 tp->link_config.speed = tp->link_config.orig_speed;
8378 tp->link_config.duplex = tp->link_config.orig_duplex;
8379 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8382 err = tg3_setup_phy(tp, 0);
8386 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8387 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8390 /* Clear CRC stats. */
8391 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8392 tg3_writephy(tp, MII_TG3_TEST1,
8393 tmp | MII_TG3_TEST1_CRC_EN);
8394 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8399 __tg3_set_rx_mode(tp->dev);
8401 /* Initialize receive rules. */
8402 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8403 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8404 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8405 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
		limit -= 4;
	switch (limit) {
	case 16: tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
	case 15: tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
	case 14: tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
	case 13: tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
	case 12: tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
	case 11: tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
	case 10: tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
	case 9: tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
	case 8: tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
	case 7: tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
	case 6: tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
	case 5: tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
	case 4: /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
	case 3: /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
	case 2:
	case 1:
	default:
		break;
	}
8450 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8451 /* Write our heartbeat update interval to APE. */
8452 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8453 APE_HOST_HEARTBEAT_INT_DISABLE);
8455 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8460 /* Called at device open time to get the chip ready for
8461 * packet processing. Invoked with tp->lock held.
8463 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8465 tg3_switch_clocks(tp);
8467 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8469 return tg3_reset_hw(tp, reset_phy);
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
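/* Worked example of the carry detection above: the hardware counters are
 * only 32 bits wide and wrap, so the low word is accumulated and an
 * unsigned overflow is detected when the new low word is smaller than the
 * addend.  E.g. low = 0xffffff00 plus __val = 0x200 leaves low = 0x100,
 * which is < 0x200, so high is incremented, extending the stat to 64 bits.
 */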
8479 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8481 struct tg3_hw_stats *sp = tp->hw_stats;
8483 if (!netif_carrier_ok(tp->dev))
8486 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8487 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8488 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8489 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8490 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8491 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8492 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8493 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8494 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8495 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8496 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8497 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8498 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8500 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8501 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8502 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8503 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8504 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8505 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8506 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8507 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8508 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8509 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8510 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8511 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8512 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8513 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8515 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8516 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8517 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8520 static void tg3_timer(unsigned long __opaque)
8522 struct tg3 *tp = (struct tg3 *) __opaque;
8527 spin_lock(&tp->lock);
8529 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because, when using non-tagged
		 * IRQ status, the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
8534 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8535 tw32(GRC_LOCAL_CTRL,
8536 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8538 tw32(HOSTCC_MODE, tp->coalesce_mode |
8539 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8542 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8543 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
8544 spin_unlock(&tp->lock);
8545 schedule_work(&tp->reset_task);
8550 /* This part only runs once per second. */
8551 if (!--tp->timer_counter) {
8552 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8553 tg3_periodic_fetch_stats(tp);
		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat = tr32(MAC_STATUS);
			int phy_event = 0;

			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
8571 u32 mac_stat = tr32(MAC_STATUS);
8574 if (netif_carrier_ok(tp->dev) &&
8575 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8578 if (!netif_carrier_ok(tp->dev) &&
8579 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8580 MAC_STATUS_SIGNAL_DET))) {
8584 if (!tp->serdes_counter) {
8587 ~MAC_MODE_PORT_MODE_MASK));
8589 tw32_f(MAC_MODE, tp->mac_mode);
8592 tg3_setup_phy(tp, 0);
8594 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8595 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8596 tg3_serdes_parallel_detect(tp);
8599 tp->timer_counter = tp->timer_multiplier;
	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
8619 if (!--tp->asf_counter) {
8620 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
8621 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
8622 tg3_wait_for_event_ack(tp);
8624 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8625 FWCMD_NICDRV_ALIVE3);
8626 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8627 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8628 TG3_FW_UPDATE_TIMEOUT_SEC);
8630 tg3_generate_fw_event(tp);
8632 tp->asf_counter = tp->asf_multiplier;
8635 spin_unlock(&tp->lock);
8638 tp->timer.expires = jiffies + tp->timer_offset;
8639 add_timer(&tp->timer);
8642 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8645 unsigned long flags;
8647 struct tg3_napi *tnapi = &tp->napi[irq_num];
	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
		fn = tg3_msi;
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			fn = tg3_msi_1shot;
		flags = IRQF_SAMPLE_RANDOM;
	} else {
		fn = tg3_interrupt;
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
	}
8669 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8672 static int tg3_test_interrupt(struct tg3 *tp)
8674 struct tg3_napi *tnapi = &tp->napi[0];
8675 struct net_device *dev = tp->dev;
8676 int err, i, intr_ok = 0;
	if (!netif_running(dev))
		return -ENODEV;
8682 tg3_disable_ints(tp);
8684 free_irq(tnapi->irq_vec, tnapi);
	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
8690 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
8691 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8692 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8693 tw32(MSGINT_MODE, val);
8696 err = request_irq(tnapi->irq_vec, tg3_test_isr,
8697 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8701 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8702 tg3_enable_ints(tp);
8704 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8707 for (i = 0; i < 5; i++) {
8708 u32 int_mbox, misc_host_ctrl;
8710 int_mbox = tr32_mailbox(tnapi->int_mbox);
8711 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8713 if ((int_mbox != 0) ||
8714 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8722 tg3_disable_ints(tp);
8724 free_irq(tnapi->irq_vec, tnapi);
8726 err = tg3_request_irq(tp, 0);
8732 /* Reenable MSI one shot mode. */
8733 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
8734 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8735 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
8736 tw32(MSGINT_MODE, val);
/* Returns 0 if the MSI test succeeds, or if the MSI test fails
 * but INTx mode is successfully restored.
 */
8747 static int tg3_test_msi(struct tg3 *tp)
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;
	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
8758 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8759 pci_write_config_word(tp->pdev, PCI_COMMAND,
8760 pci_cmd & ~PCI_COMMAND_SERR);
8762 err = tg3_test_interrupt(tp);
8764 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8769 /* other failures */
8773 /* MSI test failed, go back to INTx mode */
8774 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
8775 "to INTx mode. Please report this failure to the PCI "
8776 "maintainer and include system chipset information\n");
8778 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8780 pci_disable_msi(tp->pdev);
8782 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8783 tp->napi[0].irq_vec = tp->pdev->irq;
8785 err = tg3_request_irq(tp, 0);
	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
8792 tg3_full_lock(tp, 1);
8794 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8795 err = tg3_init_hw(tp, 1);
8797 tg3_full_unlock(tp);
8800 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8805 static int tg3_request_firmware(struct tg3 *tp)
8807 const __be32 *fw_data;
8809 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
8810 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
8815 fw_data = (void *)tp->fw->data;
	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */

	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
8823 if (tp->fw_len < (tp->fw->size - 12)) {
8824 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
8825 tp->fw_len, tp->fw_needed);
8826 release_firmware(tp->fw);
8831 /* We no longer need firmware; we have it. */
8832 tp->fw_needed = NULL;
8836 static bool tg3_enable_msix(struct tg3 *tp)
8838 int i, rc, cpus = num_online_cpus();
8839 struct msix_entry msix_ent[tp->irq_max];
	if (cpus == 1)
		/* Just fall back to the simpler MSI mode. */
		return false;

	/*
	 * We want as many rx rings enabled as there are cpus.
	 * The first MSIX vector only deals with link interrupts, etc,
	 * so we add one to the number of vectors we are requesting.
	 */
8850 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
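	/* E.g. with 8 online cpus and an irq_max of 5, irq_cnt becomes 5:
	 * one vector for link/misc interrupts plus four rx rings.
	 */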
8852 for (i = 0; i < tp->irq_max; i++) {
8853 msix_ent[i].entry = i;
8854 msix_ent[i].vector = 0;
	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0)
		return false;
	else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}
8868 for (i = 0; i < tp->irq_max; i++)
8869 tp->napi[i].irq_vec = msix_ent[i].vector;
8871 netif_set_real_num_tx_queues(tp->dev, 1);
8872 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
8873 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
8874 pci_disable_msix(tp->pdev);
8877 if (tp->irq_cnt > 1)
8878 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
8883 static void tg3_ints_init(struct tg3 *tp)
8885 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
8886 !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
8890 netdev_warn(tp->dev,
8891 "MSI without TAGGED_STATUS? Not using MSI\n");
8895 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
8896 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
8897 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
8898 pci_enable_msi(tp->pdev) == 0)
8899 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8901 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8902 u32 msi_mode = tr32(MSGINT_MODE);
8903 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8904 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
8905 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8908 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
8910 tp->napi[0].irq_vec = tp->pdev->irq;
8911 netif_set_real_num_tx_queues(tp->dev, 1);
8912 netif_set_real_num_rx_queues(tp->dev, 1);
8916 static void tg3_ints_fini(struct tg3 *tp)
8918 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8919 pci_disable_msix(tp->pdev);
8920 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
8921 pci_disable_msi(tp->pdev);
8922 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
8923 tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS);
8926 static int tg3_open(struct net_device *dev)
8928 struct tg3 *tp = netdev_priv(dev);
8931 if (tp->fw_needed) {
8932 err = tg3_request_firmware(tp);
8933 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8937 netdev_warn(tp->dev, "TSO capability disabled\n");
8938 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8939 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8940 netdev_notice(tp->dev, "TSO capability restored\n");
8941 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8945 netif_carrier_off(tp->dev);
8947 err = tg3_set_power_state(tp, PCI_D0);
8951 tg3_full_lock(tp, 0);
8953 tg3_disable_ints(tp);
8954 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8956 tg3_full_unlock(tp);
8959 * Setup interrupts first so we know how
8960 * many NAPI resources to allocate
8964 /* The placement of this call is tied
8965 * to the setup and use of Host TX descriptors.
8967 err = tg3_alloc_consistent(tp);
8973 tg3_napi_enable(tp);
8975 for (i = 0; i < tp->irq_cnt; i++) {
8976 struct tg3_napi *tnapi = &tp->napi[i];
8977 err = tg3_request_irq(tp, i);
8979 for (i--; i >= 0; i--)
8980 free_irq(tnapi->irq_vec, tnapi);
8988 tg3_full_lock(tp, 0);
8990 err = tg3_init_hw(tp, 1);
8992 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8995 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8996 tp->timer_offset = HZ;
8998 tp->timer_offset = HZ / 10;
9000 BUG_ON(tp->timer_offset > HZ);
9001 tp->timer_counter = tp->timer_multiplier =
9002 (HZ / tp->timer_offset);
9003 tp->asf_counter = tp->asf_multiplier =
9004 ((HZ / tp->timer_offset) * 2);
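/* e.g. with TAGGED_STATUS the watchdog ticks once a second
 * (timer_counter = HZ / HZ = 1); otherwise it ticks every 100 ms with
 * timer_counter = 10, and asf_counter spans twice that many ticks.
 */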
9006 init_timer(&tp->timer);
9007 tp->timer.expires = jiffies + tp->timer_offset;
9008 tp->timer.data = (unsigned long) tp;
9009 tp->timer.function = tg3_timer;
9012 tg3_full_unlock(tp);
9017 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
9018 err = tg3_test_msi(tp);
9021 tg3_full_lock(tp, 0);
9022 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9024 tg3_full_unlock(tp);
9029 if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
9030 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
9031 u32 val = tr32(PCIE_TRANSACTION_CFG);
9033 tw32(PCIE_TRANSACTION_CFG,
9034 val | PCIE_TRANS_CFG_1SHOT_MSI);
9040 tg3_full_lock(tp, 0);
9042 add_timer(&tp->timer);
9043 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9044 tg3_enable_ints(tp);
9046 tg3_full_unlock(tp);
9048 netif_tx_start_all_queues(dev);
9053 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9054 struct tg3_napi *tnapi = &tp->napi[i];
9055 free_irq(tnapi->irq_vec, tnapi);
9059 tg3_napi_disable(tp);
9061 tg3_free_consistent(tp);
9068 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9069 struct rtnl_link_stats64 *);
9070 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9072 static int tg3_close(struct net_device *dev)
9075 struct tg3 *tp = netdev_priv(dev);
9077 tg3_napi_disable(tp);
9078 cancel_work_sync(&tp->reset_task);
9080 netif_tx_stop_all_queues(dev);
9082 del_timer_sync(&tp->timer);
9086 tg3_full_lock(tp, 1);
9088 tg3_disable_ints(tp);
9090 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9092 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
9094 tg3_full_unlock(tp);
9096 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9097 struct tg3_napi *tnapi = &tp->napi[i];
9098 free_irq(tnapi->irq_vec, tnapi);
9103 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9105 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9106 sizeof(tp->estats_prev));
9110 tg3_free_consistent(tp);
9112 tg3_set_power_state(tp, PCI_D3hot);
9114 netif_carrier_off(tp->dev);
9119 static inline u64 get_stat64(tg3_stat64_t *val)
9121 return ((u64)val->high << 32) | ((u64)val->low);
9124 static u64 calc_crc_errors(struct tg3 *tp)
9126 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9128 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9129 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9130 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9133 spin_lock_bh(&tp->lock);
9134 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9135 tg3_writephy(tp, MII_TG3_TEST1,
9136 val | MII_TG3_TEST1_CRC_EN);
9137 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9140 spin_unlock_bh(&tp->lock);
9142 tp->phy_crc_errors += val;
9144 return tp->phy_crc_errors;
9147 return get_stat64(&hw_stats->rx_fcs_errors);
9150 #define ESTAT_ADD(member) \
9151 estats->member = old_estats->member + \
9152 get_stat64(&hw_stats->member)
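/* Example expansion (editorial): ESTAT_ADD(rx_octets) becomes
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each ethtool counter is the snapshot saved at the last close
 * plus the live 64-bit hardware counter.
 */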
9154 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9156 struct tg3_ethtool_stats *estats = &tp->estats;
9157 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9158 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9163 ESTAT_ADD(rx_octets);
9164 ESTAT_ADD(rx_fragments);
9165 ESTAT_ADD(rx_ucast_packets);
9166 ESTAT_ADD(rx_mcast_packets);
9167 ESTAT_ADD(rx_bcast_packets);
9168 ESTAT_ADD(rx_fcs_errors);
9169 ESTAT_ADD(rx_align_errors);
9170 ESTAT_ADD(rx_xon_pause_rcvd);
9171 ESTAT_ADD(rx_xoff_pause_rcvd);
9172 ESTAT_ADD(rx_mac_ctrl_rcvd);
9173 ESTAT_ADD(rx_xoff_entered);
9174 ESTAT_ADD(rx_frame_too_long_errors);
9175 ESTAT_ADD(rx_jabbers);
9176 ESTAT_ADD(rx_undersize_packets);
9177 ESTAT_ADD(rx_in_length_errors);
9178 ESTAT_ADD(rx_out_length_errors);
9179 ESTAT_ADD(rx_64_or_less_octet_packets);
9180 ESTAT_ADD(rx_65_to_127_octet_packets);
9181 ESTAT_ADD(rx_128_to_255_octet_packets);
9182 ESTAT_ADD(rx_256_to_511_octet_packets);
9183 ESTAT_ADD(rx_512_to_1023_octet_packets);
9184 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9185 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9186 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9187 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9188 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9190 ESTAT_ADD(tx_octets);
9191 ESTAT_ADD(tx_collisions);
9192 ESTAT_ADD(tx_xon_sent);
9193 ESTAT_ADD(tx_xoff_sent);
9194 ESTAT_ADD(tx_flow_control);
9195 ESTAT_ADD(tx_mac_errors);
9196 ESTAT_ADD(tx_single_collisions);
9197 ESTAT_ADD(tx_mult_collisions);
9198 ESTAT_ADD(tx_deferred);
9199 ESTAT_ADD(tx_excessive_collisions);
9200 ESTAT_ADD(tx_late_collisions);
9201 ESTAT_ADD(tx_collide_2times);
9202 ESTAT_ADD(tx_collide_3times);
9203 ESTAT_ADD(tx_collide_4times);
9204 ESTAT_ADD(tx_collide_5times);
9205 ESTAT_ADD(tx_collide_6times);
9206 ESTAT_ADD(tx_collide_7times);
9207 ESTAT_ADD(tx_collide_8times);
9208 ESTAT_ADD(tx_collide_9times);
9209 ESTAT_ADD(tx_collide_10times);
9210 ESTAT_ADD(tx_collide_11times);
9211 ESTAT_ADD(tx_collide_12times);
9212 ESTAT_ADD(tx_collide_13times);
9213 ESTAT_ADD(tx_collide_14times);
9214 ESTAT_ADD(tx_collide_15times);
9215 ESTAT_ADD(tx_ucast_packets);
9216 ESTAT_ADD(tx_mcast_packets);
9217 ESTAT_ADD(tx_bcast_packets);
9218 ESTAT_ADD(tx_carrier_sense_errors);
9219 ESTAT_ADD(tx_discards);
9220 ESTAT_ADD(tx_errors);
9222 ESTAT_ADD(dma_writeq_full);
9223 ESTAT_ADD(dma_write_prioq_full);
9224 ESTAT_ADD(rxbds_empty);
9225 ESTAT_ADD(rx_discards);
9226 ESTAT_ADD(rx_errors);
9227 ESTAT_ADD(rx_threshold_hit);
9229 ESTAT_ADD(dma_readq_full);
9230 ESTAT_ADD(dma_read_prioq_full);
9231 ESTAT_ADD(tx_comp_queue_full);
9233 ESTAT_ADD(ring_set_send_prod_index);
9234 ESTAT_ADD(ring_status_update);
9235 ESTAT_ADD(nic_irqs);
9236 ESTAT_ADD(nic_avoided_irqs);
9237 ESTAT_ADD(nic_tx_threshold_hit);
9242 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9243 struct rtnl_link_stats64 *stats)
9245 struct tg3 *tp = netdev_priv(dev);
9246 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9247 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9252 stats->rx_packets = old_stats->rx_packets +
9253 get_stat64(&hw_stats->rx_ucast_packets) +
9254 get_stat64(&hw_stats->rx_mcast_packets) +
9255 get_stat64(&hw_stats->rx_bcast_packets);
9257 stats->tx_packets = old_stats->tx_packets +
9258 get_stat64(&hw_stats->tx_ucast_packets) +
9259 get_stat64(&hw_stats->tx_mcast_packets) +
9260 get_stat64(&hw_stats->tx_bcast_packets);
9262 stats->rx_bytes = old_stats->rx_bytes +
9263 get_stat64(&hw_stats->rx_octets);
9264 stats->tx_bytes = old_stats->tx_bytes +
9265 get_stat64(&hw_stats->tx_octets);
9267 stats->rx_errors = old_stats->rx_errors +
9268 get_stat64(&hw_stats->rx_errors);
9269 stats->tx_errors = old_stats->tx_errors +
9270 get_stat64(&hw_stats->tx_errors) +
9271 get_stat64(&hw_stats->tx_mac_errors) +
9272 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9273 get_stat64(&hw_stats->tx_discards);
9275 stats->multicast = old_stats->multicast +
9276 get_stat64(&hw_stats->rx_mcast_packets);
9277 stats->collisions = old_stats->collisions +
9278 get_stat64(&hw_stats->tx_collisions);
9280 stats->rx_length_errors = old_stats->rx_length_errors +
9281 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9282 get_stat64(&hw_stats->rx_undersize_packets);
9284 stats->rx_over_errors = old_stats->rx_over_errors +
9285 get_stat64(&hw_stats->rxbds_empty);
9286 stats->rx_frame_errors = old_stats->rx_frame_errors +
9287 get_stat64(&hw_stats->rx_align_errors);
9288 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9289 get_stat64(&hw_stats->tx_discards);
9290 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9291 get_stat64(&hw_stats->tx_carrier_sense_errors);
9293 stats->rx_crc_errors = old_stats->rx_crc_errors +
9294 calc_crc_errors(tp);
9296 stats->rx_missed_errors = old_stats->rx_missed_errors +
9297 get_stat64(&hw_stats->rx_discards);
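/* Editorial note: the elided body of calc_crc() below is believed to be
 * the standard LSB-first Ethernet CRC-32 (polynomial 0xedb88320); its
 * inverted low bits select one of the 128 multicast hash filter bits in
 * __tg3_set_rx_mode(). A reference sketch under that assumption:
 *
 *	reg = 0xffffffff;
 *	for (j = 0; j < len; j++) {
 *		reg ^= buf[j];
 *		for (k = 0; k < 8; k++) {
 *			tmp = reg & 0x01;
 *			reg >>= 1;
 *			if (tmp)
 *				reg ^= 0xedb88320;
 *		}
 *	}
 *	return ~reg;
 */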
9302 static inline u32 calc_crc(unsigned char *buf, int len)
9310 for (j = 0; j < len; j++) {
9313 for (k = 0; k < 8; k++) {
9326 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9328 /* accept or reject all multicast frames */
9329 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9330 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9331 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9332 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9335 static void __tg3_set_rx_mode(struct net_device *dev)
9337 struct tg3 *tp = netdev_priv(dev);
9340 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9341 RX_MODE_KEEP_VLAN_TAG);
9343 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9346 #if TG3_VLAN_TAG_USED
9348 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9349 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9351 /* By definition, VLAN is disabled always in this
9354 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9355 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9358 if (dev->flags & IFF_PROMISC) {
9359 /* Promiscuous mode. */
9360 rx_mode |= RX_MODE_PROMISC;
9361 } else if (dev->flags & IFF_ALLMULTI) {
9362 /* Accept all multicast. */
9363 tg3_set_multi(tp, 1);
9364 } else if (netdev_mc_empty(dev)) {
9365 /* Reject all multicast. */
9366 tg3_set_multi(tp, 0);
9368 /* Accept one or more multicast(s). */
9369 struct netdev_hw_addr *ha;
9370 u32 mc_filter[4] = { 0, };
9375 netdev_for_each_mc_addr(ha, dev) {
9376 crc = calc_crc(ha->addr, ETH_ALEN);
9377 bit = ~crc & 0x7f;
9378 regidx = (bit & 0x60) >> 5;
9379 bit &= 0x1f;
9380 mc_filter[regidx] |= (1 << bit);
9383 tw32(MAC_HASH_REG_0, mc_filter[0]);
9384 tw32(MAC_HASH_REG_1, mc_filter[1]);
9385 tw32(MAC_HASH_REG_2, mc_filter[2]);
9386 tw32(MAC_HASH_REG_3, mc_filter[3]);
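/* Worked example (editorial): if calc_crc() yields 0xffffff25 for an
 * address, then bit = ~crc & 0x7f = 0x5a, regidx = (0x5a & 0x60) >> 5
 * = 2, and bit &= 0x1f leaves 26, so bit 26 of MAC_HASH_REG_2 is set.
 */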
9389 if (rx_mode != tp->rx_mode) {
9390 tp->rx_mode = rx_mode;
9391 tw32_f(MAC_RX_MODE, rx_mode);
9396 static void tg3_set_rx_mode(struct net_device *dev)
9398 struct tg3 *tp = netdev_priv(dev);
9400 if (!netif_running(dev))
9403 tg3_full_lock(tp, 0);
9404 __tg3_set_rx_mode(dev);
9405 tg3_full_unlock(tp);
9408 #define TG3_REGDUMP_LEN (32 * 1024)
9410 static int tg3_get_regs_len(struct net_device *dev)
9412 return TG3_REGDUMP_LEN;
9415 static void tg3_get_regs(struct net_device *dev,
9416 struct ethtool_regs *regs, void *_p)
9419 struct tg3 *tp = netdev_priv(dev);
9425 memset(p, 0, TG3_REGDUMP_LEN);
9427 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9430 tg3_full_lock(tp, 0);
9432 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
9433 #define GET_REG32_LOOP(base, len) \
9434 do { p = (u32 *)(orig_p + (base)); \
9435 for (i = 0; i < len; i += 4) \
9436 __GET_REG32((base) + i); \
9438 #define GET_REG32_1(reg) \
9439 do { p = (u32 *)(orig_p + (reg)); \
9440 __GET_REG32((reg)); \
9443 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
9444 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
9445 GET_REG32_LOOP(MAC_MODE, 0x4f0);
9446 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
9447 GET_REG32_1(SNDDATAC_MODE);
9448 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
9449 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
9450 GET_REG32_1(SNDBDC_MODE);
9451 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
9452 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
9453 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
9454 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
9455 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
9456 GET_REG32_1(RCVDCC_MODE);
9457 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
9458 GET_REG32_LOOP(RCVCC_MODE, 0x14);
9459 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
9460 GET_REG32_1(MBFREE_MODE);
9461 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
9462 GET_REG32_LOOP(MEMARB_MODE, 0x10);
9463 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
9464 GET_REG32_LOOP(RDMAC_MODE, 0x08);
9465 GET_REG32_LOOP(WDMAC_MODE, 0x08);
9466 GET_REG32_1(RX_CPU_MODE);
9467 GET_REG32_1(RX_CPU_STATE);
9468 GET_REG32_1(RX_CPU_PGMCTR);
9469 GET_REG32_1(RX_CPU_HWBKPT);
9470 GET_REG32_1(TX_CPU_MODE);
9471 GET_REG32_1(TX_CPU_STATE);
9472 GET_REG32_1(TX_CPU_PGMCTR);
9473 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
9474 GET_REG32_LOOP(FTQ_RESET, 0x120);
9475 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
9476 GET_REG32_1(DMAC_MODE);
9477 GET_REG32_LOOP(GRC_MODE, 0x4c);
9478 if (tp->tg3_flags & TG3_FLAG_NVRAM)
9479 GET_REG32_LOOP(NVRAM_CMD, 0x24);
9482 #undef GET_REG32_LOOP
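/* Example expansion (editorial): GET_REG32_LOOP(MAC_MODE, 0x4f0) points
 * p at offset MAC_MODE within the dump buffer and issues one tr32() per
 * 32-bit register across the 0x4f0-byte window, so every register value
 * lands at its own offset inside the 32k snapshot.
 */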
9485 tg3_full_unlock(tp);
9488 static int tg3_get_eeprom_len(struct net_device *dev)
9490 struct tg3 *tp = netdev_priv(dev);
9492 return tp->nvram_size;
9495 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9497 struct tg3 *tp = netdev_priv(dev);
9500 u32 i, offset, len, b_offset, b_count;
9503 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9506 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9509 offset = eeprom->offset;
9513 eeprom->magic = TG3_EEPROM_MAGIC;
9516 /* adjustments to start on required 4 byte boundary */
9517 b_offset = offset & 3;
9518 b_count = 4 - b_offset;
9519 if (b_count > len) {
9520 /* i.e. offset=1 len=2 */
9523 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9526 memcpy(data, ((char *)&val) + b_offset, b_count);
9529 eeprom->len += b_count;
9532 /* read bytes up to the last 4 byte boundary */
9533 pd = &data[eeprom->len];
9534 for (i = 0; i < (len - (len & 3)); i += 4) {
9535 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9540 memcpy(pd + i, &val, 4);
9545 /* read last bytes not ending on 4 byte boundary */
9546 pd = &data[eeprom->len];
9548 b_offset = offset + len - b_count;
9549 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9552 memcpy(pd, &val, b_count);
9553 eeprom->len += b_count;
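/* Worked example (editorial): offset=2 len=9 splits into a 2-byte head
 * copied out of the word at 0, one whole word read at offset 4, and a
 * 3-byte tail from the word at 8 (b_offset = 4 + 7 - 3), for 9 bytes
 * total using only aligned 32-bit NVRAM reads.
 */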
9558 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9560 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9562 struct tg3 *tp = netdev_priv(dev);
9564 u32 offset, len, b_offset, odd_len;
9568 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9571 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
9572 eeprom->magic != TG3_EEPROM_MAGIC)
9575 offset = eeprom->offset;
9578 if ((b_offset = (offset & 3))) {
9579 /* adjustments to start on required 4 byte boundary */
9580 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9591 /* adjustments to end on required 4 byte boundary */
9593 len = (len + 3) & ~3;
9594 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9600 if (b_offset || odd_len) {
9601 buf = kmalloc(len, GFP_KERNEL);
9605 memcpy(buf, &start, 4);
9607 memcpy(buf+len-4, &end, 4);
9608 memcpy(buf + b_offset, data, eeprom->len);
9611 ret = tg3_nvram_write_block(tp, offset, len, buf);
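/* Worked example (editorial): writing 6 bytes at offset 3 widens the
 * write to the aligned span [0, 12): bytes 0-2 come from the preserved
 * `start` word, bytes 9-11 from the preserved `end` word, and the user
 * data is laid over bytes 3-8 before the single block write above.
 */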
9619 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9621 struct tg3 *tp = netdev_priv(dev);
9623 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9624 struct phy_device *phydev;
9625 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9627 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9628 return phy_ethtool_gset(phydev, cmd);
9631 cmd->supported = (SUPPORTED_Autoneg);
9633 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9634 cmd->supported |= (SUPPORTED_1000baseT_Half |
9635 SUPPORTED_1000baseT_Full);
9637 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9638 cmd->supported |= (SUPPORTED_100baseT_Half |
9639 SUPPORTED_100baseT_Full |
9640 SUPPORTED_10baseT_Half |
9641 SUPPORTED_10baseT_Full |
9643 cmd->port = PORT_TP;
9645 cmd->supported |= SUPPORTED_FIBRE;
9646 cmd->port = PORT_FIBRE;
9649 cmd->advertising = tp->link_config.advertising;
9650 if (netif_running(dev)) {
9651 cmd->speed = tp->link_config.active_speed;
9652 cmd->duplex = tp->link_config.active_duplex;
9654 cmd->phy_address = tp->phy_addr;
9655 cmd->transceiver = XCVR_INTERNAL;
9656 cmd->autoneg = tp->link_config.autoneg;
9662 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9664 struct tg3 *tp = netdev_priv(dev);
9666 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9667 struct phy_device *phydev;
9668 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9670 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9671 return phy_ethtool_sset(phydev, cmd);
9674 if (cmd->autoneg != AUTONEG_ENABLE &&
9675 cmd->autoneg != AUTONEG_DISABLE)
9678 if (cmd->autoneg == AUTONEG_DISABLE &&
9679 cmd->duplex != DUPLEX_FULL &&
9680 cmd->duplex != DUPLEX_HALF)
9683 if (cmd->autoneg == AUTONEG_ENABLE) {
9684 u32 mask = ADVERTISED_Autoneg |
9686 ADVERTISED_Asym_Pause;
9688 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9689 mask |= ADVERTISED_1000baseT_Half |
9690 ADVERTISED_1000baseT_Full;
9692 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9693 mask |= ADVERTISED_100baseT_Half |
9694 ADVERTISED_100baseT_Full |
9695 ADVERTISED_10baseT_Half |
9696 ADVERTISED_10baseT_Full |
9699 mask |= ADVERTISED_FIBRE;
9701 if (cmd->advertising & ~mask)
9704 mask &= (ADVERTISED_1000baseT_Half |
9705 ADVERTISED_1000baseT_Full |
9706 ADVERTISED_100baseT_Half |
9707 ADVERTISED_100baseT_Full |
9708 ADVERTISED_10baseT_Half |
9709 ADVERTISED_10baseT_Full);
9711 cmd->advertising &= mask;
9713 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
9714 if (cmd->speed != SPEED_1000)
9717 if (cmd->duplex != DUPLEX_FULL)
9720 if (cmd->speed != SPEED_100 &&
9721 cmd->speed != SPEED_10)
9726 tg3_full_lock(tp, 0);
9728 tp->link_config.autoneg = cmd->autoneg;
9729 if (cmd->autoneg == AUTONEG_ENABLE) {
9730 tp->link_config.advertising = (cmd->advertising |
9731 ADVERTISED_Autoneg);
9732 tp->link_config.speed = SPEED_INVALID;
9733 tp->link_config.duplex = DUPLEX_INVALID;
9735 tp->link_config.advertising = 0;
9736 tp->link_config.speed = cmd->speed;
9737 tp->link_config.duplex = cmd->duplex;
9740 tp->link_config.orig_speed = tp->link_config.speed;
9741 tp->link_config.orig_duplex = tp->link_config.duplex;
9742 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9744 if (netif_running(dev))
9745 tg3_setup_phy(tp, 1);
9747 tg3_full_unlock(tp);
9752 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9754 struct tg3 *tp = netdev_priv(dev);
9756 strcpy(info->driver, DRV_MODULE_NAME);
9757 strcpy(info->version, DRV_MODULE_VERSION);
9758 strcpy(info->fw_version, tp->fw_ver);
9759 strcpy(info->bus_info, pci_name(tp->pdev));
9762 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9764 struct tg3 *tp = netdev_priv(dev);
9766 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9767 device_can_wakeup(&tp->pdev->dev))
9768 wol->supported = WAKE_MAGIC;
9772 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9773 device_can_wakeup(&tp->pdev->dev))
9774 wol->wolopts = WAKE_MAGIC;
9775 memset(&wol->sopass, 0, sizeof(wol->sopass));
9778 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9780 struct tg3 *tp = netdev_priv(dev);
9781 struct device *dp = &tp->pdev->dev;
9783 if (wol->wolopts & ~WAKE_MAGIC)
9785 if ((wol->wolopts & WAKE_MAGIC) &&
9786 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9789 spin_lock_bh(&tp->lock);
9790 if (wol->wolopts & WAKE_MAGIC) {
9791 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9792 device_set_wakeup_enable(dp, true);
9794 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9795 device_set_wakeup_enable(dp, false);
9797 spin_unlock_bh(&tp->lock);
9802 static u32 tg3_get_msglevel(struct net_device *dev)
9804 struct tg3 *tp = netdev_priv(dev);
9805 return tp->msg_enable;
9808 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9810 struct tg3 *tp = netdev_priv(dev);
9811 tp->msg_enable = value;
9814 static int tg3_set_tso(struct net_device *dev, u32 value)
9816 struct tg3 *tp = netdev_priv(dev);
9818 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9823 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9824 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
9825 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
9827 dev->features |= NETIF_F_TSO6;
9828 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
9829 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9830 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9831 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9832 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9833 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9834 dev->features |= NETIF_F_TSO_ECN;
9836 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9838 return ethtool_op_set_tso(dev, value);
9841 static int tg3_nway_reset(struct net_device *dev)
9843 struct tg3 *tp = netdev_priv(dev);
9846 if (!netif_running(dev))
9849 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
9852 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9853 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9855 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9859 spin_lock_bh(&tp->lock);
9861 tg3_readphy(tp, MII_BMCR, &bmcr);
9862 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9863 ((bmcr & BMCR_ANENABLE) ||
9864 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
9865 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9869 spin_unlock_bh(&tp->lock);
9875 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9877 struct tg3 *tp = netdev_priv(dev);
9879 ering->rx_max_pending = tp->rx_std_ring_mask;
9880 ering->rx_mini_max_pending = 0;
9881 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9882 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
9884 ering->rx_jumbo_max_pending = 0;
9886 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9888 ering->rx_pending = tp->rx_pending;
9889 ering->rx_mini_pending = 0;
9890 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9891 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9893 ering->rx_jumbo_pending = 0;
9895 ering->tx_pending = tp->napi[0].tx_pending;
9898 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9900 struct tg3 *tp = netdev_priv(dev);
9901 int i, irq_sync = 0, err = 0;
9903 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
9904 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
9905 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9906 (ering->tx_pending <= MAX_SKB_FRAGS) ||
9907 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9908 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9911 if (netif_running(dev)) {
9917 tg3_full_lock(tp, irq_sync);
9919 tp->rx_pending = ering->rx_pending;
9921 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9922 tp->rx_pending > 63)
9923 tp->rx_pending = 63;
9924 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9926 for (i = 0; i < tp->irq_max; i++)
9927 tp->napi[i].tx_pending = ering->tx_pending;
9929 if (netif_running(dev)) {
9930 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9931 err = tg3_restart_hw(tp, 1);
9933 tg3_netif_start(tp);
9936 tg3_full_unlock(tp);
9938 if (irq_sync && !err)
9944 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9946 struct tg3 *tp = netdev_priv(dev);
9948 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9950 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
9951 epause->rx_pause = 1;
9953 epause->rx_pause = 0;
9955 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
9956 epause->tx_pause = 1;
9958 epause->tx_pause = 0;
9961 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9963 struct tg3 *tp = netdev_priv(dev);
9966 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9968 struct phy_device *phydev;
9970 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9972 if (!(phydev->supported & SUPPORTED_Pause) ||
9973 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
9974 (epause->rx_pause != epause->tx_pause)))
9977 tp->link_config.flowctrl = 0;
9978 if (epause->rx_pause) {
9979 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9981 if (epause->tx_pause) {
9982 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9983 newadv = ADVERTISED_Pause;
9985 newadv = ADVERTISED_Pause |
9986 ADVERTISED_Asym_Pause;
9987 } else if (epause->tx_pause) {
9988 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9989 newadv = ADVERTISED_Asym_Pause;
9993 if (epause->autoneg)
9994 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9996 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9998 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
9999 u32 oldadv = phydev->advertising &
10000 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10001 if (oldadv != newadv) {
10002 phydev->advertising &=
10003 ~(ADVERTISED_Pause |
10004 ADVERTISED_Asym_Pause);
10005 phydev->advertising |= newadv;
10006 if (phydev->autoneg) {
10008 * Always renegotiate the link to
10009 * inform our link partner of our
10010 * flow control settings, even if the
10011 * flow control is forced. Let
10012 * tg3_adjust_link() do the final
10013 * flow control setup.
10015 return phy_start_aneg(phydev);
10019 if (!epause->autoneg)
10020 tg3_setup_flow_control(tp, 0, 0);
10022 tp->link_config.orig_advertising &=
10023 ~(ADVERTISED_Pause |
10024 ADVERTISED_Asym_Pause);
10025 tp->link_config.orig_advertising |= newadv;
10030 if (netif_running(dev)) {
10031 tg3_netif_stop(tp);
10035 tg3_full_lock(tp, irq_sync);
10037 if (epause->autoneg)
10038 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10040 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
10041 if (epause->rx_pause)
10042 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10044 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10045 if (epause->tx_pause)
10046 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10048 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10050 if (netif_running(dev)) {
10051 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10052 err = tg3_restart_hw(tp, 1);
10054 tg3_netif_start(tp);
10057 tg3_full_unlock(tp);
10063 static u32 tg3_get_rx_csum(struct net_device *dev)
10065 struct tg3 *tp = netdev_priv(dev);
10066 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
10069 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
10071 struct tg3 *tp = netdev_priv(dev);
10073 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10079 spin_lock_bh(&tp->lock);
10081 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10083 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10084 spin_unlock_bh(&tp->lock);
10089 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
10091 struct tg3 *tp = netdev_priv(dev);
10093 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10099 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10100 ethtool_op_set_tx_ipv6_csum(dev, data);
10102 ethtool_op_set_tx_csum(dev, data);
10107 static int tg3_get_sset_count(struct net_device *dev, int sset)
10111 return TG3_NUM_TEST;
10113 return TG3_NUM_STATS;
10115 return -EOPNOTSUPP;
10119 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10121 switch (stringset) {
10123 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10126 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10129 WARN_ON(1); /* we need a WARN() */
10134 static int tg3_phys_id(struct net_device *dev, u32 data)
10136 struct tg3 *tp = netdev_priv(dev);
10139 if (!netif_running(tp->dev))
10143 data = UINT_MAX / 2;
10145 for (i = 0; i < (data * 2); i++) {
10147 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10148 LED_CTRL_1000MBPS_ON |
10149 LED_CTRL_100MBPS_ON |
10150 LED_CTRL_10MBPS_ON |
10151 LED_CTRL_TRAFFIC_OVERRIDE |
10152 LED_CTRL_TRAFFIC_BLINK |
10153 LED_CTRL_TRAFFIC_LED);
10156 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10157 LED_CTRL_TRAFFIC_OVERRIDE);
10159 if (msleep_interruptible(500))
10162 tw32(MAC_LED_CTRL, tp->led_ctrl);
10166 static void tg3_get_ethtool_stats(struct net_device *dev,
10167 struct ethtool_stats *estats, u64 *tmp_stats)
10169 struct tg3 *tp = netdev_priv(dev);
10170 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10173 #define NVRAM_TEST_SIZE 0x100
10174 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10175 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10176 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10177 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10178 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10180 static int tg3_test_nvram(struct tg3 *tp)
10184 int i, j, k, err = 0, size;
10186 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
10189 if (tg3_nvram_read(tp, 0, &magic) != 0)
10192 if (magic == TG3_EEPROM_MAGIC)
10193 size = NVRAM_TEST_SIZE;
10194 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10195 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10196 TG3_EEPROM_SB_FORMAT_1) {
10197 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10198 case TG3_EEPROM_SB_REVISION_0:
10199 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10201 case TG3_EEPROM_SB_REVISION_2:
10202 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10204 case TG3_EEPROM_SB_REVISION_3:
10205 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10212 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10213 size = NVRAM_SELFBOOT_HW_SIZE;
10217 buf = kmalloc(size, GFP_KERNEL);
10222 for (i = 0, j = 0; i < size; i += 4, j++) {
10223 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10230 /* Selfboot format */
10231 magic = be32_to_cpu(buf[0]);
10232 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10233 TG3_EEPROM_MAGIC_FW) {
10234 u8 *buf8 = (u8 *) buf, csum8 = 0;
10236 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10237 TG3_EEPROM_SB_REVISION_2) {
10238 /* For rev 2, the csum doesn't include the MBA. */
10239 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10241 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10244 for (i = 0; i < size; i++)
10257 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10258 TG3_EEPROM_MAGIC_HW) {
10259 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10260 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10261 u8 *buf8 = (u8 *) buf;
10263 /* Separate the parity bits and the data bytes. */
10264 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10265 if ((i == 0) || (i == 8)) {
10269 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10270 parity[k++] = buf8[i] & msk;
10272 } else if (i == 16) {
10276 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10277 parity[k++] = buf8[i] & msk;
10280 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10281 parity[k++] = buf8[i] & msk;
10284 data[j++] = buf8[i];
10288 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10289 u8 hw8 = hweight8(data[i]);
10291 if ((hw8 & 0x1) && parity[i])
10293 else if (!(hw8 & 0x1) && !parity[i])
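/* i.e. odd parity across each data byte plus its parity bit: an even
 * popcount must pair with a set parity bit and an odd popcount with a
 * clear one; the elided branches presumably fail the test otherwise.
 */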
10300 /* Bootstrap checksum at offset 0x10 */
10301 csum = calc_crc((unsigned char *) buf, 0x10);
10302 if (csum != be32_to_cpu(buf[0x10/4]))
10305 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10306 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10307 if (csum != be32_to_cpu(buf[0xfc/4]))
10317 #define TG3_SERDES_TIMEOUT_SEC 2
10318 #define TG3_COPPER_TIMEOUT_SEC 6
10320 static int tg3_test_link(struct tg3 *tp)
10324 if (!netif_running(tp->dev))
10327 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10328 max = TG3_SERDES_TIMEOUT_SEC;
10330 max = TG3_COPPER_TIMEOUT_SEC;
10332 for (i = 0; i < max; i++) {
10333 if (netif_carrier_ok(tp->dev))
10336 if (msleep_interruptible(1000))
10343 /* Only test the commonly used registers */
10344 static int tg3_test_registers(struct tg3 *tp)
10346 int i, is_5705, is_5750;
10347 u32 offset, read_mask, write_mask, val, save_val, read_val;
10351 #define TG3_FL_5705 0x1
10352 #define TG3_FL_NOT_5705 0x2
10353 #define TG3_FL_NOT_5788 0x4
10354 #define TG3_FL_NOT_5750 0x8
10358 /* MAC Control Registers */
10359 { MAC_MODE, TG3_FL_NOT_5705,
10360 0x00000000, 0x00ef6f8c },
10361 { MAC_MODE, TG3_FL_5705,
10362 0x00000000, 0x01ef6b8c },
10363 { MAC_STATUS, TG3_FL_NOT_5705,
10364 0x03800107, 0x00000000 },
10365 { MAC_STATUS, TG3_FL_5705,
10366 0x03800100, 0x00000000 },
10367 { MAC_ADDR_0_HIGH, 0x0000,
10368 0x00000000, 0x0000ffff },
10369 { MAC_ADDR_0_LOW, 0x0000,
10370 0x00000000, 0xffffffff },
10371 { MAC_RX_MTU_SIZE, 0x0000,
10372 0x00000000, 0x0000ffff },
10373 { MAC_TX_MODE, 0x0000,
10374 0x00000000, 0x00000070 },
10375 { MAC_TX_LENGTHS, 0x0000,
10376 0x00000000, 0x00003fff },
10377 { MAC_RX_MODE, TG3_FL_NOT_5705,
10378 0x00000000, 0x000007fc },
10379 { MAC_RX_MODE, TG3_FL_5705,
10380 0x00000000, 0x000007dc },
10381 { MAC_HASH_REG_0, 0x0000,
10382 0x00000000, 0xffffffff },
10383 { MAC_HASH_REG_1, 0x0000,
10384 0x00000000, 0xffffffff },
10385 { MAC_HASH_REG_2, 0x0000,
10386 0x00000000, 0xffffffff },
10387 { MAC_HASH_REG_3, 0x0000,
10388 0x00000000, 0xffffffff },
10390 /* Receive Data and Receive BD Initiator Control Registers. */
10391 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10392 0x00000000, 0xffffffff },
10393 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10394 0x00000000, 0xffffffff },
10395 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10396 0x00000000, 0x00000003 },
10397 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10398 0x00000000, 0xffffffff },
10399 { RCVDBDI_STD_BD+0, 0x0000,
10400 0x00000000, 0xffffffff },
10401 { RCVDBDI_STD_BD+4, 0x0000,
10402 0x00000000, 0xffffffff },
10403 { RCVDBDI_STD_BD+8, 0x0000,
10404 0x00000000, 0xffff0002 },
10405 { RCVDBDI_STD_BD+0xc, 0x0000,
10406 0x00000000, 0xffffffff },
10408 /* Receive BD Initiator Control Registers. */
10409 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10410 0x00000000, 0xffffffff },
10411 { RCVBDI_STD_THRESH, TG3_FL_5705,
10412 0x00000000, 0x000003ff },
10413 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10414 0x00000000, 0xffffffff },
10416 /* Host Coalescing Control Registers. */
10417 { HOSTCC_MODE, TG3_FL_NOT_5705,
10418 0x00000000, 0x00000004 },
10419 { HOSTCC_MODE, TG3_FL_5705,
10420 0x00000000, 0x000000f6 },
10421 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10422 0x00000000, 0xffffffff },
10423 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10424 0x00000000, 0x000003ff },
10425 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10426 0x00000000, 0xffffffff },
10427 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10428 0x00000000, 0x000003ff },
10429 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10430 0x00000000, 0xffffffff },
10431 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10432 0x00000000, 0x000000ff },
10433 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10434 0x00000000, 0xffffffff },
10435 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10436 0x00000000, 0x000000ff },
10437 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10438 0x00000000, 0xffffffff },
10439 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10440 0x00000000, 0xffffffff },
10441 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10442 0x00000000, 0xffffffff },
10443 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10444 0x00000000, 0x000000ff },
10445 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10446 0x00000000, 0xffffffff },
10447 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10448 0x00000000, 0x000000ff },
10449 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10450 0x00000000, 0xffffffff },
10451 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10452 0x00000000, 0xffffffff },
10453 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10454 0x00000000, 0xffffffff },
10455 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10456 0x00000000, 0xffffffff },
10457 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10458 0x00000000, 0xffffffff },
10459 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10460 0xffffffff, 0x00000000 },
10461 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10462 0xffffffff, 0x00000000 },
10464 /* Buffer Manager Control Registers. */
10465 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10466 0x00000000, 0x007fff80 },
10467 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10468 0x00000000, 0x007fffff },
10469 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10470 0x00000000, 0x0000003f },
10471 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10472 0x00000000, 0x000001ff },
10473 { BUFMGR_MB_HIGH_WATER, 0x0000,
10474 0x00000000, 0x000001ff },
10475 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10476 0xffffffff, 0x00000000 },
10477 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10478 0xffffffff, 0x00000000 },
10480 /* Mailbox Registers */
10481 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10482 0x00000000, 0x000001ff },
10483 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10484 0x00000000, 0x000001ff },
10485 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10486 0x00000000, 0x000007ff },
10487 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10488 0x00000000, 0x000001ff },
10490 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10493 is_5705 = is_5750 = 0;
10494 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10496 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10500 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10501 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10504 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10507 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10508 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10511 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10514 offset = (u32) reg_tbl[i].offset;
10515 read_mask = reg_tbl[i].read_mask;
10516 write_mask = reg_tbl[i].write_mask;
10518 /* Save the original register content */
10519 save_val = tr32(offset);
10521 /* Determine the read-only value. */
10522 read_val = save_val & read_mask;
10524 /* Write zero to the register, then make sure the read-only bits
10525 * are not changed and the read/write bits are all zeros.
10529 val = tr32(offset);
10531 /* Test the read-only and read/write bits. */
10532 if (((val & read_mask) != read_val) || (val & write_mask))
10535 /* Write ones to all the bits defined by RdMask and WrMask, then
10536 * make sure the read-only bits are not changed and the
10537 * read/write bits are all ones.
10539 tw32(offset, read_mask | write_mask);
10541 val = tr32(offset);
10543 /* Test the read-only bits. */
10544 if ((val & read_mask) != read_val)
10547 /* Test the read/write bits. */
10548 if ((val & write_mask) != write_mask)
10551 tw32(offset, save_val);
10557 if (netif_msg_hw(tp))
10558 netdev_err(tp->dev,
10559 "Register test failed at offset %x\n", offset);
10560 tw32(offset, save_val);
10564 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10566 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
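/* Editorial note: the patterns are the classic memory-test trio;
 * all-zeros and all-ones catch stuck-at bits, while the alternating
 * 0xaa55a55a word is meant to expose shorted or coupled data lines.
 */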
10570 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10571 for (j = 0; j < len; j += 4) {
10574 tg3_write_mem(tp, offset + j, test_pattern[i]);
10575 tg3_read_mem(tp, offset + j, &val);
10576 if (val != test_pattern[i])
10583 static int tg3_test_memory(struct tg3 *tp)
10585 static struct mem_entry {
10588 } mem_tbl_570x[] = {
10589 { 0x00000000, 0x00b50},
10590 { 0x00002000, 0x1c000},
10591 { 0xffffffff, 0x00000}
10592 }, mem_tbl_5705[] = {
10593 { 0x00000100, 0x0000c},
10594 { 0x00000200, 0x00008},
10595 { 0x00004000, 0x00800},
10596 { 0x00006000, 0x01000},
10597 { 0x00008000, 0x02000},
10598 { 0x00010000, 0x0e000},
10599 { 0xffffffff, 0x00000}
10600 }, mem_tbl_5755[] = {
10601 { 0x00000200, 0x00008},
10602 { 0x00004000, 0x00800},
10603 { 0x00006000, 0x00800},
10604 { 0x00008000, 0x02000},
10605 { 0x00010000, 0x0c000},
10606 { 0xffffffff, 0x00000}
10607 }, mem_tbl_5906[] = {
10608 { 0x00000200, 0x00008},
10609 { 0x00004000, 0x00400},
10610 { 0x00006000, 0x00400},
10611 { 0x00008000, 0x01000},
10612 { 0x00010000, 0x01000},
10613 { 0xffffffff, 0x00000}
10614 }, mem_tbl_5717[] = {
10615 { 0x00000200, 0x00008},
10616 { 0x00010000, 0x0a000},
10617 { 0x00020000, 0x13c00},
10618 { 0xffffffff, 0x00000}
10619 }, mem_tbl_57765[] = {
10620 { 0x00000200, 0x00008},
10621 { 0x00004000, 0x00800},
10622 { 0x00006000, 0x09800},
10623 { 0x00010000, 0x0a000},
10624 { 0xffffffff, 0x00000}
10626 struct mem_entry *mem_tbl;
10630 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
10631 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
10632 mem_tbl = mem_tbl_5717;
10633 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10634 mem_tbl = mem_tbl_57765;
10635 else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10636 mem_tbl = mem_tbl_5755;
10637 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10638 mem_tbl = mem_tbl_5906;
10639 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
10640 mem_tbl = mem_tbl_5705;
10642 mem_tbl = mem_tbl_570x;
10644 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10645 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
10653 #define TG3_MAC_LOOPBACK 0
10654 #define TG3_PHY_LOOPBACK 1
10656 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10658 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10659 u32 desc_idx, coal_now;
10660 struct sk_buff *skb, *rx_skb;
10663 int num_pkts, tx_len, rx_len, i, err;
10664 struct tg3_rx_buffer_desc *desc;
10665 struct tg3_napi *tnapi, *rnapi;
10666 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
10668 tnapi = &tp->napi[0];
10669 rnapi = &tp->napi[0];
10670 if (tp->irq_cnt > 1) {
10671 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
10672 rnapi = &tp->napi[1];
10673 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
10674 tnapi = &tp->napi[1];
10676 coal_now = tnapi->coal_now | rnapi->coal_now;
10678 if (loopback_mode == TG3_MAC_LOOPBACK) {
10679 /* HW errata - mac loopback fails in some cases on 5780.
10680 * Normal traffic and PHY loopback are not affected by
10683 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10686 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10687 MAC_MODE_PORT_INT_LPBACK;
10688 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10689 mac_mode |= MAC_MODE_LINK_POLARITY;
10690 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
10691 mac_mode |= MAC_MODE_PORT_MODE_MII;
10693 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10694 tw32(MAC_MODE, mac_mode);
10695 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10698 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10699 tg3_phy_fet_toggle_apd(tp, false);
10700 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10702 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10704 tg3_phy_toggle_automdix(tp, 0);
10706 tg3_writephy(tp, MII_BMCR, val);
10709 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10710 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10711 tg3_writephy(tp, MII_TG3_FET_PTEST,
10712 MII_TG3_FET_PTEST_FRC_TX_LINK |
10713 MII_TG3_FET_PTEST_FRC_TX_LOCK);
10714 /* The write needs to be flushed for the AC131 */
10715 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10716 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
10717 mac_mode |= MAC_MODE_PORT_MODE_MII;
10719 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10721 /* reset to prevent losing 1st rx packet intermittently */
10722 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10723 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10725 tw32_f(MAC_RX_MODE, tp->rx_mode);
10727 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10728 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
10729 if (masked_phy_id == TG3_PHY_ID_BCM5401)
10730 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10731 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
10732 mac_mode |= MAC_MODE_LINK_POLARITY;
10733 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10734 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10736 tw32(MAC_MODE, mac_mode);
10744 skb = netdev_alloc_skb(tp->dev, tx_len);
10748 tx_data = skb_put(skb, tx_len);
10749 memcpy(tx_data, tp->dev->dev_addr, 6);
10750 memset(tx_data + 6, 0x0, 8);
10752 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10754 for (i = 14; i < tx_len; i++)
10755 tx_data[i] = (u8) (i & 0xff);
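/* Resulting test frame (editorial): bytes 0-5 carry the NIC's own MAC
 * so the looped-back frame is accepted by the RX filter, bytes 6-13 are
 * zero, and bytes 14+ hold the ramp 0x0e, 0x0f, ... that the receive
 * path compares against below.
 */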
10757 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10758 if (pci_dma_mapping_error(tp->pdev, map)) {
10759 dev_kfree_skb(skb);
10763 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10768 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
10772 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
10777 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
10778 tr32_mailbox(tnapi->prodmbox);
10782 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
10783 for (i = 0; i < 35; i++) {
10784 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10789 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
10790 rx_idx = rnapi->hw_status->idx[0].rx_producer;
10791 if ((tx_idx == tnapi->tx_prod) &&
10792 (rx_idx == (rx_start_idx + num_pkts)))
10796 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10797 dev_kfree_skb(skb);
10799 if (tx_idx != tnapi->tx_prod)
10802 if (rx_idx != rx_start_idx + num_pkts)
10805 desc = &rnapi->rx_rcb[rx_start_idx];
10806 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10807 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10808 if (opaque_key != RXD_OPAQUE_RING_STD)
10811 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10812 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10815 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10816 if (rx_len != tx_len)
10819 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
10821 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
10822 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10824 for (i = 14; i < tx_len; i++) {
10825 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10830 /* tg3_free_rings will unmap and free the rx_skb */
10835 #define TG3_MAC_LOOPBACK_FAILED 1
10836 #define TG3_PHY_LOOPBACK_FAILED 2
10837 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10838 TG3_PHY_LOOPBACK_FAILED)
10840 static int tg3_test_loopback(struct tg3 *tp)
10845 if (!netif_running(tp->dev))
10846 return TG3_LOOPBACK_FAILED;
10848 err = tg3_reset_hw(tp, 1);
10850 return TG3_LOOPBACK_FAILED;
10852 /* Turn off gphy autopowerdown. */
10853 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
10854 tg3_phy_toggle_apd(tp, false);
10856 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10860 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10862 /* Wait for up to 40 microseconds to acquire lock. */
10863 for (i = 0; i < 4; i++) {
10864 status = tr32(TG3_CPMU_MUTEX_GNT);
10865 if (status == CPMU_MUTEX_GNT_DRIVER)
10870 if (status != CPMU_MUTEX_GNT_DRIVER)
10871 return TG3_LOOPBACK_FAILED;
10873 /* Turn off link-based power management. */
10874 cpmuctrl = tr32(TG3_CPMU_CTRL);
10875 tw32(TG3_CPMU_CTRL,
10876 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10877 CPMU_CTRL_LINK_AWARE_MODE));
10880 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10881 err |= TG3_MAC_LOOPBACK_FAILED;
10883 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10884 tw32(TG3_CPMU_CTRL, cpmuctrl);
10886 /* Release the mutex */
10887 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10890 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10891 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10892 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10893 err |= TG3_PHY_LOOPBACK_FAILED;
10896 /* Re-enable gphy autopowerdown. */
10897 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
10898 tg3_phy_toggle_apd(tp, true);
10903 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10906 struct tg3 *tp = netdev_priv(dev);
10908 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10909 tg3_set_power_state(tp, PCI_D0);
10911 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10913 if (tg3_test_nvram(tp) != 0) {
10914 etest->flags |= ETH_TEST_FL_FAILED;
10917 if (tg3_test_link(tp) != 0) {
10918 etest->flags |= ETH_TEST_FL_FAILED;
10921 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10922 int err, err2 = 0, irq_sync = 0;
10924 if (netif_running(dev)) {
10926 tg3_netif_stop(tp);
10930 tg3_full_lock(tp, irq_sync);
10932 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10933 err = tg3_nvram_lock(tp);
10934 tg3_halt_cpu(tp, RX_CPU_BASE);
10935 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10936 tg3_halt_cpu(tp, TX_CPU_BASE);
10938 tg3_nvram_unlock(tp);
10940 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
10943 if (tg3_test_registers(tp) != 0) {
10944 etest->flags |= ETH_TEST_FL_FAILED;
10947 if (tg3_test_memory(tp) != 0) {
10948 etest->flags |= ETH_TEST_FL_FAILED;
10951 if ((data[4] = tg3_test_loopback(tp)) != 0)
10952 etest->flags |= ETH_TEST_FL_FAILED;
10954 tg3_full_unlock(tp);
10956 if (tg3_test_interrupt(tp) != 0) {
10957 etest->flags |= ETH_TEST_FL_FAILED;
10961 tg3_full_lock(tp, 0);
10963 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10964 if (netif_running(dev)) {
10965 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10966 err2 = tg3_restart_hw(tp, 1);
10968 tg3_netif_start(tp);
10971 tg3_full_unlock(tp);
10973 if (irq_sync && !err2)
10976 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10977 tg3_set_power_state(tp, PCI_D3hot);
10981 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10983 struct mii_ioctl_data *data = if_mii(ifr);
10984 struct tg3 *tp = netdev_priv(dev);
10987 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10988 struct phy_device *phydev;
10989 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10991 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10992 return phy_mii_ioctl(phydev, ifr, cmd);
10997 data->phy_id = tp->phy_addr;
11000 case SIOCGMIIREG: {
11003 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11004 break; /* We have no PHY */
11006 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11009 spin_lock_bh(&tp->lock);
11010 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11011 spin_unlock_bh(&tp->lock);
11013 data->val_out = mii_regval;
11019 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11020 break; /* We have no PHY */
11022 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11025 spin_lock_bh(&tp->lock);
11026 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11027 spin_unlock_bh(&tp->lock);
11035 return -EOPNOTSUPP;
11038 #if TG3_VLAN_TAG_USED
11039 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
11041 struct tg3 *tp = netdev_priv(dev);
11043 if (!netif_running(dev)) {
11048 tg3_netif_stop(tp);
11050 tg3_full_lock(tp, 0);
11054 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
11055 __tg3_set_rx_mode(dev);
11057 tg3_netif_start(tp);
11059 tg3_full_unlock(tp);
11063 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11065 struct tg3 *tp = netdev_priv(dev);
11067 memcpy(ec, &tp->coal, sizeof(*ec));
11071 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11073 struct tg3 *tp = netdev_priv(dev);
11074 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11075 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11077 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
11078 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11079 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11080 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11081 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11084 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11085 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11086 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11087 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11088 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11089 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11090 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11091 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11092 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11093 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11096 /* No rx interrupts will be generated if both are zero */
11097 if ((ec->rx_coalesce_usecs == 0) &&
11098 (ec->rx_max_coalesced_frames == 0))
11101 /* No tx interrupts will be generated if both are zero */
11102 if ((ec->tx_coalesce_usecs == 0) &&
11103 (ec->tx_max_coalesced_frames == 0))
11106 /* Only copy relevant parameters, ignore all others. */
11107 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11108 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11109 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11110 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11111 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11112 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11113 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11114 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11115 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11117 if (netif_running(dev)) {
11118 tg3_full_lock(tp, 0);
11119 __tg3_set_coalesce(tp, &tp->coal);
11120 tg3_full_unlock(tp);
11125 static const struct ethtool_ops tg3_ethtool_ops = {
11126 .get_settings = tg3_get_settings,
11127 .set_settings = tg3_set_settings,
11128 .get_drvinfo = tg3_get_drvinfo,
11129 .get_regs_len = tg3_get_regs_len,
11130 .get_regs = tg3_get_regs,
11131 .get_wol = tg3_get_wol,
11132 .set_wol = tg3_set_wol,
11133 .get_msglevel = tg3_get_msglevel,
11134 .set_msglevel = tg3_set_msglevel,
11135 .nway_reset = tg3_nway_reset,
11136 .get_link = ethtool_op_get_link,
11137 .get_eeprom_len = tg3_get_eeprom_len,
11138 .get_eeprom = tg3_get_eeprom,
11139 .set_eeprom = tg3_set_eeprom,
11140 .get_ringparam = tg3_get_ringparam,
11141 .set_ringparam = tg3_set_ringparam,
11142 .get_pauseparam = tg3_get_pauseparam,
11143 .set_pauseparam = tg3_set_pauseparam,
11144 .get_rx_csum = tg3_get_rx_csum,
11145 .set_rx_csum = tg3_set_rx_csum,
11146 .set_tx_csum = tg3_set_tx_csum,
11147 .set_sg = ethtool_op_set_sg,
11148 .set_tso = tg3_set_tso,
11149 .self_test = tg3_self_test,
11150 .get_strings = tg3_get_strings,
11151 .phys_id = tg3_phys_id,
11152 .get_ethtool_stats = tg3_get_ethtool_stats,
11153 .get_coalesce = tg3_get_coalesce,
11154 .set_coalesce = tg3_set_coalesce,
11155 .get_sset_count = tg3_get_sset_count,
11158 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11160 u32 cursize, val, magic;
11162 tp->nvram_size = EEPROM_CHIP_SIZE;
11164 if (tg3_nvram_read(tp, 0, &magic) != 0)
11167 if ((magic != TG3_EEPROM_MAGIC) &&
11168 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11169 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11173 * Size the chip by reading offsets at increasing powers of two.
11174 * When we encounter our validation signature, we know the addressing
11175 * has wrapped around, and thus have our chip size.
11179 while (cursize < tp->nvram_size) {
11180 if (tg3_nvram_read(tp, cursize, &val) != 0)
11189 tp->nvram_size = cursize;
11192 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11196 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
11197 tg3_nvram_read(tp, 0, &val) != 0)
11200 /* Selfboot format */
11201 if (val != TG3_EEPROM_MAGIC) {
11202 tg3_get_eeprom_size(tp);
11206 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11208 /* This is confusing. We want to operate on the
11209 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11210 * call will read from NVRAM and byteswap the data
11211 * according to the byteswapping settings for all
11212 * other register accesses. This ensures the data we
11213 * want will always reside in the lower 16-bits.
11214 * However, the data in NVRAM is in LE format, which
11215 * means the data from the NVRAM read will always be
11216 * opposite the endianness of the CPU. The 16-bit
11217 * byteswap then brings the data to CPU endianness.
11219 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
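/* Example (editorial): if the swab16() above decodes the size halfword
 * as 512, nvram_size becomes 512 * 1024 bytes, i.e. a 512 KB part.
 */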
11223 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11226 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11230 nvcfg1 = tr32(NVRAM_CFG1);
11231 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11232 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11234 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11235 tw32(NVRAM_CFG1, nvcfg1);
11238 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11239 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11240 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11241 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11242 tp->nvram_jedecnum = JEDEC_ATMEL;
11243 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11244 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11246 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11247 tp->nvram_jedecnum = JEDEC_ATMEL;
11248 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11250 case FLASH_VENDOR_ATMEL_EEPROM:
11251 tp->nvram_jedecnum = JEDEC_ATMEL;
11252 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11253 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11255 case FLASH_VENDOR_ST:
11256 tp->nvram_jedecnum = JEDEC_ST;
11257 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11258 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11260 case FLASH_VENDOR_SAIFUN:
11261 tp->nvram_jedecnum = JEDEC_SAIFUN;
11262 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11264 case FLASH_VENDOR_SST_SMALL:
11265 case FLASH_VENDOR_SST_LARGE:
11266 tp->nvram_jedecnum = JEDEC_SST;
11267 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11271 tp->nvram_jedecnum = JEDEC_ATMEL;
11272 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11273 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11277 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11279 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11280 case FLASH_5752PAGE_SIZE_256:
11281 tp->nvram_pagesize = 256;
11283 case FLASH_5752PAGE_SIZE_512:
11284 tp->nvram_pagesize = 512;
11286 case FLASH_5752PAGE_SIZE_1K:
11287 tp->nvram_pagesize = 1024;
11289 case FLASH_5752PAGE_SIZE_2K:
11290 tp->nvram_pagesize = 2048;
11292 case FLASH_5752PAGE_SIZE_4K:
11293 tp->nvram_pagesize = 4096;
11295 case FLASH_5752PAGE_SIZE_264:
11296 tp->nvram_pagesize = 264;
11298 case FLASH_5752PAGE_SIZE_528:
11299 tp->nvram_pagesize = 528;
11304 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11308 nvcfg1 = tr32(NVRAM_CFG1);
11310 /* NVRAM protection for TPM */
11311 if (nvcfg1 & (1 << 27))
11312 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11314 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11315 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11316 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11317 tp->nvram_jedecnum = JEDEC_ATMEL;
11318 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11320 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11321 tp->nvram_jedecnum = JEDEC_ATMEL;
11322 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11323 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11325 case FLASH_5752VENDOR_ST_M45PE10:
11326 case FLASH_5752VENDOR_ST_M45PE20:
11327 case FLASH_5752VENDOR_ST_M45PE40:
11328 tp->nvram_jedecnum = JEDEC_ST;
11329 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11330 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11334 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
11335 tg3_nvram_get_pagesize(tp, nvcfg1);
11337 /* For eeprom, set pagesize to maximum eeprom size */
11338 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11340 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11341 tw32(NVRAM_CFG1, nvcfg1);
11345 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11347 u32 nvcfg1, protect = 0;
11349 nvcfg1 = tr32(NVRAM_CFG1);
11351 /* NVRAM protection for TPM */
11352 if (nvcfg1 & (1 << 27)) {
11353 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11357 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11358 switch (nvcfg1) {
11359 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11360 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11361 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11362 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11363 tp->nvram_jedecnum = JEDEC_ATMEL;
11364 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11365 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11366 tp->nvram_pagesize = 264;
11367 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11368 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11369 tp->nvram_size = (protect ? 0x3e200 :
11370 TG3_NVRAM_SIZE_512KB);
11371 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11372 tp->nvram_size = (protect ? 0x1f200 :
11373 TG3_NVRAM_SIZE_256KB);
11375 tp->nvram_size = (protect ? 0x1f200 :
11376 TG3_NVRAM_SIZE_128KB);
11378 case FLASH_5752VENDOR_ST_M45PE10:
11379 case FLASH_5752VENDOR_ST_M45PE20:
11380 case FLASH_5752VENDOR_ST_M45PE40:
11381 tp->nvram_jedecnum = JEDEC_ST;
11382 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11383 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11384 tp->nvram_pagesize = 256;
11385 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11386 tp->nvram_size = (protect ?
11387 TG3_NVRAM_SIZE_64KB :
11388 TG3_NVRAM_SIZE_128KB);
11389 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11390 tp->nvram_size = (protect ?
11391 TG3_NVRAM_SIZE_64KB :
11392 TG3_NVRAM_SIZE_256KB);
11394 tp->nvram_size = (protect ?
11395 TG3_NVRAM_SIZE_128KB :
11396 TG3_NVRAM_SIZE_512KB);
11401 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11405 nvcfg1 = tr32(NVRAM_CFG1);
11407 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11408 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11409 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11410 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11411 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11412 tp->nvram_jedecnum = JEDEC_ATMEL;
11413 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11414 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11416 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11417 tw32(NVRAM_CFG1, nvcfg1);
11419 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11420 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11421 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11422 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11423 tp->nvram_jedecnum = JEDEC_ATMEL;
11424 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11425 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11426 tp->nvram_pagesize = 264;
11428 case FLASH_5752VENDOR_ST_M45PE10:
11429 case FLASH_5752VENDOR_ST_M45PE20:
11430 case FLASH_5752VENDOR_ST_M45PE40:
11431 tp->nvram_jedecnum = JEDEC_ST;
11432 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11433 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11434 tp->nvram_pagesize = 256;
11439 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11441 u32 nvcfg1, protect = 0;
11443 nvcfg1 = tr32(NVRAM_CFG1);
11445 /* NVRAM protection for TPM */
11446 if (nvcfg1 & (1 << 27)) {
11447 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11451 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11452 switch (nvcfg1) {
11453 case FLASH_5761VENDOR_ATMEL_ADB021D:
11454 case FLASH_5761VENDOR_ATMEL_ADB041D:
11455 case FLASH_5761VENDOR_ATMEL_ADB081D:
11456 case FLASH_5761VENDOR_ATMEL_ADB161D:
11457 case FLASH_5761VENDOR_ATMEL_MDB021D:
11458 case FLASH_5761VENDOR_ATMEL_MDB041D:
11459 case FLASH_5761VENDOR_ATMEL_MDB081D:
11460 case FLASH_5761VENDOR_ATMEL_MDB161D:
11461 tp->nvram_jedecnum = JEDEC_ATMEL;
11462 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11463 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11464 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11465 tp->nvram_pagesize = 256;
11467 case FLASH_5761VENDOR_ST_A_M45PE20:
11468 case FLASH_5761VENDOR_ST_A_M45PE40:
11469 case FLASH_5761VENDOR_ST_A_M45PE80:
11470 case FLASH_5761VENDOR_ST_A_M45PE16:
11471 case FLASH_5761VENDOR_ST_M_M45PE20:
11472 case FLASH_5761VENDOR_ST_M_M45PE40:
11473 case FLASH_5761VENDOR_ST_M_M45PE80:
11474 case FLASH_5761VENDOR_ST_M_M45PE16:
11475 tp->nvram_jedecnum = JEDEC_ST;
11476 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11477 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11478 tp->nvram_pagesize = 256;
11483 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11486 case FLASH_5761VENDOR_ATMEL_ADB161D:
11487 case FLASH_5761VENDOR_ATMEL_MDB161D:
11488 case FLASH_5761VENDOR_ST_A_M45PE16:
11489 case FLASH_5761VENDOR_ST_M_M45PE16:
11490 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11492 case FLASH_5761VENDOR_ATMEL_ADB081D:
11493 case FLASH_5761VENDOR_ATMEL_MDB081D:
11494 case FLASH_5761VENDOR_ST_A_M45PE80:
11495 case FLASH_5761VENDOR_ST_M_M45PE80:
11496 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11498 case FLASH_5761VENDOR_ATMEL_ADB041D:
11499 case FLASH_5761VENDOR_ATMEL_MDB041D:
11500 case FLASH_5761VENDOR_ST_A_M45PE40:
11501 case FLASH_5761VENDOR_ST_M_M45PE40:
11502 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11504 case FLASH_5761VENDOR_ATMEL_ADB021D:
11505 case FLASH_5761VENDOR_ATMEL_MDB021D:
11506 case FLASH_5761VENDOR_ST_A_M45PE20:
11507 case FLASH_5761VENDOR_ST_M_M45PE20:
11508 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11514 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11516 tp->nvram_jedecnum = JEDEC_ATMEL;
11517 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11518 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11521 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11525 nvcfg1 = tr32(NVRAM_CFG1);
11527 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11528 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11529 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11530 tp->nvram_jedecnum = JEDEC_ATMEL;
11531 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11532 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11534 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11535 tw32(NVRAM_CFG1, nvcfg1);
11537 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11538 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11539 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11540 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11541 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11542 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11543 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11544 tp->nvram_jedecnum = JEDEC_ATMEL;
11545 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11546 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11548 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11549 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11550 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11551 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11552 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11554 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11555 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11556 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11558 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11559 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11560 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11564 case FLASH_5752VENDOR_ST_M45PE10:
11565 case FLASH_5752VENDOR_ST_M45PE20:
11566 case FLASH_5752VENDOR_ST_M45PE40:
11567 tp->nvram_jedecnum = JEDEC_ST;
11568 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11569 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11571 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11572 case FLASH_5752VENDOR_ST_M45PE10:
11573 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11575 case FLASH_5752VENDOR_ST_M45PE20:
11576 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11578 case FLASH_5752VENDOR_ST_M45PE40:
11579 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11584 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11588 tg3_nvram_get_pagesize(tp, nvcfg1);
11589 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11590 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11594 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11598 nvcfg1 = tr32(NVRAM_CFG1);
11600 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11601 case FLASH_5717VENDOR_ATMEL_EEPROM:
11602 case FLASH_5717VENDOR_MICRO_EEPROM:
11603 tp->nvram_jedecnum = JEDEC_ATMEL;
11604 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11605 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11607 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11608 tw32(NVRAM_CFG1, nvcfg1);
11610 case FLASH_5717VENDOR_ATMEL_MDB011D:
11611 case FLASH_5717VENDOR_ATMEL_ADB011B:
11612 case FLASH_5717VENDOR_ATMEL_ADB011D:
11613 case FLASH_5717VENDOR_ATMEL_MDB021D:
11614 case FLASH_5717VENDOR_ATMEL_ADB021B:
11615 case FLASH_5717VENDOR_ATMEL_ADB021D:
11616 case FLASH_5717VENDOR_ATMEL_45USPT:
11617 tp->nvram_jedecnum = JEDEC_ATMEL;
11618 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11619 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11621 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11622 case FLASH_5717VENDOR_ATMEL_MDB021D:
11623 case FLASH_5717VENDOR_ATMEL_ADB021B:
11624 case FLASH_5717VENDOR_ATMEL_ADB021D:
11625 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11628 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11632 case FLASH_5717VENDOR_ST_M_M25PE10:
11633 case FLASH_5717VENDOR_ST_A_M25PE10:
11634 case FLASH_5717VENDOR_ST_M_M45PE10:
11635 case FLASH_5717VENDOR_ST_A_M45PE10:
11636 case FLASH_5717VENDOR_ST_M_M25PE20:
11637 case FLASH_5717VENDOR_ST_A_M25PE20:
11638 case FLASH_5717VENDOR_ST_M_M45PE20:
11639 case FLASH_5717VENDOR_ST_A_M45PE20:
11640 case FLASH_5717VENDOR_ST_25USPT:
11641 case FLASH_5717VENDOR_ST_45USPT:
11642 tp->nvram_jedecnum = JEDEC_ST;
11643 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11644 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11646 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11647 case FLASH_5717VENDOR_ST_M_M25PE20:
11648 case FLASH_5717VENDOR_ST_A_M25PE20:
11649 case FLASH_5717VENDOR_ST_M_M45PE20:
11650 case FLASH_5717VENDOR_ST_A_M45PE20:
11651 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11654 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11659 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11663 tg3_nvram_get_pagesize(tp, nvcfg1);
11664 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11665 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11668 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
11669 static void __devinit tg3_nvram_init(struct tg3 *tp)
11671 tw32_f(GRC_EEPROM_ADDR,
11672 (EEPROM_ADDR_FSM_RESET |
11673 (EEPROM_DEFAULT_CLOCK_PERIOD <<
11674 EEPROM_ADDR_CLKPERD_SHIFT)));
11678 /* Enable seeprom accesses. */
11679 tw32_f(GRC_LOCAL_CTRL,
11680 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
11683 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11684 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
11685 tp->tg3_flags |= TG3_FLAG_NVRAM;
11687 if (tg3_nvram_lock(tp)) {
11688 netdev_warn(tp->dev,
11689 "Cannot get nvram lock, %s failed\n",
11693 tg3_enable_nvram_access(tp);
11695 tp->nvram_size = 0;
11697 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11698 tg3_get_5752_nvram_info(tp);
11699 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11700 tg3_get_5755_nvram_info(tp);
11701 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11702 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11703 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11704 tg3_get_5787_nvram_info(tp);
11705 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11706 tg3_get_5761_nvram_info(tp);
11707 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11708 tg3_get_5906_nvram_info(tp);
11709 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
11710 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11711 tg3_get_57780_nvram_info(tp);
11712 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
11713 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
11714 tg3_get_5717_nvram_info(tp);
11716 tg3_get_nvram_info(tp);
11718 if (tp->nvram_size == 0)
11719 tg3_get_nvram_size(tp);
11721 tg3_disable_nvram_access(tp);
11722 tg3_nvram_unlock(tp);
11725 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
11727 tg3_get_eeprom_size(tp);
11731 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11732 u32 offset, u32 len, u8 *buf)
11737 for (i = 0; i < len; i += 4) {
11743 memcpy(&data, buf + i, 4);
11746 * The SEEPROM interface expects the data to always be opposite
11747 * the native endian format. We accomplish this by reversing
11748 * all the operations that would have been performed on the
11749 * data from a call to tg3_nvram_read_be32().
11751 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
11753 val = tr32(GRC_EEPROM_ADDR);
11754 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11756 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11758 tw32(GRC_EEPROM_ADDR, val |
11759 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11760 (addr & EEPROM_ADDR_ADDR_MASK) |
11761 EEPROM_ADDR_START |
11762 EEPROM_ADDR_WRITE);
11764 for (j = 0; j < 1000; j++) {
11765 val = tr32(GRC_EEPROM_ADDR);
11767 if (val & EEPROM_ADDR_COMPLETE)
11771 if (!(val & EEPROM_ADDR_COMPLETE)) {
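/*
 * Editor's note (illustrative, not driver code): the
 * swab32(be32_to_cpu(...)) pair above exactly undoes what
 * tg3_nvram_read_be32() would have done to the buffer, which works
 * out to interpreting the caller's bytes as little-endian on any CPU:
 */
static u32 seeprom_word_example(const u8 *buf)
{
	__be32 data;

	memcpy(&data, buf, 4);
	/* same result on LE and BE CPUs: le32_to_cpup((const __le32 *)buf) */
	return swab32(be32_to_cpu(data));
}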
11780 /* offset and length are dword aligned */
11781 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11785 u32 pagesize = tp->nvram_pagesize;
11786 u32 pagemask = pagesize - 1;
11790 tmp = kmalloc(pagesize, GFP_KERNEL);
11796 u32 phy_addr, page_off, size;
11798 phy_addr = offset & ~pagemask;
11800 for (j = 0; j < pagesize; j += 4) {
11801 ret = tg3_nvram_read_be32(tp, phy_addr + j,
11802 (__be32 *) (tmp + j));
11809 page_off = offset & pagemask;
11816 memcpy(tmp + page_off, buf, size);
11818 offset = offset + (pagesize - page_off);
11820 tg3_enable_nvram_access(tp);
11823 * Before we can erase the flash page, we need
11824 * to issue a special "write enable" command.
11826 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11828 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11831 /* Erase the target page */
11832 tw32(NVRAM_ADDR, phy_addr);
11834 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11835 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11837 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11840 /* Issue another write enable to start the write. */
11841 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11843 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11846 for (j = 0; j < pagesize; j += 4) {
11849 data = *((__be32 *) (tmp + j));
11851 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11853 tw32(NVRAM_ADDR, phy_addr + j);
11855 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11859 nvram_cmd |= NVRAM_CMD_FIRST;
11860 else if (j == (pagesize - 4))
11861 nvram_cmd |= NVRAM_CMD_LAST;
11863 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11870 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11871 tg3_nvram_exec_cmd(tp, nvram_cmd);
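/*
 * Editor's sketch of the unbuffered-flash sequence implemented above:
 * because the part can only erase whole pages, a partial write must
 * read-modify-write the page. The struct and callbacks below are
 * hypothetical abstractions of the NVRAM_CMD_* programming done by
 * tg3_nvram_write_block_unbuffered(); the sketch assumes the write
 * fits inside one page.
 */
struct flash_ops_example {
	int (*read_page)(u32 addr, u8 *buf, u32 len);
	int (*erase_page)(u32 addr);	/* implies a prior write-enable */
	int (*program_word)(u32 addr, u32 word, bool first, bool last);
};

static int flash_rmw_example(const struct flash_ops_example *ops,
			     u32 offset, const u8 *src, u32 len,
			     u8 *page_buf, u32 pagesize)
{
	u32 page_addr = offset & ~(pagesize - 1);
	u32 page_off = offset & (pagesize - 1);
	u32 j;
	int ret;

	/* 1. read the whole page so bytes outside the write survive */
	ret = ops->read_page(page_addr, page_buf, pagesize);
	if (ret)
		return ret;
	/* 2. overlay the caller's data */
	memcpy(page_buf + page_off, src, len);
	/* 3. erase, then reprogram the full page one word at a time,
	 *    framing the burst with FIRST on word 0 and LAST on the
	 *    final word, as the loop above does
	 */
	ret = ops->erase_page(page_addr);
	if (ret)
		return ret;
	for (j = 0; j < pagesize; j += 4) {
		__be32 w;

		memcpy(&w, page_buf + j, 4);
		ret = ops->program_word(page_addr + j, be32_to_cpu(w),
					j == 0, j == pagesize - 4);
		if (ret)
			return ret;
	}
	return 0;
}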
11878 /* offset and length are dword aligned */
11879 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11884 for (i = 0; i < len; i += 4, offset += 4) {
11885 u32 page_off, phy_addr, nvram_cmd;
11888 memcpy(&data, buf + i, 4);
11889 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11891 page_off = offset % tp->nvram_pagesize;
11893 phy_addr = tg3_nvram_phys_addr(tp, offset);
11895 tw32(NVRAM_ADDR, phy_addr);
11897 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11899 if (page_off == 0 || i == 0)
11900 nvram_cmd |= NVRAM_CMD_FIRST;
11901 if (page_off == (tp->nvram_pagesize - 4))
11902 nvram_cmd |= NVRAM_CMD_LAST;
11904 if (i == (len - 4))
11905 nvram_cmd |= NVRAM_CMD_LAST;
11907 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11908 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
11909 (tp->nvram_jedecnum == JEDEC_ST) &&
11910 (nvram_cmd & NVRAM_CMD_FIRST)) {
11912 if ((ret = tg3_nvram_exec_cmd(tp,
11913 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11918 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11919 /* We always do complete word writes to eeprom. */
11920 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11923 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
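/*
 * Editor's worked example (hypothetical 256-byte page) of the framing
 * bits chosen above: FIRST marks the first word of a page or of the
 * whole transfer, LAST marks the final word of a page or of the whole
 * transfer, so the controller always sees well-framed bursts.
 */
static u32 nvram_framing_example(u32 page_off, u32 pagesize,
				 bool first_word, bool final_word)
{
	u32 cmd = 0;

	if (page_off == 0 || first_word)
		cmd |= NVRAM_CMD_FIRST;	/* e.g. offset 0x100 -> page_off 0 */
	if (page_off == pagesize - 4 || final_word)
		cmd |= NVRAM_CMD_LAST;	/* e.g. offset 0x1fc -> page_off 252 */
	return cmd;
}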
11929 /* offset and length are dword aligned */
11930 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11934 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11935 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11936 ~GRC_LCLCTRL_GPIO_OUTPUT1);
11940 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11941 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11945 ret = tg3_nvram_lock(tp);
11949 tg3_enable_nvram_access(tp);
11950 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11951 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
11952 tw32(NVRAM_WRITE1, 0x406);
11954 grc_mode = tr32(GRC_MODE);
11955 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11957 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11958 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11960 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11963 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11967 grc_mode = tr32(GRC_MODE);
11968 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11970 tg3_disable_nvram_access(tp);
11971 tg3_nvram_unlock(tp);
11974 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11975 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11982 struct subsys_tbl_ent {
11983 u16 subsys_vendor, subsys_devid;
11987 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
11988 /* Broadcom boards. */
11989 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11990 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
11991 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11992 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
11993 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11994 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
11995 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11996 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
11997 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11998 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
11999 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12000 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12001 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12002 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12003 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12004 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12005 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12006 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12007 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12008 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12009 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12010 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12013 { TG3PCI_SUBVENDOR_ID_3COM,
12014 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12015 { TG3PCI_SUBVENDOR_ID_3COM,
12016 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12017 { TG3PCI_SUBVENDOR_ID_3COM,
12018 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12019 { TG3PCI_SUBVENDOR_ID_3COM,
12020 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12021 { TG3PCI_SUBVENDOR_ID_3COM,
12022 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12025 { TG3PCI_SUBVENDOR_ID_DELL,
12026 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12027 { TG3PCI_SUBVENDOR_ID_DELL,
12028 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12029 { TG3PCI_SUBVENDOR_ID_DELL,
12030 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12031 { TG3PCI_SUBVENDOR_ID_DELL,
12032 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12034 /* Compaq boards. */
12035 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12036 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12037 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12038 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12039 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12040 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12041 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12042 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12043 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12044 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12047 { TG3PCI_SUBVENDOR_ID_IBM,
12048 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12051 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12055 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12056 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12057 tp->pdev->subsystem_vendor) &&
12058 (subsys_id_to_phy_id[i].subsys_devid ==
12059 tp->pdev->subsystem_device))
12060 return &subsys_id_to_phy_id[i];
12065 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12070 /* On some early chips the SRAM cannot be accessed in D3hot state,
12071 * so we need to make sure we're in D0.
12073 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12074 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12075 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12078 /* Make sure register accesses (indirect or otherwise)
12079 * will function correctly.
12081 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12082 tp->misc_host_ctrl);
12084 /* The memory arbiter has to be enabled in order for SRAM accesses
12085 * to succeed. Normally on powerup the tg3 chip firmware will make
12086 * sure it is enabled, but other entities such as system netboot
12087 * code might disable it.
12089 val = tr32(MEMARB_MODE);
12090 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12092 tp->phy_id = TG3_PHY_ID_INVALID;
12093 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12095 /* Assume an onboard device and WOL-capable by default. */
12096 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
12098 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12099 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12100 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12101 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12103 val = tr32(VCPU_CFGSHDW);
12104 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12105 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12106 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12107 (val & VCPU_CFGSHDW_WOL_MAGPKT))
12108 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12112 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12113 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12114 u32 nic_cfg, led_cfg;
12115 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12116 int eeprom_phy_serdes = 0;
12118 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12119 tp->nic_sram_data_cfg = nic_cfg;
12121 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12122 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12123 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12124 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12125 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12126 (ver > 0) && (ver < 0x100))
12127 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12129 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12130 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12132 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12133 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12134 eeprom_phy_serdes = 1;
12136 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12137 if (nic_phy_id != 0) {
12138 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12139 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12141 eeprom_phy_id = (id1 >> 16) << 10;
12142 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12143 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12147 tp->phy_id = eeprom_phy_id;
12148 if (eeprom_phy_serdes) {
12149 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12150 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12152 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12155 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12156 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12157 SHASTA_EXT_LED_MODE_MASK);
12159 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12163 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12164 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12167 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12168 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12171 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12172 tp->led_ctrl = LED_CTRL_MODE_MAC;
12174 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12175 * read from some older 5700/5701 bootcode.
12177 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12179 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12181 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12185 case SHASTA_EXT_LED_SHARED:
12186 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12187 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12188 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12189 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12190 LED_CTRL_MODE_PHY_2);
12193 case SHASTA_EXT_LED_MAC:
12194 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12197 case SHASTA_EXT_LED_COMBO:
12198 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12199 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12200 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12201 LED_CTRL_MODE_PHY_2);
12206 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12207 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12208 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12209 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12211 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12212 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12214 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12215 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
12216 if ((tp->pdev->subsystem_vendor ==
12217 PCI_VENDOR_ID_ARIMA) &&
12218 (tp->pdev->subsystem_device == 0x205a ||
12219 tp->pdev->subsystem_device == 0x2063))
12220 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12222 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12223 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12226 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12227 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
12228 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12229 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
12232 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12233 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12234 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
12236 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12237 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12238 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
12240 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
12241 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
12242 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12244 if (cfg2 & (1 << 17))
12245 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12247 /* Serdes signal pre-emphasis in register 0x590 is set by the
12248 * bootcode if bit 18 is set. */
12249 if (cfg2 & (1 << 18))
12250 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12252 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12253 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12254 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12255 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12257 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12258 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12259 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
12262 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12263 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12264 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12267 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12268 tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE;
12269 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12270 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
12271 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12272 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
12275 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
12276 device_set_wakeup_enable(&tp->pdev->dev,
12277 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
12280 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12285 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12286 tw32(OTP_CTRL, cmd);
12288 /* Wait for up to 1 ms for command to execute. */
12289 for (i = 0; i < 100; i++) {
12290 val = tr32(OTP_STATUS);
12291 if (val & OTP_STATUS_CMD_DONE)
12296 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
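/*
 * Editor's sketch of the bounded-poll pattern used above: issue the
 * command, then spin on a status register until the DONE bit appears
 * or the budget expires. poll_status() is a hypothetical stand-in for
 * tr32(OTP_STATUS); the delay between reads (elided above) keeps the
 * total wait near the documented 1 ms.
 */
static int poll_done_example(u32 (*poll_status)(void), u32 done_bit,
			     unsigned int tries, unsigned int delay_us)
{
	u32 val = 0;

	while (tries--) {
		val = poll_status();
		if (val & done_bit)
			break;
		udelay(delay_us);
	}
	return (val & done_bit) ? 0 : -EBUSY;
}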
12299 /* Read the gphy configuration from the OTP region of the chip. The gphy
12300 * configuration is a 32-bit value that straddles the alignment boundary.
12301 * We do two 32-bit reads and then shift and merge the results.
12303 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12305 u32 bhalf_otp, thalf_otp;
12307 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12309 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12312 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12314 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12317 thalf_otp = tr32(OTP_READ_DATA);
12319 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12321 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12324 bhalf_otp = tr32(OTP_READ_DATA);
12326 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
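/*
 * Editor's worked example of the merge above (values hypothetical):
 * with thalf_otp = 0x1111aaaa and bhalf_otp = 0xbbbb2222,
 *
 *   ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16)
 *    = 0xaaaa0000 | 0x0000bbbb
 *    = 0xaaaabbbb
 *
 * i.e. the low half of the first aligned word supplies the top of the
 * straddling gphy config and the high half of the second word supplies
 * the bottom.
 */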
12329 static int __devinit tg3_phy_probe(struct tg3 *tp)
12331 u32 hw_phy_id_1, hw_phy_id_2;
12332 u32 hw_phy_id, hw_phy_id_masked;
12335 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
12336 return tg3_phy_init(tp);
12338 /* Reading the PHY ID register can conflict with ASF
12339 * firmware access to the PHY hardware.
12342 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12343 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
12344 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12346 /* Now read the physical PHY_ID from the chip and verify
12347 * that it is sane. If it doesn't look good, we fall back
12348 * first to the hard-coded, table-based PHY_ID and, failing
12349 * that, to the value found in the eeprom area.
12351 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12352 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12354 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12355 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12356 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
12358 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12361 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12362 tp->phy_id = hw_phy_id;
12363 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12364 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12366 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12368 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12369 /* Do nothing, phy ID already set up in
12370 * tg3_get_eeprom_hw_cfg().
12373 struct subsys_tbl_ent *p;
12375 /* No eeprom signature? Try the hardcoded
12376 * subsys device table.
12378 p = tg3_lookup_by_subsys(tp);
12382 tp->phy_id = p->phy_id;
12383 if (!tp->phy_id ||
12384 tp->phy_id == TG3_PHY_ID_BCM8002)
12385 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12389 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12390 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
12391 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
12392 u32 bmsr, adv_reg, tg3_ctrl, mask;
12394 tg3_readphy(tp, MII_BMSR, &bmsr);
12395 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12396 (bmsr & BMSR_LSTATUS))
12397 goto skip_phy_reset;
12399 err = tg3_phy_reset(tp);
12403 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
12404 ADVERTISE_100HALF | ADVERTISE_100FULL |
12405 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
12407 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
12408 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
12409 MII_TG3_CTRL_ADV_1000_FULL);
12410 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12411 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
12412 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
12413 MII_TG3_CTRL_ENABLE_AS_MASTER);
12416 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12417 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12418 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12419 if (!tg3_copper_is_advertising_all(tp, mask)) {
12420 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12422 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12423 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12425 tg3_writephy(tp, MII_BMCR,
12426 BMCR_ANENABLE | BMCR_ANRESTART);
12428 tg3_phy_set_wirespeed(tp);
12430 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12431 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12432 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12436 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
12437 err = tg3_init_5401phy_dsp(tp);
12441 err = tg3_init_5401phy_dsp(tp);
12444 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12445 tp->link_config.advertising =
12446 (ADVERTISED_1000baseT_Half |
12447 ADVERTISED_1000baseT_Full |
12448 ADVERTISED_Autoneg |
12450 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
12451 tp->link_config.advertising &=
12452 ~(ADVERTISED_1000baseT_Half |
12453 ADVERTISED_1000baseT_Full);
12458 static void __devinit tg3_read_vpd(struct tg3 *tp)
12461 unsigned int block_end, rosize, len;
12465 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
12466 tg3_nvram_read(tp, 0x0, &magic))
12469 vpd_data = kmalloc(TG3_NVM_VPD_LEN, GFP_KERNEL);
12473 if (magic == TG3_EEPROM_MAGIC) {
12474 for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
12477 /* The data is in little-endian format in NVRAM.
12478 * Use the big-endian read routines to preserve
12479 * the byte order as it exists in NVRAM.
12481 if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp))
12482 goto out_not_found;
12484 memcpy(&vpd_data[i], &tmp, sizeof(tmp));
12488 unsigned int pos = 0;
12490 for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
12491 cnt = pci_read_vpd(tp->pdev, pos,
12492 TG3_NVM_VPD_LEN - pos,
12494 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12497 goto out_not_found;
12499 if (pos != TG3_NVM_VPD_LEN)
12500 goto out_not_found;
12503 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
12504 PCI_VPD_LRDT_RO_DATA);
12506 goto out_not_found;
12508 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
12509 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
12510 i += PCI_VPD_LRDT_TAG_SIZE;
12512 if (block_end > TG3_NVM_VPD_LEN)
12513 goto out_not_found;
12515 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12516 PCI_VPD_RO_KEYWORD_MFR_ID);
12518 len = pci_vpd_info_field_size(&vpd_data[j]);
12520 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12521 if (j + len > block_end || len != 4 ||
12522 memcmp(&vpd_data[j], "1028", 4))
12525 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12526 PCI_VPD_RO_KEYWORD_VENDOR0);
12530 len = pci_vpd_info_field_size(&vpd_data[j]);
12532 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12533 if (j + len > block_end)
12536 memcpy(tp->fw_ver, &vpd_data[j], len);
12537 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
12541 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12542 PCI_VPD_RO_KEYWORD_PARTNO);
12544 goto out_not_found;
12546 len = pci_vpd_info_field_size(&vpd_data[i]);
12548 i += PCI_VPD_INFO_FLD_HDR_SIZE;
12549 if (len > TG3_BPN_SIZE ||
12550 (len + i) > TG3_NVM_VPD_LEN)
12551 goto out_not_found;
12553 memcpy(tp->board_part_number, &vpd_data[i], len);
12557 if (tp->board_part_number[0])
12561 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
12562 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
12563 strcpy(tp->board_part_number, "BCM5717");
12564 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
12565 strcpy(tp->board_part_number, "BCM5718");
12568 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
12569 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
12570 strcpy(tp->board_part_number, "BCM57780");
12571 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
12572 strcpy(tp->board_part_number, "BCM57760");
12573 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
12574 strcpy(tp->board_part_number, "BCM57790");
12575 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
12576 strcpy(tp->board_part_number, "BCM57788");
12579 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
12580 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
12581 strcpy(tp->board_part_number, "BCM57761");
12582 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
12583 strcpy(tp->board_part_number, "BCM57765");
12584 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
12585 strcpy(tp->board_part_number, "BCM57781");
12586 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
12587 strcpy(tp->board_part_number, "BCM57785");
12588 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
12589 strcpy(tp->board_part_number, "BCM57791");
12590 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12591 strcpy(tp->board_part_number, "BCM57795");
12594 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12595 strcpy(tp->board_part_number, "BCM95906");
12598 strcpy(tp->board_part_number, "none");
12602 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
12606 if (tg3_nvram_read(tp, offset, &val) ||
12607 (val & 0xfc000000) != 0x0c000000 ||
12608 tg3_nvram_read(tp, offset + 4, &val) ||
12615 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12617 u32 val, offset, start, ver_offset;
12619 bool newver = false;
12621 if (tg3_nvram_read(tp, 0xc, &offset) ||
12622 tg3_nvram_read(tp, 0x4, &start))
12625 offset = tg3_nvram_logical_addr(tp, offset);
12627 if (tg3_nvram_read(tp, offset, &val))
12630 if ((val & 0xfc000000) == 0x0c000000) {
12631 if (tg3_nvram_read(tp, offset + 4, &val))
12638 dst_off = strlen(tp->fw_ver);
12641 if (TG3_VER_SIZE - dst_off < 16 ||
12642 tg3_nvram_read(tp, offset + 8, &ver_offset))
12645 offset = offset + ver_offset - start;
12646 for (i = 0; i < 16; i += 4) {
12648 if (tg3_nvram_read_be32(tp, offset + i, &v))
12651 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
12656 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
12659 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
12660 TG3_NVM_BCVER_MAJSFT;
12661 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
12662 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
12663 "v%d.%02d", major, minor);
12667 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
12669 u32 val, major, minor;
12671 /* Use native endian representation */
12672 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
12675 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
12676 TG3_NVM_HWSB_CFG1_MAJSFT;
12677 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
12678 TG3_NVM_HWSB_CFG1_MINSFT;
12680 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
12683 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12685 u32 offset, major, minor, build;
12687 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
12689 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
12692 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
12693 case TG3_EEPROM_SB_REVISION_0:
12694 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
12696 case TG3_EEPROM_SB_REVISION_2:
12697 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
12699 case TG3_EEPROM_SB_REVISION_3:
12700 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
12702 case TG3_EEPROM_SB_REVISION_4:
12703 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
12705 case TG3_EEPROM_SB_REVISION_5:
12706 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
12708 case TG3_EEPROM_SB_REVISION_6:
12709 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
12715 if (tg3_nvram_read(tp, offset, &val))
12718 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
12719 TG3_EEPROM_SB_EDH_BLD_SHFT;
12720 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
12721 TG3_EEPROM_SB_EDH_MAJ_SHFT;
12722 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
12724 if (minor > 99 || build > 26)
12727 offset = strlen(tp->fw_ver);
12728 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
12729 " v%d.%02d", major, minor);
12732 offset = strlen(tp->fw_ver);
12733 if (offset < TG3_VER_SIZE - 1)
12734 tp->fw_ver[offset] = 'a' + build - 1;
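/*
 * Editor's note: the expression above encodes a nonzero build number
 * as a single trailing letter, which is why the earlier "build > 26"
 * check bails out -- only 'a'..'z' are representable.
 */
static char sb_build_suffix_example(u32 build)
{
	return 'a' + build - 1;	/* build 1 -> 'a', build 26 -> 'z' */
}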
12738 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
12740 u32 val, offset, start;
12743 for (offset = TG3_NVM_DIR_START;
12744 offset < TG3_NVM_DIR_END;
12745 offset += TG3_NVM_DIRENT_SIZE) {
12746 if (tg3_nvram_read(tp, offset, &val))
12749 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
12753 if (offset == TG3_NVM_DIR_END)
12756 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12757 start = 0x08000000;
12758 else if (tg3_nvram_read(tp, offset - 4, &start))
12761 if (tg3_nvram_read(tp, offset + 4, &offset) ||
12762 !tg3_fw_img_is_valid(tp, offset) ||
12763 tg3_nvram_read(tp, offset + 8, &val))
12766 offset += val - start;
12768 vlen = strlen(tp->fw_ver);
12770 tp->fw_ver[vlen++] = ',';
12771 tp->fw_ver[vlen++] = ' ';
12773 for (i = 0; i < 4; i++) {
12775 if (tg3_nvram_read_be32(tp, offset, &v))
12778 offset += sizeof(v);
12780 if (vlen > TG3_VER_SIZE - sizeof(v)) {
12781 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
12785 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
12790 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12796 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
12797 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
12800 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
12801 if (apedata != APE_SEG_SIG_MAGIC)
12804 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
12805 if (!(apedata & APE_FW_STATUS_READY))
12808 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
12810 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
12811 tp->tg3_flags3 |= TG3_FLG3_APE_HAS_NCSI;
12817 vlen = strlen(tp->fw_ver);
12819 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
12821 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
12822 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
12823 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
12824 (apedata & APE_FW_VERSION_BLDMSK));
12827 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
12830 bool vpd_vers = false;
12832 if (tp->fw_ver[0] != 0)
12835 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
12836 strcat(tp->fw_ver, "sb");
12840 if (tg3_nvram_read(tp, 0, &val))
12843 if (val == TG3_EEPROM_MAGIC)
12844 tg3_read_bc_ver(tp);
12845 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
12846 tg3_read_sb_ver(tp, val);
12847 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12848 tg3_read_hwsb_ver(tp);
12852 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12853 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
12856 tg3_read_mgmtfw_ver(tp);
12859 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
12862 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
12864 static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
12866 #if TG3_VLAN_TAG_USED
12867 dev->vlan_features |= flags;
12871 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
12873 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12874 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12876 else if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
12877 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12883 static int __devinit tg3_get_invariants(struct tg3 *tp)
12885 static struct pci_device_id write_reorder_chipsets[] = {
12886 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12887 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12888 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12889 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
12890 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
12891 PCI_DEVICE_ID_VIA_8385_0) },
12895 u32 pci_state_reg, grc_misc_cfg;
12900 /* Force memory write invalidate off. If we leave it on,
12901 * then on 5700_BX chips we have to enable a workaround.
12902 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
12903 * to match the cacheline size. The Broadcom driver has this
12904 * workaround but turns MWI off all the time, so it never uses
12905 * it. This seems to suggest that the workaround is insufficient.
12907 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12908 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
12909 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12911 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
12912 * has the register indirect write enable bit set before
12913 * we try to access any of the MMIO registers. It is also
12914 * critical that the PCI-X hw workaround situation is decided
12915 * before then.
12917 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12920 tp->pci_chip_rev_id = (misc_ctrl_reg >>
12921 MISC_HOST_CTRL_CHIPREV_SHIFT);
12922 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12923 u32 prod_id_asic_rev;
12925 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
12926 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
12927 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719)
12928 pci_read_config_dword(tp->pdev,
12929 TG3PCI_GEN2_PRODID_ASICREV,
12930 &prod_id_asic_rev);
12931 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
12932 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
12933 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
12934 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
12935 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
12936 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12937 pci_read_config_dword(tp->pdev,
12938 TG3PCI_GEN15_PRODID_ASICREV,
12939 &prod_id_asic_rev);
12941 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
12942 &prod_id_asic_rev);
12944 tp->pci_chip_rev_id = prod_id_asic_rev;
12947 /* Wrong chip ID in 5752 A0. This code can be removed later
12948 * as A0 is not in production.
12950 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12951 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12953 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12954 * we need to disable memory and use config. cycles
12955 * only to access all registers. The 5702/03 chips
12956 * can mistakenly decode the special cycles from the
12957 * ICH chipsets as memory write cycles, causing corruption
12958 * of register and memory space. Only certain ICH bridges
12959 * will drive special cycles with non-zero data during the
12960 * address phase which can fall within the 5703's address
12961 * range. This is not an ICH bug as the PCI spec allows
12962 * non-zero address during special cycles. However, only
12963 * these ICH bridges are known to drive non-zero addresses
12964 * during special cycles.
12966 * Since special cycles do not cross PCI bridges, we only
12967 * enable this workaround if the 5703 is on the secondary
12968 * bus of these ICH bridges.
12970 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12971 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12972 static struct tg3_dev_id {
12976 } ich_chipsets[] = {
12977 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12979 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12981 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12983 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12987 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12988 struct pci_dev *bridge = NULL;
12990 while (pci_id->vendor != 0) {
12991 bridge = pci_get_device(pci_id->vendor, pci_id->device,
12997 if (pci_id->rev != PCI_ANY_ID) {
12998 if (bridge->revision > pci_id->rev)
13001 if (bridge->subordinate &&
13002 (bridge->subordinate->number ==
13003 tp->pdev->bus->number)) {
13005 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
13006 pci_dev_put(bridge);
13012 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
13013 static struct tg3_dev_id {
13016 } bridge_chipsets[] = {
13017 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13018 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13021 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13022 struct pci_dev *bridge = NULL;
13024 while (pci_id->vendor != 0) {
13025 bridge = pci_get_device(pci_id->vendor,
13032 if (bridge->subordinate &&
13033 (bridge->subordinate->number <=
13034 tp->pdev->bus->number) &&
13035 (bridge->subordinate->subordinate >=
13036 tp->pdev->bus->number)) {
13037 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
13038 pci_dev_put(bridge);
13044 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13045 * DMA addresses > 40-bit. This bridge may have other additional
13046 * 57xx devices behind it in some 4-port NIC designs for example.
13047 * Any tg3 device found behind the bridge will also need the 40-bit
13048 * DMA workaround.
13050 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13051 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13052 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
13053 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13054 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13056 struct pci_dev *bridge = NULL;
13059 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13060 PCI_DEVICE_ID_SERVERWORKS_EPB,
13062 if (bridge && bridge->subordinate &&
13063 (bridge->subordinate->number <=
13064 tp->pdev->bus->number) &&
13065 (bridge->subordinate->subordinate >=
13066 tp->pdev->bus->number)) {
13067 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13068 pci_dev_put(bridge);
13074 /* Initialize misc host control in PCI block. */
13075 tp->misc_host_ctrl |= (misc_ctrl_reg &
13076 MISC_HOST_CTRL_CHIPREV);
13077 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13078 tp->misc_host_ctrl);
13080 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13081 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13082 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
13083 tp->pdev_peer = tg3_find_peer(tp);
13085 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13086 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13087 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13088 tp->tg3_flags3 |= TG3_FLG3_5717_PLUS;
13090 /* Intentionally exclude ASIC_REV_5906 */
13091 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13092 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13093 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13094 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13095 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13096 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13097 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
13098 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
13100 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13101 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13102 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13103 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13104 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13105 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
13107 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13108 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
13109 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
13111 /* 5700 B0 chips do not support checksumming correctly due
13112 * to hardware bugs.
13114 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
13115 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
13117 unsigned long features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
13119 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13120 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
13121 features |= NETIF_F_IPV6_CSUM;
13122 tp->dev->features |= features;
13123 vlan_features_add(tp->dev, features);
13126 /* Determine TSO capabilities */
13127 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13128 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
13129 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13130 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13131 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
13132 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13133 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
13134 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13135 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13136 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
13137 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13138 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13139 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13140 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
13141 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13142 tp->fw_needed = FIRMWARE_TG3TSO5;
13144 tp->fw_needed = FIRMWARE_TG3TSO;
13149 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13150 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
13151 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13152 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13153 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13154 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13155 tp->pdev_peer == tp->pdev))
13156 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
13158 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13159 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13160 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
13163 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
13164 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
13165 tp->irq_max = TG3_IRQ_MAX_VECS;
13169 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13170 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13171 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13172 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
13173 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
13174 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
13175 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13178 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13179 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13181 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13182 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
13183 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
13184 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
13186 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13189 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13190 if (tp->pcie_cap != 0) {
13193 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13195 pcie_set_readrq(tp->pdev, 4096);
13197 pci_read_config_word(tp->pdev,
13198 tp->pcie_cap + PCI_EXP_LNKCTL,
13200 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13201 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13202 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
13203 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13204 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13205 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13206 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13207 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
13208 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13209 tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
13211 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13212 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13213 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13214 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13215 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13216 if (!tp->pcix_cap) {
13217 dev_err(&tp->pdev->dev,
13218 "Cannot find PCI-X capability, aborting\n");
13222 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13223 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
13226 /* If we have an AMD 762 or VIA K8T800 chipset, write
13227 * reordering to the mailbox registers done by the host
13228 * controller can cause major trouble. We read back from
13229 * every mailbox register write to force the writes to be
13230 * posted to the chip in order.
13232 if (pci_dev_present(write_reorder_chipsets) &&
13233 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13234 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
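/*
 * Editor's sketch of the flush forced by TG3_FLAG_MBOX_WRITE_REORDER:
 * reading the register straight back makes the bridge retire the
 * posted write before anything else, preserving mailbox ordering.
 * This mirrors what tg3_write_flush_reg32() does; the __iomem
 * plumbing here is schematic.
 */
static void write_flush_example(void __iomem *reg, u32 val)
{
	writel(val, reg);
	readl(reg);	/* read back: forces ordered completion */
}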
13236 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13237 &tp->pci_cacheline_sz);
13238 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13239 &tp->pci_lat_timer);
13240 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13241 tp->pci_lat_timer < 64) {
13242 tp->pci_lat_timer = 64;
13243 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13244 tp->pci_lat_timer);
13247 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13248 /* 5700 BX chips need to have their TX producer index
13249 * mailboxes written twice to work around a bug.
13251 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
13253 /* If we are in PCI-X mode, enable register write workaround.
13255 * The workaround is to use indirect register accesses
13256 * for all chip writes not to mailbox registers.
13258 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13261 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13263 /* The chip can have its power management PCI config
13264 * space registers clobbered due to this bug.
13265 * So explicitly force the chip into D0 here.
13267 pci_read_config_dword(tp->pdev,
13268 tp->pm_cap + PCI_PM_CTRL,
13270 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13271 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13272 pci_write_config_dword(tp->pdev,
13273 tp->pm_cap + PCI_PM_CTRL,
13276 /* Also, force SERR#/PERR# in PCI command. */
13277 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13278 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13279 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_32BIT;

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
		tp->write32 = tg3_write_indirect_reg32;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
	    (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
		tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLG2_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
		tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;

	/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
	 * GPIO1 driven high will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	/* Force the chip into D0. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
		return err;
	}
	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN &&
	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
	} else {
		tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
	    !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	val &= GRC_MODE_HOST_STACKUP;
	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tp->tg3_flags2 |= TG3_FLG2_IS_5788;

	if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
		tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}
	/* Preserve the APE MAC_MODE bits */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		tp->mac_mode = tr32(MAC_MODE) |
			       MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = TG3_DEF_MAC_MODE;

	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
	else
		tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
	else
		tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;

	tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
		tp->rx_offset -= NET_IP_ALIGN;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
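
/* On SPARC the MAC address may come from the OpenFirmware device tree
 * ("local-mac-address" property) or, failing that, from the system
 * IDPROM, rather than from NVRAM.
 */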
#ifdef CONFIG_SPARC
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		memcpy(dev->perm_addr, dev->dev_addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif
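
/* Establish the permanent MAC address.  Search order: platform firmware
 * (SPARC only), the MAC address mailbox in NIC SRAM, NVRAM at a
 * chip-specific offset, and finally the live MAC address registers.
 */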
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
		if (PCI_FUNC(tp->pdev->devfn) & 1)
			mac_offset = 0xcc;
		if (PCI_FUNC(tp->pdev->devfn) > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2
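
/* Choose DMA read/write boundary bits for DMA_RW_CTRL.  The goal is
 * architecture-dependent: some RISC hosts prefer bursts confined to a
 * single cache line, others tolerate multiple cache lines.
 */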
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
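
/* Perform a single DMA transfer of 'size' bytes between the host buffer
 * and NIC mbuf memory using the chip's internal descriptor pool, then
 * poll the completion FIFO.  Returns 0 on completion or -ENODEV if the
 * transfer does not complete in time.
 */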
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf,
				     dma_addr_t buf_dma, int size,
				     int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
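
/* Round-trip DMA test: fill a coherent buffer, DMA it to NIC SRAM and
 * back, and verify the contents.  A mismatch on read-back indicates the
 * 5700/5701 write DMA bug, in which case the write boundary is forced
 * down to 16 bytes.
 */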
#define TEST_BUFFER_SIZE	0x2000

static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
		goto out;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				0x00800000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
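
/* Default to a fully autonegotiated link; active/original speed and
 * duplex stay invalid until a link is actually negotiated.
 */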
static void __devinit tg3_init_link_config(struct tg3 *tp)
{
	tp->link_config.advertising =
		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
		 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
		 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
		 ADVERTISED_Autoneg | ADVERTISED_MII);
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}
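
/* Pick buffer manager MBUF watermarks appropriate for the chip class
 * (5717-class, 5705-class including the 5906 variant, or the original
 * 5700-class defaults), for both standard and jumbo frames.
 */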
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
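
/* Map the PHY ID to a printable name for the probe banner. */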
static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:				return "serdes";
	default:			return "unknown";
	}
}
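
/* Format the bus type, clock and width (e.g. "PCIX:133MHz:64-bit")
 * into the caller-provided buffer for the probe banner.
 */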
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
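
/* Locate the mate PCI function of a dual-port device such as the 5704.
 * If the device is strapped for single-port mode, the peer is the
 * device itself.
 */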
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
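
/* Install the default ethtool coalescing parameters, adjusted for the
 * CLRTICK coalesce modes and for 5705+ chips, where the per-IRQ tick
 * values are cleared.
 */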
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
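
/* Two ops tables are registered: tg3_netdev_ops_dma_bug is identical
 * except that it transmits via tg3_start_xmit_dma_bug for chips that
 * need TX DMA workarounds.
 */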
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};

static const struct net_device_ops tg3_netdev_ops_dma_bug = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit_dma_bug,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
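
/* PCI probe entry point: enable the device, map BAR 0 (and BAR 2 for
 * APE-enabled chips), read the chip invariants, size the DMA masks,
 * run the DMA engine test and finally register the net device.
 */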
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_iounmap;
	}

	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
		dev->netdev_ops = &tg3_netdev_ops;
	else
		dev->netdev_ops = &tg3_netdev_ops_dma_bug;

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (err == 0) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_iounmap;
		}
	}
	tg3_init_bufmgr_config(tp);

	/* Selectively allow TSO based on operating conditions */
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
	    (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	else {
		tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
	    (dev->features & NETIF_F_IP_CSUM)) {
		dev->features |= NETIF_F_TSO;
		vlan_features_add(dev, NETIF_F_TSO);
	}
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
	    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
		if (dev->features & NETIF_F_IPV6_CSUM) {
			dev->features |= NETIF_F_TSO6;
			vlan_features_add(dev, NETIF_F_TSO6);
		}
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
			dev->features |= NETIF_F_TSO_ECN;
			vlan_features_add(dev, NETIF_F_TSO_ECN);
		}
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_iounmap;
	}

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);

		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
			tg3_read_dash_ver(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i < 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d])\n", tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
		    (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
		    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
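
/* Undo everything tg3_init_one() set up, in reverse order. */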
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		flush_scheduled_work();

		if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
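
/* Legacy PCI suspend: save config space, quiesce the chip and enter the
 * target sleep state.  If the power transition fails, the hardware is
 * restarted so the interface remains usable.
 */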
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	pci_power_t target_state;
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;

	err = tg3_set_power_state(tp, target_state);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
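
/* Legacy PCI resume: restore config space, return the chip to D0 and,
 * if the interface was running, reprogram the hardware and restart the
 * timer and NAPI processing.
 */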
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);