/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2009 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#include "tg3.h"

#define DRV_MODULE_NAME         "tg3"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "3.103"
#define DRV_MODULE_RELDATE      "November 2, 2009"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT          (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU             60
#define TG3_MAX_MTU(tp) \
        ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE                512
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JUMBO_RING_SIZE          256
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
#define TG3_RSS_INDIR_TBL_SIZE          128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)        \
        (((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
          !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
                                 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_ext_rx_buffer_desc) * \
                                 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
                                   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
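
/* NEXT_TX is the '& (foo - 1)' form described in the comment above:
 * because TG3_TX_RING_SIZE is a power of two, the producer index wraps
 * with a mask instead of a hardware modulo, e.g. NEXT_TX(511) == 0.
 */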

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)     ((tnapi)->tx_pending / 4)
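
/* For example, with the default TG3_DEF_TX_RING_PENDING of 511, a
 * stopped queue is woken once roughly a quarter of the configured
 * descriptors (511 / 4 = 127) are free again.
 */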

#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST            6

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
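
/* The MODULE_FIRMWARE() tags above only declare the blobs this driver
 * may fetch at runtime via request_firmware(), so modinfo and initramfs
 * tooling can bundle them; they do not load anything by themselves.
 */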

#define TG3_RSS_MIN_NUM_MSIX_VECS       2

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
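
/* MODULE_DEVICE_TABLE() exports tg3_pci_tbl in the module image so that
 * udev/modprobe can match the PCI IDs above to this driver and autoload
 * it when a supported NIC is discovered.
 */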

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_full" },
        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" }
};

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return (readl(tp->regs + off));
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return (readl(tp->aperegs + off));
}
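
/* The "indirect" accessors below never touch the memory-mapped BAR.
 * They program the register offset into the TG3PCI_REG_BASE_ADDR PCI
 * config word and move data through TG3PCI_REG_DATA, serialized by
 * tp->indirect_lock; this path is used when plain MMIO is unreliable,
 * e.g. for the PCI-X/ICH workarounds checked in _tw32_flush() below.
 */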

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        u32 val;
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        u32 val;
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
            (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
            !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
                writel(val, mbox);
        if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return (readl(tp->regs + off + GRCMBOX_BASE));
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}
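
/* All register traffic funnels through the tp->read32/tp->write32
 * function pointers, which probe-time setup points at one of the
 * implementations above (direct, flushed, or indirect) to match the
 * chip's quirks; the macros below are just shorthand for those calls.
 */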

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg,val)           tp->write32(tp, reg, val)
#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)               tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;

        /* Make sure the driver hasn't any stale locks. */
        for (i = 0; i < 8; i++)
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
                                APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status;

        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return -EINVAL;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
                if (status == APE_LOCK_GRANT_DRIVER)
                        break;
                udelay(10);
        }

        if (status != APE_LOCK_GRANT_DRIVER) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
                                APE_LOCK_GRANT_DRIVER);

                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        int off;

        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return;
        }

        off = 4 * locknum;
        tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;
        u32 coal_now = 0;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
            tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_napi_disable(struct tg3 *tp)
{
        int i;

        for (i = tp->irq_cnt - 1; i >= 0; i--)
                napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
        int i;

        for (i = 0; i < tp->irq_cnt; i++)
                napi_enable(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        tg3_napi_disable(tp);
        netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
        /* NOTE: unconditional netif_tx_wake_all_queues is only
         * appropriate so long as all callers are assured to
         * have free tx slots (such as after tg3_init_hw)
         */
        netif_tx_wake_all_queues(tp->dev);

        tg3_napi_enable(tp);
        tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
            (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000
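
/* Polling budget for MDIO transactions: tg3_readphy()/tg3_writephy()
 * below poll MI_COM_BUSY up to PHY_BUSY_LOOPS times.  With the
 * udelay(10) per iteration used in those loops, that bounds a single
 * transaction to roughly 50 ms before -EBUSY is returned.
 */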

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case TG3_PHY_ID_BCM50610:
        case TG3_PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case TG3_PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case TG3_PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case TG3_PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
                u32 funcnum, is_serdes;

                funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC;
                if (funcnum)
                        tp->phy_addr = 2;
                else
                        tp->phy_addr = 1;

                is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        tg3_mdio_start(tp);

        if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
            (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state..
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
                        tp->dev->name, i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case TG3_PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case TG3_PHY_ID_BCM50610:
        case TG3_PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthrough */
        case TG3_PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case TG3_PHY_ID_RTL8201E:
        case TG3_PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
                break;
        }

        tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
                tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
            !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                if (netif_msg_link(tp))
                        printk(KERN_INFO PFX "%s: Link is down.\n",
                               tp->dev->name);
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       tp->dev->name,
                       (tp->link_config.active_speed == SPEED_1000 ?
                        1000 :
                        (tp->link_config.active_speed == SPEED_100 ?
                         100 : 10)),
                       (tp->link_config.active_duplex == DUPLEX_FULL ?
                        "full" : "half"));

                printk(KERN_INFO PFX
                       "%s: Flow control is %s for TX and %s for RX.\n",
                       tp->dev->name,
                       (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
                       "on" : "off",
                       (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                       "on" : "off");
                tg3_ump_link_report(tp);
        }
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_PAUSE_CAP;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_PAUSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_1000XPAUSE;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_1000XPSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
        u8 cap = 0;

        if (lcladv & ADVERTISE_1000XPAUSE) {
                if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        else if (rmtadv & LPA_1000XPAUSE_ASYM)
                                cap = FLOW_CTRL_RX;
                } else {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                }
        } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
                        cap = FLOW_CTRL_TX;
        }

        return cap;
}
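
/* Sketch of the pause resolution implemented above, from the local
 * (lcladv) and link-partner (rmtadv) 1000BASE-X ability bits:
 *
 *   local PAUSE      + remote PAUSE       -> TX and RX pause
 *   local PAUSE+ASYM + remote ASYM only   -> RX pause only
 *   local ASYM only  + remote PAUSE+ASYM  -> TX pause only
 *   anything else                         -> no pause
 */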

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
        u8 autoneg;
        u8 flowctrl = 0;
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;

        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
                autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
        else
                autoneg = tp->link_config.autoneg;

        if (autoneg == AUTONEG_ENABLE &&
            (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
                if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
                        flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
                else
                        flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
        } else
                flowctrl = tp->link_config.flowctrl;

        tp->link_config.active_flowctrl = flowctrl;

        if (flowctrl & FLOW_CTRL_RX)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        if (old_rx_mode != tp->rx_mode)
                tw32_f(MAC_RX_MODE, tp->rx_mode);

        if (flowctrl & FLOW_CTRL_TX)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

        if (old_tx_mode != tp->tx_mode)
                tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
        u8 oldflowctrl, linkmesg = 0;
        u32 mac_mode, lcl_adv, rmt_adv;
        struct tg3 *tp = netdev_priv(dev);
        struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        spin_lock_bh(&tp->lock);

        mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
                                    MAC_MODE_HALF_DUPLEX);

        oldflowctrl = tp->link_config.active_flowctrl;

        if (phydev->link) {
                lcl_adv = 0;
                rmt_adv = 0;

                if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else if (phydev->speed == SPEED_1000 ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_MII;

                if (phydev->duplex == DUPLEX_HALF)
                        mac_mode |= MAC_MODE_HALF_DUPLEX;
                else {
                        lcl_adv = tg3_advert_flowctrl_1000T(
                                  tp->link_config.flowctrl);

                        if (phydev->pause)
                                rmt_adv = LPA_PAUSE_CAP;
                        if (phydev->asym_pause)
                                rmt_adv |= LPA_PAUSE_ASYM;
                }

                tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
        } else
                mac_mode |= MAC_MODE_PORT_MODE_GMII;

        if (mac_mode != tp->mac_mode) {
                tp->mac_mode = mac_mode;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                if (phydev->speed == SPEED_10)
                        tw32(MAC_MI_STAT,
                             MAC_MI_STAT_10MBPS_MODE |
                             MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
                else
                        tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
        }

        if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
        else
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

        if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
            (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
            phydev->speed != tp->link_config.active_speed ||
            phydev->duplex != tp->link_config.active_duplex ||
            oldflowctrl != tp->link_config.active_flowctrl)
                linkmesg = 1;

        tp->link_config.active_speed = phydev->speed;
        tp->link_config.active_duplex = phydev->duplex;

        spin_unlock_bh(&tp->lock);

        if (linkmesg)
                tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
        struct phy_device *phydev;

        if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
                return 0;

        /* Bring the PHY back to a known state. */
        tg3_bmcr_reset(tp);

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        /* Attach the MAC to the PHY. */
        phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
                             phydev->dev_flags, phydev->interface);
        if (IS_ERR(phydev)) {
                printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
                return PTR_ERR(phydev);
        }

        /* Mask with MAC supported features. */
        switch (phydev->interface) {
        case PHY_INTERFACE_MODE_GMII:
        case PHY_INTERFACE_MODE_RGMII:
                if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
                        phydev->supported &= (PHY_GBIT_FEATURES |
                                              SUPPORTED_Pause |
                                              SUPPORTED_Asym_Pause);
                        break;
                }
                /* fallthrough */
        case PHY_INTERFACE_MODE_MII:
                phydev->supported &= (PHY_BASIC_FEATURES |
                                      SUPPORTED_Pause |
                                      SUPPORTED_Asym_Pause);
                break;
        default:
                phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
                return -EINVAL;
        }

        tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

        phydev->advertising = phydev->supported;

        return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
        struct phy_device *phydev;

        if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
                return;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (tp->link_config.phy_is_low_power) {
                tp->link_config.phy_is_low_power = 0;
                phydev->speed = tp->link_config.orig_speed;
                phydev->duplex = tp->link_config.orig_duplex;
                phydev->autoneg = tp->link_config.orig_autoneg;
                phydev->advertising = tp->link_config.orig_advertising;
        }

        phy_start(phydev);

        phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
        if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
                return;

        phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
        if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
                phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
                tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
        }
}

static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
        u32 phytest;

        if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
                u32 phy;

                tg3_writephy(tp, MII_TG3_FET_TEST,
                             phytest | MII_TG3_FET_SHADOW_EN);
                if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
                        if (enable)
                                phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
                        else
                                phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
                        tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
                }
                tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
        }
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
        u32 reg;

        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
                return;

        if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
                tg3_phy_fet_toggle_apd(tp, enable);
                return;
        }

        reg = MII_TG3_MISC_SHDW_WREN |
              MII_TG3_MISC_SHDW_SCR5_SEL |
              MII_TG3_MISC_SHDW_SCR5_LPED |
              MII_TG3_MISC_SHDW_SCR5_DLPTLM |
              MII_TG3_MISC_SHDW_SCR5_SDTL |
              MII_TG3_MISC_SHDW_SCR5_C125OE;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
                reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

        tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

        reg = MII_TG3_MISC_SHDW_WREN |
              MII_TG3_MISC_SHDW_APD_SEL |
              MII_TG3_MISC_SHDW_APD_WKTM_84MS;
        if (enable)
                reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

        tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
        u32 phy;

        if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
            (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
                return;

        if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
                u32 ephy;

                if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
                        u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

                        tg3_writephy(tp, MII_TG3_FET_TEST,
                                     ephy | MII_TG3_FET_SHADOW_EN);
                        if (!tg3_readphy(tp, reg, &phy)) {
                                if (enable)
                                        phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
                                else
                                        phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
                                tg3_writephy(tp, reg, phy);
                        }
                        tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
                }
        } else {
                phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
                      MII_TG3_AUXCTL_SHDWSEL_MISC;
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
                        if (enable)
                                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        else
                                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        phy |= MII_TG3_AUXCTL_MISC_WREN;
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
                }
        }
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
                return;

        if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
            !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
                tg3_writephy(tp, MII_TG3_AUX_CTRL,
                             (val | (1 << 15) | (1 << 4)));
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
        u32 otp, phy;

        if (!tp->phy_otp)
                return;

        otp = tp->phy_otp;

        /* Enable SM_DSP clock and tx 6dB coding. */
        phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
              MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
              MII_TG3_AUXCTL_ACTL_TX_6DB;
        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

        phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
        phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
        tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

        phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
              ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

        phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
        phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

        phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

        phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

        phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
              ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

        /* Turn off SM_DSP clock. */
        phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
              MII_TG3_AUXCTL_ACTL_TX_6DB;
        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
        int limit = 100;

        while (limit--) {
                u32 tmp32;

                if (!tg3_readphy(tp, 0x16, &tmp32)) {
                        if ((tmp32 & 0x1000) == 0)
                                break;
                }
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, 0x16, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);
                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp))
                        return -EBUSY;
        }

        return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
        u32 reg32, phy9_orig;
        int retries, do_phy_reset, err;

        retries = 10;
        do_phy_reset = 1;
        do {
                if (do_phy_reset) {
                        err = tg3_bmcr_reset(tp);
                        if (err)
                                return err;
                        do_phy_reset = 0;
                }

                /* Disable transmitter and interrupt.  */
                if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
                        continue;

                reg32 |= 0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

                /* Set full-duplex, 1000 mbps.  */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

                /* Set to master mode.  */
                if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
                        continue;

                tg3_writephy(tp, MII_TG3_CTRL,
                             (MII_TG3_CTRL_AS_MASTER |
                              MII_TG3_CTRL_ENABLE_AS_MASTER));

                /* Enable SM_DSP_CLOCK and 6dB.  */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

                /* Block the PHY control access.  */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

                err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
                if (!err)
                        break;
        } while (--retries);

        err = tg3_phy_reset_chanpat(tp);
        if (err)
                return err;

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, 0x16, 0x0000);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                /* Set Extended packet length bit for jumbo frames */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
        } else {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }

        tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

        if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
                reg32 &= ~0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
        } else if (!err)
                err = -EBUSY;

        return err;
}

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 cpmuctrl;
        u32 phy_status;
        int err;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val;

                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
        }
        err  = tg3_readphy(tp, MII_BMSR, &phy_status);
        err |= tg3_readphy(tp, MII_BMSR, &phy_status);
        if (err != 0)
                return -EBUSY;

        if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
                netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        cpmuctrl = 0;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
            GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
                cpmuctrl = tr32(TG3_CPMU_CTRL);
                if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
                        tw32(TG3_CPMU_CTRL,
                             cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

        if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
                u32 phy;

                phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
                tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

                tw32(TG3_CPMU_CTRL, cpmuctrl);
        }

        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
            GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
                u32 val;

                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
                    CPMU_LSPD_1000MB_MACCLK_12_5) {
                        val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                        udelay(40);
                        tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
                }
        }

        tg3_phy_apply_otp(tp);

        if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
                tg3_phy_toggle_apd(tp, true);
        else
                tg3_phy_toggle_apd(tp, false);

out:
        if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8d68);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     MII_TG3_TEST1_TRIM_EN | 0x4);
                } else
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
        } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
                u32 phy_reg;

                /* Set bit 14 with read-modify-write to preserve other bits */
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
                u32 phy_reg;

                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* adjust output voltage */
                tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
        }

        tg3_phy_toggle_automdix(tp, 1);
        tg3_phy_set_wirespeed(tp);
        return 0;
}

static void tg3_frob_aux_power(struct tg3 *tp)
{
        struct tg3 *tp_peer = tp;

        if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
                return;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
                struct net_device *dev_peer;

                dev_peer = pci_get_drvdata(tp->pdev_peer);
                /* remove_one() may have been run on the peer. */
                if (!dev_peer)
                        tp_peer = tp;
                else
                        tp_peer = netdev_priv(dev_peer);
        }

        if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE0 |
                                     GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OE2 |
                                     GRC_LCLCTRL_GPIO_OUTPUT0 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1),
                                    100);
                } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
                           tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
                        /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
                        u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
                                             GRC_LCLCTRL_GPIO_OE1 |
                                             GRC_LCLCTRL_GPIO_OE2 |
                                             GRC_LCLCTRL_GPIO_OUTPUT0 |
                                             GRC_LCLCTRL_GPIO_OUTPUT1 |
                                             tp->grc_local_ctrl;
                        tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
                        tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

                        grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
                        tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
                } else {
                        u32 no_gpio2;
                        u32 grc_local_ctrl = 0;

                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        /* Workaround to prevent overdrawing Amps. */
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
                                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }

                        /* On 5753 and variants, GPIO2 cannot be used. */
                        no_gpio2 = tp->nic_sram_data_cfg &
                                   NIC_SRAM_DATA_CFG_NO_GPIO2;

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                          GRC_LCLCTRL_GPIO_OE1 |
                                          GRC_LCLCTRL_GPIO_OE2 |
                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
                                          GRC_LCLCTRL_GPIO_OUTPUT2;
                        if (no_gpio2) {
                                grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                                    GRC_LCLCTRL_GPIO_OUTPUT2);
                        }
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    grc_local_ctrl, 100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    grc_local_ctrl, 100);

                        if (!no_gpio2) {
                                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }
                }
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    GRC_LCLCTRL_GPIO_OE1, 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
                }
        }
}

static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
        if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
                return 1;
        else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
                if (speed != SPEED_10)
                        return 1;
        } else if (speed == SPEED_10)
                return 1;

        return 0;
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);

static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
                        u32 serdes_cfg = tr32(MAC_SERDES_CFG);

                        sg_dig_ctrl |=
                                SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
                        tw32(SG_DIG_CTRL, sg_dig_ctrl);
                        tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
                }
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                tg3_bmcr_reset(tp);
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
                return;
        } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
                u32 phytest;
                if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
                        u32 phy;

                        tg3_writephy(tp, MII_ADVERTISE, 0);
                        tg3_writephy(tp, MII_BMCR,
                                     BMCR_ANENABLE | BMCR_ANRESTART);

                        tg3_writephy(tp, MII_TG3_FET_TEST,
                                     phytest | MII_TG3_FET_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
                                phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
                                tg3_writephy(tp,
                                             MII_TG3_FET_SHDW_AUXMODE4,
                                             phy);
                        }
                        tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
                }
                return;
        } else if (do_low_power) {
                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_FORCE_LED_OFF);

                tg3_writephy(tp, MII_TG3_AUX_CTRL,
                             MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
                             MII_TG3_AUXCTL_PCTL_100TX_LPWR |
                             MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
                             MII_TG3_AUXCTL_PCTL_VREG_11V);
        }

        /* The PHY should not be powered down on some chips because
         * of bugs.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
             (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
                return;

        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
            GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                val |= CPMU_LSPD_1000MB_MACCLK_12_5;
                tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
        }

        tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}

/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
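/* A note on the arbitration above: the NVRAM is shared with other
 * on-chip agents, so tg3_nvram_lock() requests software-arbiter
 * channel 1 (SWARB_REQ_SET1) and spins for the grant (SWARB_GNT1).
 * nvram_lock_cnt lets the lock nest within a single tp->lock holder,
 * and the last tg3_nvram_unlock() drops the request (SWARB_REQ_CLR1).
 */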
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}

static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
	    !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
	    !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}

/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}

/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}
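/* A worked example of the address translation above, assuming the
 * usual tg3 setup for Atmel AT45DB0X1B parts (264-byte pages, with
 * ATMEL_AT45DB0X1B_PAGE_POS == 9 as the in-page offset width):
 * logical offset 1000 is page 3, byte 208 (1000 == 3 * 264 + 208),
 * which tg3_nvram_phys_addr() turns into (3 << 9) + 208 == 1744.
 * tg3_nvram_logical_addr() applies the exact inverse mapping.
 */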
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
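/* Example of the register layout programmed above: for the station
 * address 00:10:18:aa:bb:cc, addr_high ends up 0x00000010 (octets
 * 0-1) and addr_low 0x18aabbcc (octets 2-5).  The same pair is
 * written into all four MAC_ADDR_n perfect-match slots, and the low
 * bits of the octet sum seed the transmit backoff generator.
 */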
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	switch (state) {
	case PCI_D0:
		pci_enable_wake(tp->pdev, state, false);
		pci_set_power_state(tp->pdev, PCI_D0);

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
	case PCI_D2:
	case PCI_D3hot:
		break;

	default:
		printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
			tp->dev->name, state);
		return -EINVAL;
	}

	/* Restore the CLKREQ setting. */
	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2497 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2498 device_may_wakeup(&tp->pdev->dev) &&
2499 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2501 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2502 do_low_power = false;
2503 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2504 !tp->link_config.phy_is_low_power) {
2505 struct phy_device *phydev;
2506 u32 phyid, advertising;
2508 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2510 tp->link_config.phy_is_low_power = 1;
2512 tp->link_config.orig_speed = phydev->speed;
2513 tp->link_config.orig_duplex = phydev->duplex;
2514 tp->link_config.orig_autoneg = phydev->autoneg;
2515 tp->link_config.orig_advertising = phydev->advertising;
2517 advertising = ADVERTISED_TP |
2519 ADVERTISED_Autoneg |
2520 ADVERTISED_10baseT_Half;
2522 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2523 device_should_wake) {
2524 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2526 ADVERTISED_100baseT_Half |
2527 ADVERTISED_100baseT_Full |
2528 ADVERTISED_10baseT_Full;
2530 advertising |= ADVERTISED_10baseT_Full;
2533 phydev->advertising = advertising;
2535 phy_start_aneg(phydev);
2537 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2538 if (phyid != TG3_PHY_ID_BCMAC131) {
2539 phyid &= TG3_PHY_OUI_MASK;
2540 if (phyid == TG3_PHY_OUI_1 ||
2541 phyid == TG3_PHY_OUI_2 ||
2542 phyid == TG3_PHY_OUI_3)
2543 do_low_power = true;
2547 do_low_power = true;
2549 if (tp->link_config.phy_is_low_power == 0) {
2550 tp->link_config.phy_is_low_power = 1;
2551 tp->link_config.orig_speed = tp->link_config.speed;
2552 tp->link_config.orig_duplex = tp->link_config.duplex;
2553 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2556 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2557 tp->link_config.speed = SPEED_10;
2558 tp->link_config.duplex = DUPLEX_HALF;
2559 tp->link_config.autoneg = AUTONEG_ENABLE;
2560 tg3_setup_phy(tp, 0);
2564 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2567 val = tr32(GRC_VCPU_EXT_CTRL);
2568 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2569 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2573 for (i = 0; i < 200; i++) {
2574 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2575 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2580 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2581 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2582 WOL_DRV_STATE_SHUTDOWN |
2586 if (device_should_wake) {
2589 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2591 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2595 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2596 mac_mode = MAC_MODE_PORT_MODE_GMII;
2598 mac_mode = MAC_MODE_PORT_MODE_MII;
2600 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2601 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2603 u32 speed = (tp->tg3_flags &
2604 TG3_FLAG_WOL_SPEED_100MB) ?
2605 SPEED_100 : SPEED_10;
2606 if (tg3_5700_link_polarity(tp, speed))
2607 mac_mode |= MAC_MODE_LINK_POLARITY;
2609 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2612 mac_mode = MAC_MODE_PORT_MODE_TBI;
2615 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2616 tw32(MAC_LED_CTRL, tp->led_ctrl);
2618 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2619 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2620 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2621 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2622 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2623 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2625 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2626 mac_mode |= tp->mac_mode &
2627 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2628 if (mac_mode & MAC_MODE_APE_TX_EN)
2629 mac_mode |= MAC_MODE_TDE_ENABLE;
2632 tw32_f(MAC_MODE, mac_mode);
2635 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2639 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2640 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2641 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2644 base_val = tp->pci_clock_ctrl;
2645 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2646 CLOCK_CTRL_TXCLK_DISABLE);
2648 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2649 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2650 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2651 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2652 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2654 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2655 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2656 u32 newbits1, newbits2;
2658 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2659 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2660 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2661 CLOCK_CTRL_TXCLK_DISABLE |
2663 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2664 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2665 newbits1 = CLOCK_CTRL_625_CORE;
2666 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2668 newbits1 = CLOCK_CTRL_ALTCLK;
2669 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2672 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2675 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2678 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2681 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2682 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2683 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2684 CLOCK_CTRL_TXCLK_DISABLE |
2685 CLOCK_CTRL_44MHZ_CORE);
2687 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2690 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2691 tp->pci_clock_ctrl | newbits3, 40);
2695 if (!(device_should_wake) &&
2696 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2697 tg3_power_down_phy(tp, do_low_power);
2699 tg3_frob_aux_power(tp);
2701 /* Workaround for unstable PLL clock */
2702 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2703 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2704 u32 val = tr32(0x7d00);
2706 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2708 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2711 err = tg3_nvram_lock(tp);
2712 tg3_halt_cpu(tp, RX_CPU_BASE);
2714 tg3_nvram_unlock(tp);
2718 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2720 if (device_should_wake)
2721 pci_enable_wake(tp->pdev, state, true);
2723 /* Finally, set the new power state. */
2724 pci_set_power_state(tp->pdev, state);
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	}
}
2776 static void tg3_phy_copper_begin(struct tg3 *tp)
2781 if (tp->link_config.phy_is_low_power) {
2782 /* Entering low power mode. Disable gigabit and
2783 * 100baseT advertisements.
2785 tg3_writephy(tp, MII_TG3_CTRL, 0);
2787 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2788 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2789 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2790 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2792 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2793 } else if (tp->link_config.speed == SPEED_INVALID) {
2794 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2795 tp->link_config.advertising &=
2796 ~(ADVERTISED_1000baseT_Half |
2797 ADVERTISED_1000baseT_Full);
2799 new_adv = ADVERTISE_CSMA;
2800 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2801 new_adv |= ADVERTISE_10HALF;
2802 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2803 new_adv |= ADVERTISE_10FULL;
2804 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2805 new_adv |= ADVERTISE_100HALF;
2806 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2807 new_adv |= ADVERTISE_100FULL;
2809 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2811 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2813 if (tp->link_config.advertising &
2814 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2816 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2817 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2818 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2819 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2820 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2821 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2822 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2823 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2824 MII_TG3_CTRL_ENABLE_AS_MASTER);
2825 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2827 tg3_writephy(tp, MII_TG3_CTRL, 0);
2830 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2831 new_adv |= ADVERTISE_CSMA;
2833 /* Asking for a specific link mode. */
2834 if (tp->link_config.speed == SPEED_1000) {
2835 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2837 if (tp->link_config.duplex == DUPLEX_FULL)
2838 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2840 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2841 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2842 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2843 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2844 MII_TG3_CTRL_ENABLE_AS_MASTER);
2846 if (tp->link_config.speed == SPEED_100) {
2847 if (tp->link_config.duplex == DUPLEX_FULL)
2848 new_adv |= ADVERTISE_100FULL;
2850 new_adv |= ADVERTISE_100HALF;
2852 if (tp->link_config.duplex == DUPLEX_FULL)
2853 new_adv |= ADVERTISE_10FULL;
2855 new_adv |= ADVERTISE_10HALF;
2857 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2862 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2865 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2866 tp->link_config.speed != SPEED_INVALID) {
2867 u32 bmcr, orig_bmcr;
2869 tp->link_config.active_speed = tp->link_config.speed;
2870 tp->link_config.active_duplex = tp->link_config.duplex;
2873 switch (tp->link_config.speed) {
2879 bmcr |= BMCR_SPEED100;
2883 bmcr |= TG3_BMCR_SPEED1000;
2887 if (tp->link_config.duplex == DUPLEX_FULL)
2888 bmcr |= BMCR_FULLDPLX;
2890 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2891 (bmcr != orig_bmcr)) {
2892 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2893 for (i = 0; i < 1500; i++) {
2897 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2898 tg3_readphy(tp, MII_BMSR, &tmp))
2900 if (!(tmp & BMSR_LSTATUS)) {
2905 tg3_writephy(tp, MII_BMCR, bmcr);
2909 tg3_writephy(tp, MII_BMCR,
2910 BMCR_ANENABLE | BMCR_ANRESTART);
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);

	udelay(40);

	return err;
}
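/* The writes above follow the usual Broadcom DSP access pattern:
 * each coefficient is programmed by writing the tap address to
 * MII_TG3_DSP_ADDRESS and then the value to MII_TG3_DSP_RW_PORT,
 * with the OR-accumulated err catching a failure in any step.
 */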
static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
{
	u32 adv_reg, all_mask = 0;

	if (mask & ADVERTISED_10baseT_Half)
		all_mask |= ADVERTISE_10HALF;
	if (mask & ADVERTISED_10baseT_Full)
		all_mask |= ADVERTISE_10FULL;
	if (mask & ADVERTISED_100baseT_Half)
		all_mask |= ADVERTISE_100HALF;
	if (mask & ADVERTISED_100baseT_Full)
		all_mask |= ADVERTISE_100FULL;

	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
		return 0;

	if ((adv_reg & all_mask) != all_mask)
		return 0;
	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
		u32 tg3_ctrl;

		all_mask = 0;
		if (mask & ADVERTISED_1000baseT_Half)
			all_mask |= ADVERTISE_1000HALF;
		if (mask & ADVERTISED_1000baseT_Full)
			all_mask |= ADVERTISE_1000FULL;

		if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
			return 0;

		if ((tg3_ctrl & all_mask) != all_mask)
			return 0;
	}
	return 1;
}
static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
{
	u32 curadv, reqadv;

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return 1;

	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		if (curadv != reqadv)
			return 0;

		if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
			tg3_readphy(tp, MII_LPA, rmtadv);
	} else {
		/* Reprogram the advertisement register, even if it
		 * does not affect the current link.  If the link
		 * gets renegotiated in the future, we can save an
		 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
		if (curadv != reqadv) {
			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
				     ADVERTISE_PAUSE_ASYM);
			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
		}
	}

	return 1;
}
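/* For reference, tg3_advert_flowctrl_1000T() (defined earlier in this
 * file) maps the requested flow control onto the 802.3 pause bits:
 * TX+RX advertises ADVERTISE_PAUSE_CAP alone, TX-only advertises
 * ADVERTISE_PAUSE_ASYM alone, and RX-only advertises both bits.
 */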
3011 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3013 int current_link_up;
3015 u32 lcl_adv, rmt_adv;
3023 (MAC_STATUS_SYNC_CHANGED |
3024 MAC_STATUS_CFG_CHANGED |
3025 MAC_STATUS_MI_COMPLETION |
3026 MAC_STATUS_LNKSTATE_CHANGED));
3029 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3031 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3035 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
3040 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3041 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3042 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3043 netif_carrier_ok(tp->dev)) {
3044 tg3_readphy(tp, MII_BMSR, &bmsr);
3045 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3046 !(bmsr & BMSR_LSTATUS))
3052 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
3053 tg3_readphy(tp, MII_BMSR, &bmsr);
3054 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3055 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3058 if (!(bmsr & BMSR_LSTATUS)) {
3059 err = tg3_init_5401phy_dsp(tp);
3063 tg3_readphy(tp, MII_BMSR, &bmsr);
3064 for (i = 0; i < 1000; i++) {
3066 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3067 (bmsr & BMSR_LSTATUS)) {
3073 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
3074 !(bmsr & BMSR_LSTATUS) &&
3075 tp->link_config.active_speed == SPEED_1000) {
3076 err = tg3_phy_reset(tp);
3078 err = tg3_init_5401phy_dsp(tp);
3083 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3084 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3085 /* 5701 {A0,B0} CRC bug workaround */
3086 tg3_writephy(tp, 0x15, 0x0a75);
3087 tg3_writephy(tp, 0x1c, 0x8c68);
3088 tg3_writephy(tp, 0x1c, 0x8d68);
3089 tg3_writephy(tp, 0x1c, 0x8c68);
3092 /* Clear pending interrupts... */
3093 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3094 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3096 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
3097 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3098 else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
3099 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3101 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3102 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3103 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3104 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3105 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3107 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3110 current_link_up = 0;
3111 current_speed = SPEED_INVALID;
3112 current_duplex = DUPLEX_INVALID;
3114 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
3117 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3118 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3119 if (!(val & (1 << 10))) {
3121 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3127 for (i = 0; i < 100; i++) {
3128 tg3_readphy(tp, MII_BMSR, &bmsr);
3129 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3130 (bmsr & BMSR_LSTATUS))
3135 if (bmsr & BMSR_LSTATUS) {
3138 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3139 for (i = 0; i < 2000; i++) {
3141 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3146 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3151 for (i = 0; i < 200; i++) {
3152 tg3_readphy(tp, MII_BMCR, &bmcr);
3153 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3155 if (bmcr && bmcr != 0x7fff)
3163 tp->link_config.active_speed = current_speed;
3164 tp->link_config.active_duplex = current_duplex;
3166 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3167 if ((bmcr & BMCR_ANENABLE) &&
3168 tg3_copper_is_advertising_all(tp,
3169 tp->link_config.advertising)) {
3170 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3172 current_link_up = 1;
3175 if (!(bmcr & BMCR_ANENABLE) &&
3176 tp->link_config.speed == current_speed &&
3177 tp->link_config.duplex == current_duplex &&
3178 tp->link_config.flowctrl ==
3179 tp->link_config.active_flowctrl) {
3180 current_link_up = 1;
3184 if (current_link_up == 1 &&
3185 tp->link_config.active_duplex == DUPLEX_FULL)
3186 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3190 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
3193 tg3_phy_copper_begin(tp);
3195 tg3_readphy(tp, MII_BMSR, &tmp);
3196 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3197 (tmp & BMSR_LSTATUS))
3198 current_link_up = 1;
3201 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3202 if (current_link_up == 1) {
3203 if (tp->link_config.active_speed == SPEED_100 ||
3204 tp->link_config.active_speed == SPEED_10)
3205 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3207 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3208 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
3209 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3211 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3213 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3214 if (tp->link_config.active_duplex == DUPLEX_HALF)
3215 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3217 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3218 if (current_link_up == 1 &&
3219 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3220 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3222 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3225 /* ??? Without this setting Netgear GA302T PHY does not
3226 * ??? send/receive packets...
3228 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
3229 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3230 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3231 tw32_f(MAC_MI_MODE, tp->mi_mode);
3235 tw32_f(MAC_MODE, tp->mac_mode);
3238 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3239 /* Polled via timer. */
3240 tw32_f(MAC_EVENT, 0);
3242 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3246 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3247 current_link_up == 1 &&
3248 tp->link_config.active_speed == SPEED_1000 &&
3249 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3250 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3253 (MAC_STATUS_SYNC_CHANGED |
3254 MAC_STATUS_CFG_CHANGED));
3257 NIC_SRAM_FIRMWARE_MBOX,
3258 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3261 /* Prevent send BD corruption. */
3262 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3263 u16 oldlnkctl, newlnkctl;
3265 pci_read_config_word(tp->pdev,
3266 tp->pcie_cap + PCI_EXP_LNKCTL,
3268 if (tp->link_config.active_speed == SPEED_100 ||
3269 tp->link_config.active_speed == SPEED_10)
3270 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3272 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3273 if (newlnkctl != oldlnkctl)
3274 pci_write_config_word(tp->pdev,
3275 tp->pcie_cap + PCI_EXP_LNKCTL,
3279 if (current_link_up != netif_carrier_ok(tp->dev)) {
3280 if (current_link_up)
3281 netif_carrier_on(tp->dev);
3283 netif_carrier_off(tp->dev);
3284 tg3_link_report(tp);
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
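/* The state machine below follows the IEEE 802.3 clause 37
 * (1000BASE-X) flow: AN_ENABLE -> RESTART -> ABILITY_DETECT ->
 * ACK_DETECT -> COMPLETE_ACK -> IDLE_DETECT -> LINK_OK, dropping
 * back to AN_ENABLE whenever the partner's config word changes,
 * with ANEG_STATE_SETTLE_TIME ticks of quiet time required before
 * leaving the RESTART, COMPLETE_ACK and IDLE_DETECT states.
 */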
3354 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3355 struct tg3_fiber_aneginfo *ap)
3358 unsigned long delta;
3362 if (ap->state == ANEG_STATE_UNKNOWN) {
3366 ap->ability_match_cfg = 0;
3367 ap->ability_match_count = 0;
3368 ap->ability_match = 0;
3374 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3375 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3377 if (rx_cfg_reg != ap->ability_match_cfg) {
3378 ap->ability_match_cfg = rx_cfg_reg;
3379 ap->ability_match = 0;
3380 ap->ability_match_count = 0;
3382 if (++ap->ability_match_count > 1) {
3383 ap->ability_match = 1;
3384 ap->ability_match_cfg = rx_cfg_reg;
3387 if (rx_cfg_reg & ANEG_CFG_ACK)
3395 ap->ability_match_cfg = 0;
3396 ap->ability_match_count = 0;
3397 ap->ability_match = 0;
3403 ap->rxconfig = rx_cfg_reg;
3407 case ANEG_STATE_UNKNOWN:
3408 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3409 ap->state = ANEG_STATE_AN_ENABLE;
3412 case ANEG_STATE_AN_ENABLE:
3413 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3414 if (ap->flags & MR_AN_ENABLE) {
3417 ap->ability_match_cfg = 0;
3418 ap->ability_match_count = 0;
3419 ap->ability_match = 0;
3423 ap->state = ANEG_STATE_RESTART_INIT;
3425 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3429 case ANEG_STATE_RESTART_INIT:
3430 ap->link_time = ap->cur_time;
3431 ap->flags &= ~(MR_NP_LOADED);
3433 tw32(MAC_TX_AUTO_NEG, 0);
3434 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3435 tw32_f(MAC_MODE, tp->mac_mode);
3438 ret = ANEG_TIMER_ENAB;
3439 ap->state = ANEG_STATE_RESTART;
3442 case ANEG_STATE_RESTART:
3443 delta = ap->cur_time - ap->link_time;
3444 if (delta > ANEG_STATE_SETTLE_TIME) {
3445 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3447 ret = ANEG_TIMER_ENAB;
3451 case ANEG_STATE_DISABLE_LINK_OK:
3455 case ANEG_STATE_ABILITY_DETECT_INIT:
3456 ap->flags &= ~(MR_TOGGLE_TX);
3457 ap->txconfig = ANEG_CFG_FD;
3458 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3459 if (flowctrl & ADVERTISE_1000XPAUSE)
3460 ap->txconfig |= ANEG_CFG_PS1;
3461 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3462 ap->txconfig |= ANEG_CFG_PS2;
3463 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3464 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3465 tw32_f(MAC_MODE, tp->mac_mode);
3468 ap->state = ANEG_STATE_ABILITY_DETECT;
3471 case ANEG_STATE_ABILITY_DETECT:
3472 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3473 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3477 case ANEG_STATE_ACK_DETECT_INIT:
3478 ap->txconfig |= ANEG_CFG_ACK;
3479 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3480 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3481 tw32_f(MAC_MODE, tp->mac_mode);
3484 ap->state = ANEG_STATE_ACK_DETECT;
3487 case ANEG_STATE_ACK_DETECT:
3488 if (ap->ack_match != 0) {
3489 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3490 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3491 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3493 ap->state = ANEG_STATE_AN_ENABLE;
3495 } else if (ap->ability_match != 0 &&
3496 ap->rxconfig == 0) {
3497 ap->state = ANEG_STATE_AN_ENABLE;
3501 case ANEG_STATE_COMPLETE_ACK_INIT:
3502 if (ap->rxconfig & ANEG_CFG_INVAL) {
3506 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3507 MR_LP_ADV_HALF_DUPLEX |
3508 MR_LP_ADV_SYM_PAUSE |
3509 MR_LP_ADV_ASYM_PAUSE |
3510 MR_LP_ADV_REMOTE_FAULT1 |
3511 MR_LP_ADV_REMOTE_FAULT2 |
3512 MR_LP_ADV_NEXT_PAGE |
3515 if (ap->rxconfig & ANEG_CFG_FD)
3516 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3517 if (ap->rxconfig & ANEG_CFG_HD)
3518 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3519 if (ap->rxconfig & ANEG_CFG_PS1)
3520 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3521 if (ap->rxconfig & ANEG_CFG_PS2)
3522 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3523 if (ap->rxconfig & ANEG_CFG_RF1)
3524 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3525 if (ap->rxconfig & ANEG_CFG_RF2)
3526 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3527 if (ap->rxconfig & ANEG_CFG_NP)
3528 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3530 ap->link_time = ap->cur_time;
3532 ap->flags ^= (MR_TOGGLE_TX);
3533 if (ap->rxconfig & 0x0008)
3534 ap->flags |= MR_TOGGLE_RX;
3535 if (ap->rxconfig & ANEG_CFG_NP)
3536 ap->flags |= MR_NP_RX;
3537 ap->flags |= MR_PAGE_RX;
3539 ap->state = ANEG_STATE_COMPLETE_ACK;
3540 ret = ANEG_TIMER_ENAB;
3543 case ANEG_STATE_COMPLETE_ACK:
3544 if (ap->ability_match != 0 &&
3545 ap->rxconfig == 0) {
3546 ap->state = ANEG_STATE_AN_ENABLE;
3549 delta = ap->cur_time - ap->link_time;
3550 if (delta > ANEG_STATE_SETTLE_TIME) {
3551 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3552 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3554 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3555 !(ap->flags & MR_NP_RX)) {
3556 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3564 case ANEG_STATE_IDLE_DETECT_INIT:
3565 ap->link_time = ap->cur_time;
3566 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3567 tw32_f(MAC_MODE, tp->mac_mode);
3570 ap->state = ANEG_STATE_IDLE_DETECT;
3571 ret = ANEG_TIMER_ENAB;
3574 case ANEG_STATE_IDLE_DETECT:
3575 if (ap->ability_match != 0 &&
3576 ap->rxconfig == 0) {
3577 ap->state = ANEG_STATE_AN_ENABLE;
3580 delta = ap->cur_time - ap->link_time;
3581 if (delta > ANEG_STATE_SETTLE_TIME) {
3582 /* XXX another gem from the Broadcom driver :( */
3583 ap->state = ANEG_STATE_LINK_OK;
3587 case ANEG_STATE_LINK_OK:
3588 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3592 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3593 /* ??? unimplemented */
3596 case ANEG_STATE_NEXT_PAGE_WAIT:
3597 /* ??? unimplemented */
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
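/* With the udelay(1) per iteration above, the 195000-tick bound
 * gives the hardware roughly 195 ms to complete autonegotiation
 * before fiber_autoneg() gives up and reports ANEG_FAILED.
 */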
3653 static void tg3_init_bcm8002(struct tg3 *tp)
3655 u32 mac_status = tr32(MAC_STATUS);
3658 /* Reset when initting first time or we have a link. */
3659 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3660 !(mac_status & MAC_STATUS_PCS_SYNCED))
3663 /* Set PLL lock range. */
3664 tg3_writephy(tp, 0x16, 0x8007);
3667 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3669 /* Wait for reset to complete. */
3670 /* XXX schedule_timeout() ... */
3671 for (i = 0; i < 500; i++)
3674 /* Config mode; select PMA/Ch 1 regs. */
3675 tg3_writephy(tp, 0x10, 0x8411);
3677 /* Enable auto-lock and comdet, select txclk for tx. */
3678 tg3_writephy(tp, 0x11, 0x0a10);
3680 tg3_writephy(tp, 0x18, 0x00a0);
3681 tg3_writephy(tp, 0x16, 0x41ff);
3683 /* Assert and deassert POR. */
3684 tg3_writephy(tp, 0x13, 0x0400);
3686 tg3_writephy(tp, 0x13, 0x0000);
3688 tg3_writephy(tp, 0x11, 0x0a50);
3690 tg3_writephy(tp, 0x11, 0x0a10);
3692 /* Wait for signal to stabilize */
3693 /* XXX schedule_timeout() ... */
3694 for (i = 0; i < 15000; i++)
3697 /* Deselect the channel register so we can read the PHYID
3700 tg3_writephy(tp, 0x10, 0x8011);
3703 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3706 u32 sg_dig_ctrl, sg_dig_status;
3707 u32 serdes_cfg, expected_sg_dig_ctrl;
3708 int workaround, port_a;
3709 int current_link_up;
3712 expected_sg_dig_ctrl = 0;
3715 current_link_up = 0;
3717 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3718 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3720 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3723 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3724 /* preserve bits 20-23 for voltage regulator */
3725 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3728 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3730 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3731 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3733 u32 val = serdes_cfg;
3739 tw32_f(MAC_SERDES_CFG, val);
3742 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3744 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3745 tg3_setup_flow_control(tp, 0, 0);
3746 current_link_up = 1;
3751 /* Want auto-negotiation. */
3752 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3754 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3755 if (flowctrl & ADVERTISE_1000XPAUSE)
3756 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3757 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3758 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3760 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3761 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3762 tp->serdes_counter &&
3763 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3764 MAC_STATUS_RCVD_CFG)) ==
3765 MAC_STATUS_PCS_SYNCED)) {
3766 tp->serdes_counter--;
3767 current_link_up = 1;
3772 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3773 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3775 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3777 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3778 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3779 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3780 MAC_STATUS_SIGNAL_DET)) {
3781 sg_dig_status = tr32(SG_DIG_STATUS);
3782 mac_status = tr32(MAC_STATUS);
3784 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3785 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3786 u32 local_adv = 0, remote_adv = 0;
3788 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3789 local_adv |= ADVERTISE_1000XPAUSE;
3790 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3791 local_adv |= ADVERTISE_1000XPSE_ASYM;
3793 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3794 remote_adv |= LPA_1000XPAUSE;
3795 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3796 remote_adv |= LPA_1000XPAUSE_ASYM;
3798 tg3_setup_flow_control(tp, local_adv, remote_adv);
3799 current_link_up = 1;
3800 tp->serdes_counter = 0;
3801 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3802 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3803 if (tp->serdes_counter)
3804 tp->serdes_counter--;
3807 u32 val = serdes_cfg;
3814 tw32_f(MAC_SERDES_CFG, val);
3817 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3820 /* Link parallel detection - link is up */
3821 /* only if we have PCS_SYNC and not */
3822 /* receiving config code words */
3823 mac_status = tr32(MAC_STATUS);
3824 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3825 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3826 tg3_setup_flow_control(tp, 0, 0);
3827 current_link_up = 1;
3829 TG3_FLG2_PARALLEL_DETECT;
3830 tp->serdes_counter =
3831 SERDES_PARALLEL_DET_TIMEOUT;
3833 goto restart_autoneg;
3837 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3838 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	return current_link_up;
}
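/* tg3_setup_fiber_hw_autoneg() above drives the SG_DIG block on
 * chips with hardware 1000BASE-X autoneg; the helper below performs
 * the same negotiation in software via tg3_fiber_aneg_smachine() for
 * chips that lack it.  Both fall back to parallel detection (PCS
 * sync without received config words) to catch link partners that
 * have autoneg disabled.
 */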
3845 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3847 int current_link_up = 0;
3849 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3852 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3853 u32 txflags, rxflags;
3856 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3857 u32 local_adv = 0, remote_adv = 0;
3859 if (txflags & ANEG_CFG_PS1)
3860 local_adv |= ADVERTISE_1000XPAUSE;
3861 if (txflags & ANEG_CFG_PS2)
3862 local_adv |= ADVERTISE_1000XPSE_ASYM;
3864 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3865 remote_adv |= LPA_1000XPAUSE;
3866 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3867 remote_adv |= LPA_1000XPAUSE_ASYM;
3869 tg3_setup_flow_control(tp, local_adv, remote_adv);
3871 current_link_up = 1;
3873 for (i = 0; i < 30; i++) {
3876 (MAC_STATUS_SYNC_CHANGED |
3877 MAC_STATUS_CFG_CHANGED));
3879 if ((tr32(MAC_STATUS) &
3880 (MAC_STATUS_SYNC_CHANGED |
3881 MAC_STATUS_CFG_CHANGED)) == 0)
3885 mac_status = tr32(MAC_STATUS);
3886 if (current_link_up == 0 &&
3887 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3888 !(mac_status & MAC_STATUS_RCVD_CFG))
3889 current_link_up = 1;
3891 tg3_setup_flow_control(tp, 0, 0);
3893 /* Forcing 1000FD link up. */
3894 current_link_up = 1;
3896 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3899 tw32_f(MAC_MODE, tp->mac_mode);
3904 return current_link_up;
3907 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3910 u16 orig_active_speed;
3911 u8 orig_active_duplex;
3913 int current_link_up;
3916 orig_pause_cfg = tp->link_config.active_flowctrl;
3917 orig_active_speed = tp->link_config.active_speed;
3918 orig_active_duplex = tp->link_config.active_duplex;
3920 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3921 netif_carrier_ok(tp->dev) &&
3922 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3923 mac_status = tr32(MAC_STATUS);
3924 mac_status &= (MAC_STATUS_PCS_SYNCED |
3925 MAC_STATUS_SIGNAL_DET |
3926 MAC_STATUS_CFG_CHANGED |
3927 MAC_STATUS_RCVD_CFG);
3928 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3929 MAC_STATUS_SIGNAL_DET)) {
3930 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3931 MAC_STATUS_CFG_CHANGED));
3936 tw32_f(MAC_TX_AUTO_NEG, 0);
3938 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3939 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3940 tw32_f(MAC_MODE, tp->mac_mode);
3943 if (tp->phy_id == PHY_ID_BCM8002)
3944 tg3_init_bcm8002(tp);
3946 /* Enable link change event even when serdes polling. */
3947 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3950 current_link_up = 0;
3951 mac_status = tr32(MAC_STATUS);
3953 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3954 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3956 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3958 tp->napi[0].hw_status->status =
3959 (SD_STATUS_UPDATED |
3960 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
3962 for (i = 0; i < 100; i++) {
3963 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3964 MAC_STATUS_CFG_CHANGED));
3966 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3967 MAC_STATUS_CFG_CHANGED |
3968 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3972 mac_status = tr32(MAC_STATUS);
3973 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3974 current_link_up = 0;
3975 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3976 tp->serdes_counter == 0) {
3977 tw32_f(MAC_MODE, (tp->mac_mode |
3978 MAC_MODE_SEND_CONFIGS));
3980 tw32_f(MAC_MODE, tp->mac_mode);
3984 if (current_link_up == 1) {
3985 tp->link_config.active_speed = SPEED_1000;
3986 tp->link_config.active_duplex = DUPLEX_FULL;
3987 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3988 LED_CTRL_LNKLED_OVERRIDE |
3989 LED_CTRL_1000MBPS_ON));
3991 tp->link_config.active_speed = SPEED_INVALID;
3992 tp->link_config.active_duplex = DUPLEX_INVALID;
3993 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3994 LED_CTRL_LNKLED_OVERRIDE |
3995 LED_CTRL_TRAFFIC_OVERRIDE));
3998 if (current_link_up != netif_carrier_ok(tp->dev)) {
3999 if (current_link_up)
4000 netif_carrier_on(tp->dev);
4002 netif_carrier_off(tp->dev);
4003 tg3_link_report(tp);
4005 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4006 if (orig_pause_cfg != now_pause_cfg ||
4007 orig_active_speed != tp->link_config.active_speed ||
4008 orig_active_duplex != tp->link_config.active_duplex)
4009 tg3_link_report(tp);
4015 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4017 int current_link_up, err = 0;
4021 u32 local_adv, remote_adv;
4023 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4024 tw32_f(MAC_MODE, tp->mac_mode);
4030 (MAC_STATUS_SYNC_CHANGED |
4031 MAC_STATUS_CFG_CHANGED |
4032 MAC_STATUS_MI_COMPLETION |
4033 MAC_STATUS_LNKSTATE_CHANGED));
4039 current_link_up = 0;
4040 current_speed = SPEED_INVALID;
4041 current_duplex = DUPLEX_INVALID;
4043 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4044 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4045 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4046 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4047 bmsr |= BMSR_LSTATUS;
4049 bmsr &= ~BMSR_LSTATUS;
4052 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4054 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4055 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4056 /* do nothing, just check for link up at the end */
4057 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4060 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4061 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4062 ADVERTISE_1000XPAUSE |
4063 ADVERTISE_1000XPSE_ASYM |
4066 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4068 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4069 new_adv |= ADVERTISE_1000XHALF;
4070 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4071 new_adv |= ADVERTISE_1000XFULL;
4073 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4074 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4075 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4076 tg3_writephy(tp, MII_BMCR, bmcr);
4078 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4079 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4080 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4087 bmcr &= ~BMCR_SPEED1000;
4088 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4090 if (tp->link_config.duplex == DUPLEX_FULL)
4091 new_bmcr |= BMCR_FULLDPLX;
4093 if (new_bmcr != bmcr) {
4094 /* BMCR_SPEED1000 is a reserved bit that needs
4095 * to be set on write.
4097 new_bmcr |= BMCR_SPEED1000;
4099 /* Force a linkdown */
4100 if (netif_carrier_ok(tp->dev)) {
4103 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4104 adv &= ~(ADVERTISE_1000XFULL |
4105 ADVERTISE_1000XHALF |
4107 tg3_writephy(tp, MII_ADVERTISE, adv);
4108 tg3_writephy(tp, MII_BMCR, bmcr |
4112 netif_carrier_off(tp->dev);
4114 tg3_writephy(tp, MII_BMCR, new_bmcr);
4116 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4117 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4118 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4120 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4121 bmsr |= BMSR_LSTATUS;
4123 bmsr &= ~BMSR_LSTATUS;
4125 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4129 if (bmsr & BMSR_LSTATUS) {
4130 current_speed = SPEED_1000;
4131 current_link_up = 1;
4132 if (bmcr & BMCR_FULLDPLX)
4133 current_duplex = DUPLEX_FULL;
4135 current_duplex = DUPLEX_HALF;
4140 if (bmcr & BMCR_ANENABLE) {
4143 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4144 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4145 common = local_adv & remote_adv;
4146 if (common & (ADVERTISE_1000XHALF |
4147 ADVERTISE_1000XFULL)) {
4148 if (common & ADVERTISE_1000XFULL)
4149 current_duplex = DUPLEX_FULL;
4151 current_duplex = DUPLEX_HALF;
4154 current_link_up = 0;
4158 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4159 tg3_setup_flow_control(tp, local_adv, remote_adv);
4161 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4162 if (tp->link_config.active_duplex == DUPLEX_HALF)
4163 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4165 tw32_f(MAC_MODE, tp->mac_mode);
4168 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4170 tp->link_config.active_speed = current_speed;
4171 tp->link_config.active_duplex = current_duplex;
4173 if (current_link_up != netif_carrier_ok(tp->dev)) {
4174 if (current_link_up)
4175 netif_carrier_on(tp->dev);
4177 netif_carrier_off(tp->dev);
4178 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4180 tg3_link_report(tp);
4185 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4187 if (tp->serdes_counter) {
4188 /* Give autoneg time to complete. */
4189 tp->serdes_counter--;
4192 if (!netif_carrier_ok(tp->dev) &&
4193 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4196 tg3_readphy(tp, MII_BMCR, &bmcr);
4197 if (bmcr & BMCR_ANENABLE) {
4200 /* Select shadow register 0x1f */
4201 tg3_writephy(tp, 0x1c, 0x7c00);
4202 tg3_readphy(tp, 0x1c, &phy1);
4204 /* Select expansion interrupt status register */
4205 tg3_writephy(tp, 0x17, 0x0f01);
4206 tg3_readphy(tp, 0x15, &phy2);
4207 tg3_readphy(tp, 0x15, &phy2);
4209 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4210 /* We have signal detect and not receiving
4211 * config code words, link is up by parallel
4215 bmcr &= ~BMCR_ANENABLE;
4216 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4217 tg3_writephy(tp, MII_BMCR, bmcr);
4218 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
4222 else if (netif_carrier_ok(tp->dev) &&
4223 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4224 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4227 /* Select expansion interrupt status register */
4228 tg3_writephy(tp, 0x17, 0x0f01);
4229 tg3_readphy(tp, 0x15, &phy2);
4233 /* Config code words received, turn on autoneg. */
4234 tg3_readphy(tp, MII_BMCR, &bmcr);
4235 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4237 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4243 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4247 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4248 err = tg3_setup_fiber_phy(tp, force_reset);
4249 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4250 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4252 err = tg3_setup_copper_phy(tp, force_reset);
4255 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4258 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4259 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4261 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4266 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4267 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4268 tw32(GRC_MISC_CFG, val);
4271 if (tp->link_config.active_speed == SPEED_1000 &&
4272 tp->link_config.active_duplex == DUPLEX_HALF)
4273 tw32(MAC_TX_LENGTHS,
4274 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4275 (6 << TX_LENGTHS_IPG_SHIFT) |
4276 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4278 tw32(MAC_TX_LENGTHS,
4279 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4280 (6 << TX_LENGTHS_IPG_SHIFT) |
4281 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4283 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4284 if (netif_carrier_ok(tp->dev)) {
4285 tw32(HOSTCC_STAT_COAL_TICKS,
4286 tp->coal.stats_block_coalesce_usecs);
4288 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4292 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4293 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4294 if (!netif_carrier_ok(tp->dev))
4295 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4298 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4299 tw32(PCIE_PWR_MGMT_THRESH, val);
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * (see tg3_reset_task).
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}

static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	smp_mb();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
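/* Example of the mask arithmetic in tg3_tx_avail(): with
 * TG3_TX_RING_SIZE == 512, tx_prod == 5 and tx_cons == 510,
 * (5 - 510) & 511 == 7, i.e. seven descriptors are still in flight
 * and tx_pending - 7 slots remain postable.  NEXT_TX() relies on the
 * same power-of-two mask to wrap ring indices cheaply.
 */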
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
4337 static void tg3_tx(struct tg3_napi *tnapi)
4339 struct tg3 *tp = tnapi->tp;
4340 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4341 u32 sw_idx = tnapi->tx_cons;
4342 struct netdev_queue *txq;
4343 int index = tnapi - tp->napi;
4345 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
4348 txq = netdev_get_tx_queue(tp->dev, index);
4350 while (sw_idx != hw_idx) {
4351 struct tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
4352 struct sk_buff *skb = ri->skb;
4355 if (unlikely(skb == NULL)) {
4360 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4364 sw_idx = NEXT_TX(sw_idx);
4366 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4367 ri = &tnapi->tx_buffers[sw_idx];
4368 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4370 sw_idx = NEXT_TX(sw_idx);
4375 if (unlikely(tx_bug)) {
4381 tnapi->tx_cons = sw_idx;
4383 /* Need to make the tx_cons update visible to tg3_start_xmit()
4384 * before checking for netif_queue_stopped(). Without the
4385 * memory barrier, there is a small possibility that tg3_start_xmit()
4386 * will miss it and cause the queue to be stopped forever.
4390 if (unlikely(netif_tx_queue_stopped(txq) &&
4391 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4392 __netif_tx_lock(txq, smp_processor_id());
4393 if (netif_tx_queue_stopped(txq) &&
4394 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4395 netif_tx_wake_queue(txq);
4396 __netif_tx_unlock(txq);
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;
	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];

	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tpr->rx_std_buffers[src_idx];
		skb_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tpr->rx_jmb_buffers[src_idx];
		skb_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
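/* Example of the descriptor split above: a 64-bit DMA address of
 * 0x0000000123456789 is posted as addr_hi == 0x00000001 and
 * addr_lo == 0x23456789, so only the first cache line of the
 * descriptor (the address words) is dirtied when posting, per the
 * note above tg3_alloc_rx_skb.
 */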
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
			   int src_idx, u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	int dest_idx;
	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		dest_desc = &tpr->rx_std[dest_idx];
		dest_map = &tpr->rx_std_buffers[dest_idx];
		src_desc = &tpr->rx_std[src_idx];
		src_map = &tpr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		dest_desc = &tpr->rx_jmb[dest_idx].std;
		dest_map = &tpr->rx_jmb_buffers[dest_idx];
		src_desc = &tpr->rx_jmb[src_idx].std;
		src_map = &tpr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->skb = src_map->skb;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	src_map->skb = NULL;
}
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
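/* Concretely, tg3_rx() below recovers the producer ring and buffer
 * index from each status entry's opaque cookie:
 *
 *	desc_idx   = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 *	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
 *
 * so the chip only ever echoes back what the host wrote when it
 * posted the buffer.
 */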
4544 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4546 struct tg3 *tp = tnapi->tp;
4547 u32 work_mask, rx_std_posted = 0;
4548 u32 sw_idx = tnapi->rx_rcb_ptr;
4551 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4553 hw_idx = *(tnapi->rx_rcb_prod_idx);
4555 * We need to order the read of hw_idx and the read of
4556 * the opaque cookie.
4561 while (sw_idx != hw_idx && budget > 0) {
4562 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4564 struct sk_buff *skb;
4565 dma_addr_t dma_addr;
4566 u32 opaque_key, desc_idx, *post_ptr;
4568 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4569 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4570 if (opaque_key == RXD_OPAQUE_RING_STD) {
4571 struct ring_info *ri = &tpr->rx_std_buffers[desc_idx];
4572 dma_addr = pci_unmap_addr(ri, mapping);
4574 post_ptr = &tpr->rx_std_ptr;
4576 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4577 struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx];
4578 dma_addr = pci_unmap_addr(ri, mapping);
4580 post_ptr = &tpr->rx_jmb_ptr;
4582 goto next_pkt_nopost;
4584 work_mask |= opaque_key;
4586 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4587 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4589 tg3_recycle_rx(tnapi, opaque_key,
4590 desc_idx, *post_ptr);
4592 /* Other statistics are tracked by the card. */
4593 tp->net_stats.rx_dropped++;
4597 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4600 if (len > RX_COPY_THRESHOLD
4601 && tp->rx_offset == NET_IP_ALIGN
4602 /* rx_offset will likely not equal NET_IP_ALIGN
4603 * if this is a 5701 card running in PCI-X mode
4604 * [see tg3_get_invariants()]
4609 skb_size = tg3_alloc_rx_skb(tnapi, opaque_key,
4610 desc_idx, *post_ptr);
4614 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4615 PCI_DMA_FROMDEVICE);
4619 struct sk_buff *copy_skb;
4621 tg3_recycle_rx(tnapi, opaque_key,
4622 desc_idx, *post_ptr);
4624 copy_skb = netdev_alloc_skb(tp->dev,
4625 len + TG3_RAW_IP_ALIGN);
4626 if (copy_skb == NULL)
4627 goto drop_it_no_recycle;
4629 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4630 skb_put(copy_skb, len);
4631 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4632 skb_copy_from_linear_data(skb, copy_skb->data, len);
4633 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4635 /* We'll reuse the original ring buffer. */
4639 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4640 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4641 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4642 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4643 skb->ip_summed = CHECKSUM_UNNECESSARY;
4645 skb->ip_summed = CHECKSUM_NONE;
4647 skb->protocol = eth_type_trans(skb, tp->dev);
4649 if (len > (tp->dev->mtu + ETH_HLEN) &&
4650 skb->protocol != htons(ETH_P_8021Q)) {
4655 #if TG3_VLAN_TAG_USED
4656 if (tp->vlgrp != NULL &&
4657 desc->type_flags & RXD_FLAG_VLAN) {
4658 vlan_gro_receive(&tnapi->napi, tp->vlgrp,
4659 desc->err_vlan & RXD_VLAN_MASK, skb);
4662 napi_gro_receive(&tnapi->napi, skb);
4670 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4671 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4673 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4674 TG3_64BIT_REG_LOW, idx);
4675 work_mask &= ~RXD_OPAQUE_RING_STD;
4680 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4682 /* Refresh hw_idx to see if there is new work */
4683 if (sw_idx == hw_idx) {
4684 hw_idx = *(tnapi->rx_rcb_prod_idx);
4689 /* ACK the status ring. */
4690 tnapi->rx_rcb_ptr = sw_idx;
4691 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4693 /* Refill RX ring(s). */
4694 if (work_mask & RXD_OPAQUE_RING_STD) {
4695 sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE;
4696 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4699 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4700 sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE;
4701 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
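/* Illustrative sketch (not part of the driver): the "opaque" cookie the
 * chip echoes back in each status-ring entry packs the source producer
 * ring and the index within that ring, which is how tg3_rx() above finds
 * the original buffer.  The masks are the ones already used in tg3_rx();
 * the helper itself is hypothetical and kept out of the build.
 */
#if 0
static inline void tg3_example_decode_opaque(u32 opaque, u32 *ring, u32 *idx)
{
	*ring = opaque & RXD_OPAQUE_RING_MASK;	/* RING_STD or RING_JUMBO */
	*idx  = opaque & RXD_OPAQUE_INDEX_MASK;	/* slot within that ring */
}
#endif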
4709 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4711 struct tg3 *tp = tnapi->tp;
4712 struct tg3_hw_status *sblk = tnapi->hw_status;
4714 /* handle link change and other phy events */
4715 if (!(tp->tg3_flags &
4716 (TG3_FLAG_USE_LINKCHG_REG |
4717 TG3_FLAG_POLL_SERDES))) {
4718 if (sblk->status & SD_STATUS_LINK_CHG) {
4719 sblk->status = SD_STATUS_UPDATED |
4720 (sblk->status & ~SD_STATUS_LINK_CHG);
4721 spin_lock(&tp->lock);
4722 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4724 (MAC_STATUS_SYNC_CHANGED |
4725 MAC_STATUS_CFG_CHANGED |
4726 MAC_STATUS_MI_COMPLETION |
4727 MAC_STATUS_LNKSTATE_CHANGED));
4730 tg3_setup_phy(tp, 0);
4731 spin_unlock(&tp->lock);
4735 /* run TX completion thread */
4736 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
4738 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4742 /* run RX thread, within the bounds set by NAPI.
4743 * All RX "locking" is done by ensuring outside
4744 * code synchronizes with tg3->napi.poll()
4746 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4747 work_done += tg3_rx(tnapi, budget - work_done);
4752 static int tg3_poll(struct napi_struct *napi, int budget)
4754 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4755 struct tg3 *tp = tnapi->tp;
4757 struct tg3_hw_status *sblk = tnapi->hw_status;
4760 work_done = tg3_poll_work(tnapi, work_done, budget);
4762 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4765 if (unlikely(work_done >= budget))
4768 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4769 /* tnapi->last_tag is used in tg3_int_reenable() below
4770 * to tell the hw how much work has been processed,
4771 * so we must read it before checking for more work.
4773 tnapi->last_tag = sblk->status_tag;
4774 tnapi->last_irq_tag = tnapi->last_tag;
4777 sblk->status &= ~SD_STATUS_UPDATED;
4779 if (likely(!tg3_has_work(tnapi))) {
4780 napi_complete(napi);
4781 tg3_int_reenable(tnapi);
4789 /* work_done is guaranteed to be less than budget. */
4790 napi_complete(napi);
4791 schedule_work(&tp->reset_task);
4795 static void tg3_irq_quiesce(struct tg3 *tp)
4799 BUG_ON(tp->irq_sync);
4804 for (i = 0; i < tp->irq_cnt; i++)
4805 synchronize_irq(tp->napi[i].irq_vec);
4808 static inline int tg3_irq_sync(struct tg3 *tp)
4810 return tp->irq_sync;
4813 /* Fully shut down all tg3 driver activity elsewhere in the system.
4814 * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
4815 * Most of the time this is not necessary, except when shutting down
4816 * the device.
4818 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4820 spin_lock_bh(&tp->lock);
4822 tg3_irq_quiesce(tp);
4825 static inline void tg3_full_unlock(struct tg3 *tp)
4827 spin_unlock_bh(&tp->lock);
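/* Illustrative usage sketch (assumption drawn from callers such as
 * tg3_change_mtu() further below, not a new driver entry point): a
 * typical reconfiguration path takes the full lock with irq_sync set so
 * the IRQ handlers are quiesced, reprograms the hardware, and then
 * releases the lock.  Kept out of the build with #if 0.
 */
#if 0
static void tg3_example_reconfigure(struct tg3 *tp)
{
	tg3_full_lock(tp, 1);			/* irq_sync=1: also quiesce IRQ handlers */
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	/* ... reprogram rings, MTU, etc. ... */
	tg3_restart_hw(tp, 0);
	tg3_netif_start(tp);
	tg3_full_unlock(tp);
}
#endif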
4830 /* One-shot MSI handler - Chip automatically disables interrupt
4831 * after sending MSI so driver doesn't have to do it.
4833 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4835 struct tg3_napi *tnapi = dev_id;
4836 struct tg3 *tp = tnapi->tp;
4838 prefetch(tnapi->hw_status);
4840 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4842 if (likely(!tg3_irq_sync(tp)))
4843 napi_schedule(&tnapi->napi);
4848 /* MSI ISR - No need to check for interrupt sharing and no need to
4849 * flush status block and interrupt mailbox. PCI ordering rules
4850 * guarantee that MSI will arrive after the status block.
4852 static irqreturn_t tg3_msi(int irq, void *dev_id)
4854 struct tg3_napi *tnapi = dev_id;
4855 struct tg3 *tp = tnapi->tp;
4857 prefetch(tnapi->hw_status);
4859 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4861 * Writing any value to intr-mbox-0 clears PCI INTA# and
4862 * chip-internal interrupt pending events.
4863 * Writing non-zero to intr-mbox-0 additionally tells the
4864 * NIC to stop sending us irqs, engaging "in-intr-handler"
4867 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4868 if (likely(!tg3_irq_sync(tp)))
4869 napi_schedule(&tnapi->napi);
4871 return IRQ_RETVAL(1);
4874 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4876 struct tg3_napi *tnapi = dev_id;
4877 struct tg3 *tp = tnapi->tp;
4878 struct tg3_hw_status *sblk = tnapi->hw_status;
4879 unsigned int handled = 1;
4881 /* In INTx mode, the interrupt can arrive at the CPU before the
4882 * status block posted just prior to the interrupt is visible in host memory.
4883 * Reading the PCI State register will confirm whether the
4884 * interrupt is ours and will flush the status block.
4886 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4887 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4888 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4895 * Writing any value to intr-mbox-0 clears PCI INTA# and
4896 * chip-internal interrupt pending events.
4897 * Writing non-zero to intr-mbox-0 additionally tells the
4898 * NIC to stop sending us irqs, engaging "in-intr-handler"
4901 * Flush the mailbox to de-assert the IRQ immediately to prevent
4902 * spurious interrupts. The flush impacts performance but
4903 * excessive spurious interrupts can be worse in some cases.
4905 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4906 if (tg3_irq_sync(tp))
4908 sblk->status &= ~SD_STATUS_UPDATED;
4909 if (likely(tg3_has_work(tnapi))) {
4910 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4911 napi_schedule(&tnapi->napi);
4913 /* No work, shared interrupt perhaps? re-enable
4914 * interrupts, and flush that PCI write
4916 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4920 return IRQ_RETVAL(handled);
4923 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4925 struct tg3_napi *tnapi = dev_id;
4926 struct tg3 *tp = tnapi->tp;
4927 struct tg3_hw_status *sblk = tnapi->hw_status;
4928 unsigned int handled = 1;
4930 /* In INTx mode, the interrupt can arrive at the CPU before the
4931 * status block posted just prior to the interrupt is visible in host memory.
4932 * Reading the PCI State register will confirm whether the
4933 * interrupt is ours and will flush the status block.
4935 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
4936 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4937 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4944 * Writing any value to intr-mbox-0 clears PCI INTA# and
4945 * chip-internal interrupt pending events.
4946 * Writing non-zero to intr-mbox-0 additionally tells the
4947 * NIC to stop sending us irqs, engaging "in-intr-handler"
4950 * Flush the mailbox to de-assert the IRQ immediately to prevent
4951 * spurious interrupts. The flush impacts performance but
4952 * excessive spurious interrupts can be worse in some cases.
4954 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4957 * In a shared interrupt configuration, sometimes other devices'
4958 * interrupts will scream. We record the current status tag here
4959 * so that the above check can report that the screaming interrupts
4960 * are unhandled. Eventually they will be silenced.
4962 tnapi->last_irq_tag = sblk->status_tag;
4964 if (tg3_irq_sync(tp))
4967 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4969 napi_schedule(&tnapi->napi);
4972 return IRQ_RETVAL(handled);
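/* Note (summarizing the two handlers above): tg3_interrupt() decides
 * whether the interrupt is ours by testing SD_STATUS_UPDATED in the
 * status block, while tg3_interrupt_tagged() compares the status tag
 * against the last tag it handled, which lets it reject a re-raised
 * ("screaming") shared interrupt without another status-block update.
 */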
4975 /* ISR for interrupt test */
4976 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4978 struct tg3_napi *tnapi = dev_id;
4979 struct tg3 *tp = tnapi->tp;
4980 struct tg3_hw_status *sblk = tnapi->hw_status;
4982 if ((sblk->status & SD_STATUS_UPDATED) ||
4983 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4984 tg3_disable_ints(tp);
4985 return IRQ_RETVAL(1);
4987 return IRQ_RETVAL(0);
4990 static int tg3_init_hw(struct tg3 *, int);
4991 static int tg3_halt(struct tg3 *, int, int);
4993 /* Restart hardware after configuration changes, self-test, etc.
4994 * Invoked with tp->lock held.
4996 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4997 __releases(tp->lock)
4998 __acquires(tp->lock)
5002 err = tg3_init_hw(tp, reset_phy);
5004 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
5005 "aborting.\n", tp->dev->name);
5006 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5007 tg3_full_unlock(tp);
5008 del_timer_sync(&tp->timer);
5010 tg3_napi_enable(tp);
5012 tg3_full_lock(tp, 0);
5017 #ifdef CONFIG_NET_POLL_CONTROLLER
5018 static void tg3_poll_controller(struct net_device *dev)
5021 struct tg3 *tp = netdev_priv(dev);
5023 for (i = 0; i < tp->irq_cnt; i++)
5024 tg3_interrupt(tp->napi[i].irq_vec, dev);
5028 static void tg3_reset_task(struct work_struct *work)
5030 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5032 unsigned int restart_timer;
5034 tg3_full_lock(tp, 0);
5036 if (!netif_running(tp->dev)) {
5037 tg3_full_unlock(tp);
5041 tg3_full_unlock(tp);
5047 tg3_full_lock(tp, 1);
5049 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5050 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5052 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5053 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5054 tp->write32_rx_mbox = tg3_write_flush_reg32;
5055 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5056 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5059 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5060 err = tg3_init_hw(tp, 1);
5064 tg3_netif_start(tp);
5067 mod_timer(&tp->timer, jiffies + 1);
5070 tg3_full_unlock(tp);
5076 static void tg3_dump_short_state(struct tg3 *tp)
5078 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5079 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5080 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5081 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5084 static void tg3_tx_timeout(struct net_device *dev)
5086 struct tg3 *tp = netdev_priv(dev);
5088 if (netif_msg_tx_err(tp)) {
5089 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
5091 tg3_dump_short_state(tp);
5094 schedule_work(&tp->reset_task);
5097 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5098 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5100 u32 base = (u32) mapping & 0xffffffff;
5102 return ((base > 0xffffdcc0) &&
5103 (base + len + 8 < base));
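/* Worked example (illustrative values): mapping = 0xfffff000, len = 0x2000.
 * base = 0xfffff000 > 0xffffdcc0, and base + len + 8 wraps to 0x00001008,
 * which is less than base, so the buffer would straddle a 4GB boundary
 * and the test returns true.
 */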
5106 /* Test for DMA addresses > 40-bit */
5107 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5110 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5111 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5112 return (((u64) mapping + len) > DMA_BIT_MASK(40));
5119 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5121 /* Work around 4GB and 40-bit hardware DMA bugs. */
5122 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5123 struct sk_buff *skb, u32 last_plus_one,
5124 u32 *start, u32 base_flags, u32 mss)
5126 struct tg3 *tp = tnapi->tp;
5127 struct sk_buff *new_skb;
5128 dma_addr_t new_addr = 0;
5132 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5133 new_skb = skb_copy(skb, GFP_ATOMIC);
5135 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5137 new_skb = skb_copy_expand(skb,
5138 skb_headroom(skb) + more_headroom,
5139 skb_tailroom(skb), GFP_ATOMIC);
5145 /* New SKB is guaranteed to be linear. */
5147 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
5148 new_addr = skb_shinfo(new_skb)->dma_head;
5150 /* Make sure new skb does not cross any 4G boundaries.
5151 * Drop the packet if it does.
5153 if (ret || ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5154 tg3_4g_overflow_test(new_addr, new_skb->len))) {
5156 skb_dma_unmap(&tp->pdev->dev, new_skb,
5159 dev_kfree_skb(new_skb);
5162 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5163 base_flags, 1 | (mss << 1));
5164 *start = NEXT_TX(entry);
5168 /* Now clean up the sw ring entries. */
5170 while (entry != last_plus_one) {
5172 tnapi->tx_buffers[entry].skb = new_skb;
5174 tnapi->tx_buffers[entry].skb = NULL;
5175 entry = NEXT_TX(entry);
5179 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5185 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5186 dma_addr_t mapping, int len, u32 flags,
5189 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5190 int is_end = (mss_and_is_end & 0x1);
5191 u32 mss = (mss_and_is_end >> 1);
5195 flags |= TXD_FLAG_END;
5196 if (flags & TXD_FLAG_VLAN) {
5197 vlan_tag = flags >> 16;
5200 vlan_tag |= (mss << TXD_MSS_SHIFT);
5202 txd->addr_hi = ((u64) mapping >> 32);
5203 txd->addr_lo = ((u64) mapping & 0xffffffff);
5204 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5205 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
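/* Illustrative note: callers pack the end-of-packet flag and the MSS into
 * the mss_and_is_end argument as (is_end | (mss << 1)), which the code
 * above unpacks with "& 0x1" and ">> 1".  For example, the final
 * descriptor of a TSO packet with an MSS of 1448 is set up with
 * 1 | (1448 << 1) == 0xb51.
 */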
5208 /* hard_start_xmit for devices that don't have any bugs and
5209 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5211 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5212 struct net_device *dev)
5214 struct tg3 *tp = netdev_priv(dev);
5215 u32 len, entry, base_flags, mss;
5216 struct skb_shared_info *sp;
5218 struct tg3_napi *tnapi;
5219 struct netdev_queue *txq;
5221 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5222 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5223 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
5226 /* We are running in BH disabled context with netif_tx_lock
5227 * and TX reclaim runs via tp->napi.poll inside of a software
5228 * interrupt. Furthermore, IRQ processing runs lockless so we have
5229 * no IRQ context deadlocks to worry about either. Rejoice!
5231 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5232 if (!netif_tx_queue_stopped(txq)) {
5233 netif_tx_stop_queue(txq);
5235 /* This is a hard error, log it. */
5236 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5237 "queue awake!\n", dev->name);
5239 return NETDEV_TX_BUSY;
5242 entry = tnapi->tx_prod;
5245 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5246 int tcp_opt_len, ip_tcp_len;
5249 if (skb_header_cloned(skb) &&
5250 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5255 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
5256 hdrlen = skb_headlen(skb) - ETH_HLEN;
5258 struct iphdr *iph = ip_hdr(skb);
5260 tcp_opt_len = tcp_optlen(skb);
5261 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5264 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5265 hdrlen = ip_tcp_len + tcp_opt_len;
5268 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5269 mss |= (hdrlen & 0xc) << 12;
5271 base_flags |= 0x00000010;
5272 base_flags |= (hdrlen & 0x3e0) << 5;
5276 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5277 TXD_FLAG_CPU_POST_DMA);
5279 tcp_hdr(skb)->check = 0;
5282 else if (skb->ip_summed == CHECKSUM_PARTIAL)
5283 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5284 #if TG3_VLAN_TAG_USED
5285 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5286 base_flags |= (TXD_FLAG_VLAN |
5287 (vlan_tx_tag_get(skb) << 16));
5290 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5295 sp = skb_shinfo(skb);
5297 mapping = sp->dma_head;
5299 tnapi->tx_buffers[entry].skb = skb;
5301 len = skb_headlen(skb);
5303 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
5304 !mss && skb->len > ETH_DATA_LEN)
5305 base_flags |= TXD_FLAG_JMB_PKT;
5307 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5308 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5310 entry = NEXT_TX(entry);
5312 /* Now loop through additional data fragments, and queue them. */
5313 if (skb_shinfo(skb)->nr_frags > 0) {
5314 unsigned int i, last;
5316 last = skb_shinfo(skb)->nr_frags - 1;
5317 for (i = 0; i <= last; i++) {
5318 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5321 mapping = sp->dma_maps[i];
5322 tnapi->tx_buffers[entry].skb = NULL;
5324 tg3_set_txd(tnapi, entry, mapping, len,
5325 base_flags, (i == last) | (mss << 1));
5327 entry = NEXT_TX(entry);
5331 /* Packets are ready, update Tx producer idx local and on card. */
5332 tw32_tx_mbox(tnapi->prodmbox, entry);
5334 tnapi->tx_prod = entry;
5335 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5336 netif_tx_stop_queue(txq);
5337 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5338 netif_tx_wake_queue(txq);
5344 return NETDEV_TX_OK;
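/* Note on the stop/wake logic above: the queue is stopped once no more
 * than MAX_SKB_FRAGS + 1 descriptors remain (the most a single skb can
 * consume), and is only woken again when availability climbs back above
 * TG3_TX_WAKEUP_THRESH(), giving some hysteresis so the queue does not
 * bounce between stopped and awake on every completed packet.
 */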
5347 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5348 struct net_device *);
5350 /* Use GSO to work around a rare TSO bug that may be triggered when the
5351 * TSO header is greater than 80 bytes.
5353 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5355 struct sk_buff *segs, *nskb;
5356 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5358 /* Estimate the number of fragments in the worst case */
5359 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5360 netif_stop_queue(tp->dev);
5361 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5362 return NETDEV_TX_BUSY;
5364 netif_wake_queue(tp->dev);
5367 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5369 goto tg3_tso_bug_end;
5375 tg3_start_xmit_dma_bug(nskb, tp->dev);
5381 return NETDEV_TX_OK;
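/* Note on the estimate above: each software-segmented skb produced by
 * skb_gso_segment() is assumed to need up to three TX descriptors
 * (hence gso_segs * 3); if that many descriptors are not free, the
 * packet is pushed back with NETDEV_TX_BUSY rather than risking a ring
 * overrun partway through the segment list.
 */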
5384 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5385 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5387 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5388 struct net_device *dev)
5390 struct tg3 *tp = netdev_priv(dev);
5391 u32 len, entry, base_flags, mss;
5392 struct skb_shared_info *sp;
5393 int would_hit_hwbug;
5395 struct tg3_napi *tnapi;
5396 struct netdev_queue *txq;
5398 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5399 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5400 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
5403 /* We are running in BH disabled context with netif_tx_lock
5404 * and TX reclaim runs via tp->napi.poll inside of a software
5405 * interrupt. Furthermore, IRQ processing runs lockless so we have
5406 * no IRQ context deadlocks to worry about either. Rejoice!
5408 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5409 if (!netif_tx_queue_stopped(txq)) {
5410 netif_tx_stop_queue(txq);
5412 /* This is a hard error, log it. */
5413 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5414 "queue awake!\n", dev->name);
5416 return NETDEV_TX_BUSY;
5419 entry = tnapi->tx_prod;
5421 if (skb->ip_summed == CHECKSUM_PARTIAL)
5422 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5424 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5426 u32 tcp_opt_len, ip_tcp_len, hdr_len;
5428 if (skb_header_cloned(skb) &&
5429 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5434 tcp_opt_len = tcp_optlen(skb);
5435 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5437 hdr_len = ip_tcp_len + tcp_opt_len;
5438 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5439 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5440 return (tg3_tso_bug(tp, skb));
5442 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5443 TXD_FLAG_CPU_POST_DMA);
5447 iph->tot_len = htons(mss + hdr_len);
5448 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5449 tcp_hdr(skb)->check = 0;
5450 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5452 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5457 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5458 mss |= (hdr_len & 0xc) << 12;
5460 base_flags |= 0x00000010;
5461 base_flags |= (hdr_len & 0x3e0) << 5;
5462 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5463 mss |= hdr_len << 9;
5464 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5465 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5466 if (tcp_opt_len || iph->ihl > 5) {
5469 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5470 mss |= (tsflags << 11);
5473 if (tcp_opt_len || iph->ihl > 5) {
5476 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5477 base_flags |= tsflags << 12;
5481 #if TG3_VLAN_TAG_USED
5482 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5483 base_flags |= (TXD_FLAG_VLAN |
5484 (vlan_tx_tag_get(skb) << 16));
5487 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
5488 !mss && skb->len > ETH_DATA_LEN)
5489 base_flags |= TXD_FLAG_JMB_PKT;
5491 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5496 sp = skb_shinfo(skb);
5498 mapping = sp->dma_head;
5500 tnapi->tx_buffers[entry].skb = skb;
5502 would_hit_hwbug = 0;
5504 len = skb_headlen(skb);
5506 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
5507 would_hit_hwbug = 1;
5509 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5510 tg3_4g_overflow_test(mapping, len))
5511 would_hit_hwbug = 1;
5513 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5514 tg3_40bit_overflow_test(tp, mapping, len))
5515 would_hit_hwbug = 1;
5517 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5518 would_hit_hwbug = 1;
5520 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5521 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5523 entry = NEXT_TX(entry);
5525 /* Now loop through additional data fragments, and queue them. */
5526 if (skb_shinfo(skb)->nr_frags > 0) {
5527 unsigned int i, last;
5529 last = skb_shinfo(skb)->nr_frags - 1;
5530 for (i = 0; i <= last; i++) {
5531 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5534 mapping = sp->dma_maps[i];
5536 tnapi->tx_buffers[entry].skb = NULL;
5538 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
5540 would_hit_hwbug = 1;
5542 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5543 tg3_4g_overflow_test(mapping, len))
5544 would_hit_hwbug = 1;
5546 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5547 tg3_40bit_overflow_test(tp, mapping, len))
5548 would_hit_hwbug = 1;
5550 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5551 tg3_set_txd(tnapi, entry, mapping, len,
5552 base_flags, (i == last)|(mss << 1));
5554 tg3_set_txd(tnapi, entry, mapping, len,
5555 base_flags, (i == last));
5557 entry = NEXT_TX(entry);
5561 if (would_hit_hwbug) {
5562 u32 last_plus_one = entry;
5565 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5566 start &= (TG3_TX_RING_SIZE - 1);
5568 /* If the workaround fails due to memory/mapping
5569 * failure, silently drop this packet.
5571 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
5572 &start, base_flags, mss))
5578 /* Packets are ready, update Tx producer idx local and on card. */
5579 tw32_tx_mbox(tnapi->prodmbox, entry);
5581 tnapi->tx_prod = entry;
5582 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5583 netif_tx_stop_queue(txq);
5584 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5585 netif_tx_wake_queue(txq);
5591 return NETDEV_TX_OK;
5594 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5599 if (new_mtu > ETH_DATA_LEN) {
5600 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5601 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5602 ethtool_op_set_tso(dev, 0);
5605 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5607 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5608 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5609 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5613 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5615 struct tg3 *tp = netdev_priv(dev);
5618 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5621 if (!netif_running(dev)) {
5622 /* We'll just catch it later when the device is brought up. */
5625 tg3_set_mtu(dev, tp, new_mtu);
5633 tg3_full_lock(tp, 1);
5635 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5637 tg3_set_mtu(dev, tp, new_mtu);
5639 err = tg3_restart_hw(tp, 0);
5642 tg3_netif_start(tp);
5644 tg3_full_unlock(tp);
5652 static void tg3_rx_prodring_free(struct tg3 *tp,
5653 struct tg3_rx_prodring_set *tpr)
5656 struct ring_info *rxp;
5658 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5659 rxp = &tpr->rx_std_buffers[i];
5661 if (rxp->skb == NULL)
5664 pci_unmap_single(tp->pdev,
5665 pci_unmap_addr(rxp, mapping),
5667 PCI_DMA_FROMDEVICE);
5668 dev_kfree_skb_any(rxp->skb);
5672 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5673 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5674 rxp = &tpr->rx_jmb_buffers[i];
5676 if (rxp->skb == NULL)
5679 pci_unmap_single(tp->pdev,
5680 pci_unmap_addr(rxp, mapping),
5682 PCI_DMA_FROMDEVICE);
5683 dev_kfree_skb_any(rxp->skb);
5689 /* Initialize the rx producer rings for packet processing.
5691 * The chip has been shut down and the driver detached from
5692 * the networking stack, so no interrupts or new tx packets will
5693 * end up in the driver. tp->{tx,}lock are held and thus
5696 static int tg3_rx_prodring_alloc(struct tg3 *tp,
5697 struct tg3_rx_prodring_set *tpr)
5699 u32 i, rx_pkt_dma_sz;
5700 struct tg3_napi *tnapi = &tp->napi[0];
5702 /* Zero out all descriptors. */
5703 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
5705 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
5706 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5707 tp->dev->mtu > ETH_DATA_LEN)
5708 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
5709 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
5711 /* Initialize the invariant parts of the rings; we only set
5712 * this up once. This works because the card does not
5713 * write into the rx buffer posting rings.
5715 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5716 struct tg3_rx_buffer_desc *rxd;
5718 rxd = &tpr->rx_std[i];
5719 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
5720 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5721 rxd->opaque = (RXD_OPAQUE_RING_STD |
5722 (i << RXD_OPAQUE_INDEX_SHIFT));
5725 /* Now allocate fresh SKBs for each rx ring. */
5726 for (i = 0; i < tp->rx_pending; i++) {
5727 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5728 printk(KERN_WARNING PFX
5729 "%s: Using a smaller RX standard ring, "
5730 "only %d out of %d buffers were allocated "
5732 tp->dev->name, i, tp->rx_pending);
5740 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
5743 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
5745 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5746 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5747 struct tg3_rx_buffer_desc *rxd;
5749 rxd = &tpr->rx_jmb[i].std;
5750 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
5751 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5753 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5754 (i << RXD_OPAQUE_INDEX_SHIFT));
5757 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5758 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO,
5760 printk(KERN_WARNING PFX
5761 "%s: Using a smaller RX jumbo ring, "
5762 "only %d out of %d buffers were "
5763 "allocated successfully.\n",
5764 tp->dev->name, i, tp->rx_jumbo_pending);
5767 tp->rx_jumbo_pending = i;
5777 tg3_rx_prodring_free(tp, tpr);
5781 static void tg3_rx_prodring_fini(struct tg3 *tp,
5782 struct tg3_rx_prodring_set *tpr)
5784 kfree(tpr->rx_std_buffers);
5785 tpr->rx_std_buffers = NULL;
5786 kfree(tpr->rx_jmb_buffers);
5787 tpr->rx_jmb_buffers = NULL;
5789 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5790 tpr->rx_std, tpr->rx_std_mapping);
5794 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5795 tpr->rx_jmb, tpr->rx_jmb_mapping);
5800 static int tg3_rx_prodring_init(struct tg3 *tp,
5801 struct tg3_rx_prodring_set *tpr)
5803 tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) *
5804 TG3_RX_RING_SIZE, GFP_KERNEL);
5805 if (!tpr->rx_std_buffers)
5808 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5809 &tpr->rx_std_mapping);
5813 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5814 tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) *
5815 TG3_RX_JUMBO_RING_SIZE,
5817 if (!tpr->rx_jmb_buffers)
5820 tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
5821 TG3_RX_JUMBO_RING_BYTES,
5822 &tpr->rx_jmb_mapping);
5830 tg3_rx_prodring_fini(tp, tpr);
5834 /* Free up pending packets in all rx/tx rings.
5836 * The chip has been shut down and the driver detached from
5837 * the networking stack, so no interrupts or new tx packets will
5838 * end up in the driver. tp->{tx,}lock is not held and we are not
5839 * in an interrupt context and thus may sleep.
5841 static void tg3_free_rings(struct tg3 *tp)
5845 for (j = 0; j < tp->irq_cnt; j++) {
5846 struct tg3_napi *tnapi = &tp->napi[j];
5848 if (!tnapi->tx_buffers)
5851 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5852 struct tx_ring_info *txp;
5853 struct sk_buff *skb;
5855 txp = &tnapi->tx_buffers[i];
5863 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5867 i += skb_shinfo(skb)->nr_frags + 1;
5869 dev_kfree_skb_any(skb);
5873 tg3_rx_prodring_free(tp, &tp->prodring[0]);
5876 /* Initialize tx/rx rings for packet processing.
5878 * The chip has been shut down and the driver detached from
5879 * the networking stack, so no interrupts or new tx packets will
5880 * end up in the driver. tp->{tx,}lock are held and thus
5883 static int tg3_init_rings(struct tg3 *tp)
5887 /* Free up all the SKBs. */
5890 for (i = 0; i < tp->irq_cnt; i++) {
5891 struct tg3_napi *tnapi = &tp->napi[i];
5893 tnapi->last_tag = 0;
5894 tnapi->last_irq_tag = 0;
5895 tnapi->hw_status->status = 0;
5896 tnapi->hw_status->status_tag = 0;
5897 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
5902 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
5904 tnapi->rx_rcb_ptr = 0;
5906 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5909 return tg3_rx_prodring_alloc(tp, &tp->prodring[0]);
5913 * Must not be invoked with interrupt sources disabled and
5914 * the hardware shut down.
5916 static void tg3_free_consistent(struct tg3 *tp)
5920 for (i = 0; i < tp->irq_cnt; i++) {
5921 struct tg3_napi *tnapi = &tp->napi[i];
5923 if (tnapi->tx_ring) {
5924 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5925 tnapi->tx_ring, tnapi->tx_desc_mapping);
5926 tnapi->tx_ring = NULL;
5929 kfree(tnapi->tx_buffers);
5930 tnapi->tx_buffers = NULL;
5932 if (tnapi->rx_rcb) {
5933 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5935 tnapi->rx_rcb_mapping);
5936 tnapi->rx_rcb = NULL;
5939 if (tnapi->hw_status) {
5940 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5942 tnapi->status_mapping);
5943 tnapi->hw_status = NULL;
5948 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5949 tp->hw_stats, tp->stats_mapping);
5950 tp->hw_stats = NULL;
5953 tg3_rx_prodring_fini(tp, &tp->prodring[0]);
5957 * Must not be invoked with interrupt sources disabled and
5958 * the hardware shut down. Can sleep.
5960 static int tg3_alloc_consistent(struct tg3 *tp)
5964 if (tg3_rx_prodring_init(tp, &tp->prodring[0]))
5967 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5968 sizeof(struct tg3_hw_stats),
5969 &tp->stats_mapping);
5973 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5975 for (i = 0; i < tp->irq_cnt; i++) {
5976 struct tg3_napi *tnapi = &tp->napi[i];
5977 struct tg3_hw_status *sblk;
5979 tnapi->hw_status = pci_alloc_consistent(tp->pdev,
5981 &tnapi->status_mapping);
5982 if (!tnapi->hw_status)
5985 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
5986 sblk = tnapi->hw_status;
5989 * When RSS is enabled, the status block format changes
5990 * slightly. The "rx_jumbo_consumer", "reserved",
5991 * and "rx_mini_consumer" members get mapped to the
5992 * other three rx return ring producer indexes.
5996 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
5999 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6002 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6005 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6010 * If multivector RSS is enabled, vector 0 does not handle
6011 * rx or tx interrupts. Don't allocate any resources for it.
6013 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6016 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
6017 TG3_RX_RCB_RING_BYTES(tp),
6018 &tnapi->rx_rcb_mapping);
6022 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6024 tnapi->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
6025 TG3_TX_RING_SIZE, GFP_KERNEL);
6026 if (!tnapi->tx_buffers)
6029 tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
6031 &tnapi->tx_desc_mapping);
6032 if (!tnapi->tx_ring)
6039 tg3_free_consistent(tp);
6043 #define MAX_WAIT_CNT 1000
6045 /* To stop a block, clear the enable bit and poll till it
6046 * clears. tp->lock is held.
6048 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6053 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6060 /* We can't enable/disable these bits of the
6061 * 5705/5750, so just report success.
6074 for (i = 0; i < MAX_WAIT_CNT; i++) {
6077 if ((val & enable_bit) == 0)
6081 if (i == MAX_WAIT_CNT && !silent) {
6082 printk(KERN_ERR PFX "tg3_stop_block timed out, "
6083 "ofs=%lx enable_bit=%x\n",
6091 /* tp->lock is held. */
6092 static int tg3_abort_hw(struct tg3 *tp, int silent)
6096 tg3_disable_ints(tp);
6098 tp->rx_mode &= ~RX_MODE_ENABLE;
6099 tw32_f(MAC_RX_MODE, tp->rx_mode);
6102 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6103 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6104 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6105 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6106 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6107 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6109 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6110 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6111 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6112 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6113 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6114 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6115 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6117 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6118 tw32_f(MAC_MODE, tp->mac_mode);
6121 tp->tx_mode &= ~TX_MODE_ENABLE;
6122 tw32_f(MAC_TX_MODE, tp->tx_mode);
6124 for (i = 0; i < MAX_WAIT_CNT; i++) {
6126 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6129 if (i >= MAX_WAIT_CNT) {
6130 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
6131 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
6132 tp->dev->name, tr32(MAC_TX_MODE));
6136 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6137 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6138 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6140 tw32(FTQ_RESET, 0xffffffff);
6141 tw32(FTQ_RESET, 0x00000000);
6143 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6144 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6146 for (i = 0; i < tp->irq_cnt; i++) {
6147 struct tg3_napi *tnapi = &tp->napi[i];
6148 if (tnapi->hw_status)
6149 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6152 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6157 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6162 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6163 if (apedata != APE_SEG_SIG_MAGIC)
6166 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6167 if (!(apedata & APE_FW_STATUS_READY))
6170 /* Wait for up to 1 millisecond for APE to service previous event. */
6171 for (i = 0; i < 10; i++) {
6172 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6175 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6177 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6178 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6179 event | APE_EVENT_STATUS_EVENT_PENDING);
6181 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6183 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6189 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6190 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6193 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6198 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6202 case RESET_KIND_INIT:
6203 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6204 APE_HOST_SEG_SIG_MAGIC);
6205 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6206 APE_HOST_SEG_LEN_MAGIC);
6207 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6208 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6209 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6210 APE_HOST_DRIVER_ID_MAGIC);
6211 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6212 APE_HOST_BEHAV_NO_PHYLOCK);
6214 event = APE_EVENT_STATUS_STATE_START;
6216 case RESET_KIND_SHUTDOWN:
6217 /* With the interface we are currently using,
6218 * APE does not track driver state. Wiping
6219 * out the HOST SEGMENT SIGNATURE forces
6220 * the APE to assume OS absent status.
6222 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6224 event = APE_EVENT_STATUS_STATE_UNLOAD;
6226 case RESET_KIND_SUSPEND:
6227 event = APE_EVENT_STATUS_STATE_SUSPEND;
6233 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6235 tg3_ape_send_event(tp, event);
6238 /* tp->lock is held. */
6239 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6241 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6242 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6244 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6246 case RESET_KIND_INIT:
6247 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6251 case RESET_KIND_SHUTDOWN:
6252 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6256 case RESET_KIND_SUSPEND:
6257 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6266 if (kind == RESET_KIND_INIT ||
6267 kind == RESET_KIND_SUSPEND)
6268 tg3_ape_driver_state_change(tp, kind);
6271 /* tp->lock is held. */
6272 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6274 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6276 case RESET_KIND_INIT:
6277 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6278 DRV_STATE_START_DONE);
6281 case RESET_KIND_SHUTDOWN:
6282 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6283 DRV_STATE_UNLOAD_DONE);
6291 if (kind == RESET_KIND_SHUTDOWN)
6292 tg3_ape_driver_state_change(tp, kind);
6295 /* tp->lock is held. */
6296 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6298 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6300 case RESET_KIND_INIT:
6301 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6305 case RESET_KIND_SHUTDOWN:
6306 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6310 case RESET_KIND_SUSPEND:
6311 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6321 static int tg3_poll_fw(struct tg3 *tp)
6326 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6327 /* Wait up to 20ms for init done. */
6328 for (i = 0; i < 200; i++) {
6329 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6336 /* Wait for firmware initialization to complete. */
6337 for (i = 0; i < 100000; i++) {
6338 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6339 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6344 /* Chip might not be fitted with firmware. Some Sun onboard
6345 * parts are configured like that. So don't signal the timeout
6346 * of the above loop as an error, but do report the lack of
6347 * running firmware once.
6350 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6351 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6353 printk(KERN_INFO PFX "%s: No firmware running.\n",
6360 /* Save PCI command register before chip reset */
6361 static void tg3_save_pci_state(struct tg3 *tp)
6363 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6366 /* Restore PCI state after chip reset */
6367 static void tg3_restore_pci_state(struct tg3 *tp)
6371 /* Re-enable indirect register accesses. */
6372 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6373 tp->misc_host_ctrl);
6375 /* Set MAX PCI retry to zero. */
6376 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6377 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6378 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6379 val |= PCISTATE_RETRY_SAME_DMA;
6380 /* Allow reads and writes to the APE register and memory space. */
6381 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6382 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6383 PCISTATE_ALLOW_APE_SHMEM_WR;
6384 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6386 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6388 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6389 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6390 pcie_set_readrq(tp->pdev, 4096);
6392 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6393 tp->pci_cacheline_sz);
6394 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6399 /* Make sure PCI-X relaxed ordering bit is clear. */
6400 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6403 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6405 pcix_cmd &= ~PCI_X_CMD_ERO;
6406 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6410 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6412 /* Chip reset on 5780 will reset MSI enable bit,
6413 * so we need to restore it.
6415 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6418 pci_read_config_word(tp->pdev,
6419 tp->msi_cap + PCI_MSI_FLAGS,
6421 pci_write_config_word(tp->pdev,
6422 tp->msi_cap + PCI_MSI_FLAGS,
6423 ctrl | PCI_MSI_FLAGS_ENABLE);
6424 val = tr32(MSGINT_MODE);
6425 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
6430 static void tg3_stop_fw(struct tg3 *);
6432 /* tp->lock is held. */
6433 static int tg3_chip_reset(struct tg3 *tp)
6436 void (*write_op)(struct tg3 *, u32, u32);
6441 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6443 /* No matching tg3_nvram_unlock() after this because
6444 * chip reset below will undo the nvram lock.
6446 tp->nvram_lock_cnt = 0;
6448 /* GRC_MISC_CFG core clock reset will clear the memory
6449 * enable bit in PCI register 4 and the MSI enable bit
6450 * on some chips, so we save relevant registers here.
6452 tg3_save_pci_state(tp);
6454 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
6455 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
6456 tw32(GRC_FASTBOOT_PC, 0);
6459 * We must avoid the readl() that normally takes place.
6460 * It locks machines, causes machine checks, and other
6461 * fun things. So, temporarily disable the 5701
6462 * hardware workaround, while we do the reset.
6464 write_op = tp->write32;
6465 if (write_op == tg3_write_flush_reg32)
6466 tp->write32 = tg3_write32;
6468 /* Prevent the irq handler from reading or writing PCI registers
6469 * during chip reset when the memory enable bit in the PCI command
6470 * register may be cleared. The chip does not generate interrupt
6471 * at this time, but the irq handler may still be called due to irq
6472 * sharing or irqpoll.
6474 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6475 for (i = 0; i < tp->irq_cnt; i++) {
6476 struct tg3_napi *tnapi = &tp->napi[i];
6477 if (tnapi->hw_status) {
6478 tnapi->hw_status->status = 0;
6479 tnapi->hw_status->status_tag = 0;
6481 tnapi->last_tag = 0;
6482 tnapi->last_irq_tag = 0;
6486 for (i = 0; i < tp->irq_cnt; i++)
6487 synchronize_irq(tp->napi[i].irq_vec);
6489 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6490 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
6491 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
6495 val = GRC_MISC_CFG_CORECLK_RESET;
6497 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6498 if (tr32(0x7e2c) == 0x60) {
6501 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6502 tw32(GRC_MISC_CFG, (1 << 29));
6507 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6508 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6509 tw32(GRC_VCPU_EXT_CTRL,
6510 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6513 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6514 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6515 tw32(GRC_MISC_CFG, val);
6517 /* restore 5701 hardware bug workaround write method */
6518 tp->write32 = write_op;
6520 /* Unfortunately, we have to delay before the PCI read back.
6521 * Some 575X chips will not even respond to a PCI cfg access
6522 * when the reset command is given to the chip.
6524 * How do these hardware designers expect things to work
6525 * properly if the PCI write is posted for a long period
6526 * of time? There must always be some method by which a
6527 * register read back can occur to flush out the posted
6528 * write that performs the reset.
6530 * For most tg3 variants the trick below was working.
6535 /* Flush PCI posted writes. The normal MMIO registers
6536 * are inaccessible at this time so this is the only
6537 * way to do this reliably (actually, this is no longer
6538 * the case, see above). I tried to use indirect
6539 * register read/write but this upset some 5701 variants.
6541 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6545 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6548 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6552 /* Wait for link training to complete. */
6553 for (i = 0; i < 5000; i++)
6556 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6557 pci_write_config_dword(tp->pdev, 0xc4,
6558 cfg_val | (1 << 15));
6561 /* Clear the "no snoop" and "relaxed ordering" bits. */
6562 pci_read_config_word(tp->pdev,
6563 tp->pcie_cap + PCI_EXP_DEVCTL,
6565 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
6566 PCI_EXP_DEVCTL_NOSNOOP_EN);
6568 * Older PCIe devices only support the 128 byte
6569 * MPS setting. Enforce the restriction.
6571 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
6572 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
6573 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
6574 pci_write_config_word(tp->pdev,
6575 tp->pcie_cap + PCI_EXP_DEVCTL,
6578 pcie_set_readrq(tp->pdev, 4096);
6580 /* Clear error status */
6581 pci_write_config_word(tp->pdev,
6582 tp->pcie_cap + PCI_EXP_DEVSTA,
6583 PCI_EXP_DEVSTA_CED |
6584 PCI_EXP_DEVSTA_NFED |
6585 PCI_EXP_DEVSTA_FED |
6586 PCI_EXP_DEVSTA_URD);
6589 tg3_restore_pci_state(tp);
6591 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6594 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6595 val = tr32(MEMARB_MODE);
6596 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
6598 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6600 tw32(0x5000, 0x400);
6603 tw32(GRC_MODE, tp->grc_mode);
6605 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
6608 tw32(0xc4, val | (1 << 15));
6611 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6612 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6613 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6614 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6615 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6616 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6619 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6620 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6621 tw32_f(MAC_MODE, tp->mac_mode);
6622 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6623 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6624 tw32_f(MAC_MODE, tp->mac_mode);
6625 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6626 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6627 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6628 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6629 tw32_f(MAC_MODE, tp->mac_mode);
6631 tw32_f(MAC_MODE, 0);
6634 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6636 err = tg3_poll_fw(tp);
6642 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6645 phy_addr = tp->phy_addr;
6646 tp->phy_addr = TG3_PHY_PCIE_ADDR;
6648 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
6649 TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT);
6650 val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL |
6651 TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL |
6652 TG3_PCIEPHY_TX0CTRL1_NB_EN;
6653 tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val);
6656 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
6657 TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT);
6658 val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN |
6659 TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN;
6660 tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val);
6663 tp->phy_addr = phy_addr;
6666 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6667 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
6668 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
6669 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
6672 tw32(0x7c00, val | (1 << 25));
6675 /* Reprobe ASF enable state. */
6676 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6677 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6678 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6679 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6682 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6683 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6684 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6685 tp->last_event_jiffies = jiffies;
6686 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
6687 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6694 /* tp->lock is held. */
6695 static void tg3_stop_fw(struct tg3 *tp)
6697 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6698 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
6699 /* Wait for RX cpu to ACK the previous event. */
6700 tg3_wait_for_event_ack(tp);
6702 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
6704 tg3_generate_fw_event(tp);
6706 /* Wait for RX cpu to ACK this event. */
6707 tg3_wait_for_event_ack(tp);
6711 /* tp->lock is held. */
6712 static int tg3_halt(struct tg3 *tp, int kind, int silent)
6718 tg3_write_sig_pre_reset(tp, kind);
6720 tg3_abort_hw(tp, silent);
6721 err = tg3_chip_reset(tp);
6723 __tg3_set_mac_addr(tp, 0);
6725 tg3_write_sig_legacy(tp, kind);
6726 tg3_write_sig_post_reset(tp, kind);
6734 #define RX_CPU_SCRATCH_BASE 0x30000
6735 #define RX_CPU_SCRATCH_SIZE 0x04000
6736 #define TX_CPU_SCRATCH_BASE 0x34000
6737 #define TX_CPU_SCRATCH_SIZE 0x04000
6739 /* tp->lock is held. */
6740 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6744 BUG_ON(offset == TX_CPU_BASE &&
6745 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6747 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6748 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6750 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6753 if (offset == RX_CPU_BASE) {
6754 for (i = 0; i < 10000; i++) {
6755 tw32(offset + CPU_STATE, 0xffffffff);
6756 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6757 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6761 tw32(offset + CPU_STATE, 0xffffffff);
6762 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
6765 for (i = 0; i < 10000; i++) {
6766 tw32(offset + CPU_STATE, 0xffffffff);
6767 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6768 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6774 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6777 (offset == RX_CPU_BASE ? "RX" : "TX"));
6781 /* Clear firmware's nvram arbitration. */
6782 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6783 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6788 unsigned int fw_base;
6789 unsigned int fw_len;
6790 const __be32 *fw_data;
6793 /* tp->lock is held. */
6794 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6795 int cpu_scratch_size, struct fw_info *info)
6797 int err, lock_err, i;
6798 void (*write_op)(struct tg3 *, u32, u32);
6800 if (cpu_base == TX_CPU_BASE &&
6801 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6802 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6803 "TX cpu firmware on %s which is 5705.\n",
6808 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6809 write_op = tg3_write_mem;
6811 write_op = tg3_write_indirect_reg32;
6813 /* It is possible that bootcode is still loading at this point.
6814 * Get the nvram lock first before halting the cpu.
6816 lock_err = tg3_nvram_lock(tp);
6817 err = tg3_halt_cpu(tp, cpu_base);
6819 tg3_nvram_unlock(tp);
6823 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6824 write_op(tp, cpu_scratch_base + i, 0);
6825 tw32(cpu_base + CPU_STATE, 0xffffffff);
6826 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6827 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
6828 write_op(tp, (cpu_scratch_base +
6829 (info->fw_base & 0xffff) +
6831 be32_to_cpu(info->fw_data[i]));
6839 /* tp->lock is held. */
6840 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6842 struct fw_info info;
6843 const __be32 *fw_data;
6846 fw_data = (void *)tp->fw->data;
6848 /* Firmware blob starts with version numbers, followed by
6849 start address and length. We are setting complete length.
6850 length = end_address_of_bss - start_address_of_text.
6851 Remainder is the blob to be loaded contiguously
6852 from start address. */
6854 info.fw_base = be32_to_cpu(fw_data[1]);
6855 info.fw_len = tp->fw->size - 12;
6856 info.fw_data = &fw_data[3];
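/* Illustrative layout, inferred from the parsing above rather than from
 * firmware documentation:
 *   fw_data[0]      version word
 *   fw_data[1]      load/start address (info.fw_base)
 *   fw_data[2]      length word
 *   fw_data[3]...   image, loaded contiguously at fw_base
 * which is why fw_len is tp->fw->size minus the 12-byte (three word) header.
 */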
6858 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6859 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6864 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6865 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6870 /* Now startup only the RX cpu. */
6871 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6872 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
6874 for (i = 0; i < 5; i++) {
6875 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
6877 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6878 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6879 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
6883 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6884 "to set RX CPU PC, is %08x should be %08x\n",
6885 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6889 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6890 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
/* 5705 needs a special version of the TSO firmware. */

/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.fw_base);
		return -ENODEV;
	}
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE, 0x00000000);
	return 0;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
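/*
 * Editorial note: a BDINFO block is a small fixed-layout descriptor in
 * NIC SRAM that tells the chip where a host ring lives.  Sketched as a
 * hypothetical struct (the driver addresses it with the TG3_BDINFO_*
 * offsets instead):
 *
 *	struct tg3_bdinfo_sketch {
 *		u32 host_addr_high;	-- TG3_BDINFO_HOST_ADDR, DMA >> 32
 *		u32 host_addr_low;	-- DMA & 0xffffffff
 *		u32 maxlen_flags;	-- (max buf len << 16) | flags
 *		u32 nic_addr;		-- ring copy in SRAM, pre-5705 only
 *	};
 *
 * Writing BDINFO_FLAGS_DISABLED into maxlen_flags, as tg3_rings_reset()
 * does below, is what turns one of these rings off.
 */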
static void __tg3_set_rx_mode(struct net_device *);
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);

		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}

	for (i = 0; i < tp->irq_cnt - 1; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->tx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->tx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
		reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->tx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
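/*
 * Editorial note: these HOSTCC_* registers are where the userspace
 * coalescing knobs land.  For example (assuming a tg3 port named eth0):
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5
 *
 * reaches this function via the ethtool set_coalesce hook with
 * ec->rx_coalesce_usecs == 20 (HOSTCC_RXCOL_TICKS) and
 * ec->rx_max_coalesced_frames == 5 (HOSTCC_RXMAX_FRAMES); the interrupt
 * fires when either threshold is reached.  The *_VEC1 blocks repeat the
 * same registers at a 0x18-byte stride for each extra MSI-X vector.
 */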
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);

	/* Zero mailbox registers. */
	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
		for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
		}
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (TG3_RX_RCB_RING_SIZE(tp) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (TG3_RX_RCB_RING_SIZE(tp) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		txrcb += TG3_BDINFO_SIZE;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
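/*
 * Editorial note: the loop above relies on the per-vector status block
 * address registers being packed 8 bytes apart starting at
 * HOSTCC_STATBLCK_RING1 (hence stblk += 8 per vector: one high and one
 * low 32-bit word each), while the send and receive-return control
 * blocks advance by TG3_BDINFO_SIZE in NIC SRAM.  Vector 0 is handled
 * separately before the loop because its status block address lives at
 * HOSTCC_STATUS_BLK_HOST_ADDR instead.
 */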
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
		tg3_abort_hw(tp, 1);
	}

	if (reset_phy &&
	    !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}
	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
		if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
		/* Enable some hw fixes. */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	tw32(GRC_MODE,
	     tp->grc_mode |
	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));

	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);
	/* Initialize MBUF/DESC pool. */
	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
		/* Do nothing. */
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
		       tp->dev->name);
		return -ENODEV;
	}

	/* Setup replenish threshold. */
	val = tp->rx_pending / 8;
	if (val == 0)
		val = 1;
	else if (val > tp->rx_std_max_post)
		val = tp->rx_std_max_post;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
			tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

		if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
			val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
	}

	tw32(RCVBDI_STD_THRESH, val);
	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* Setup replenish threshold. */
		tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);

		if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
			     BDINFO_FLAGS_USE_EXT_RECV);
			if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
			val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
			      (RX_STD_MAX_SIZE << 2);
		else
			val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_ptr = tp->rx_pending;
	tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
		     tpr->rx_std_ptr);

	tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
			  tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
		     tpr->rx_jmb_ptr);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		tw32(STD_REPLENISH_LWM, 32);
		tw32(JMB_REPLENISH_LWM, 16);
	}
	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, 0);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	tw32(MAC_TX_LENGTHS,
	     (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	     (6 << TX_LENGTHS_IPG_SHIFT) |
	     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	/* If statement applies to 5705 and 5750 PCI devices only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
		if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	/* Receive/send statistics. */
	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));
	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;
	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
		MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLG2_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
	}

	/* GPIO1 must be driven high for eeprom write protect */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);

	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	/* If statement applies to 5705 and 5750 PCI devices only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
		if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
			   !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
		u32 reg = MAC_RSS_INDIR_TBL_0;
		u8 *ent = (u8 *)&val;

		/* Setup the indirection table */
		for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
			int idx = i % sizeof(val);

			ent[idx] = i % (tp->irq_cnt - 1);
			if (idx == sizeof(val) - 1) {
				tw32(reg, val);
				reg += 4;
			}
		}

		/* Setup the "secret" hash key. */
		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
	}
	tp->rx_mode = RX_MODE_ENABLE;
	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
		    !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V */
			/* only if the signal pre-emphasis bit is not set */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}
	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
	}

	if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
		if (tp->link_config.phy_is_low_power) {
			tp->link_config.phy_is_low_power = 0;
			tp->link_config.speed = tp->link_config.orig_speed;
			tp->link_config.duplex = tp->link_config.orig_duplex;
			tp->link_config.autoneg = tp->link_config.orig_autoneg;
		}

		err = tg3_setup_phy(tp, 0);
		if (err)
			return err;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
		    !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, 0x14, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15, 0);
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14, 0);
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13, 0);
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12, 0);
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11, 0);
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10, 0);
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9, 0);
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8, 0);
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7, 0);
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6, 0);
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5, 0);
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4, 0);
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3, 0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2, 0); */
	case 2:
	case 1:
	default:
		break;
	}

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_DISABLE);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
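/*
 * Editorial note: TG3_STAT_ADD32 folds a 32-bit hardware counter that
 * clears on read into a 64-bit software accumulator.  The unsigned
 * overflow test works because (PSTAT)->low wraps modulo 2^32: if the
 * new sum is smaller than the value just added, the addition carried.
 * E.g. low = 0xfffffff0, __val = 0x20 -> low becomes 0x10 < 0x20, so
 * high is incremented.
 */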
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
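/*
 * Editorial note: this once-a-second polling appears to exist because
 * the 5705-and-later chips do not DMA a statistics block to host memory
 * the way the older parts programmed in tg3_reset_hw() do; instead the
 * on-chip 32-bit counters are read out from tg3_timer() and folded into
 * the 64-bit software copies via TG3_STAT_ADD32 above.  The driver
 * skips the read while the link is down, when the counters are assumed
 * not to advance.
 */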
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
		    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
		fn = tg3_msi;
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			fn = tg3_msi_1shot;
		flags = IRQF_SAMPLE_RANDOM;
	} else {
		fn = tg3_interrupt;
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
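/*
 * Editorial note: the handler matrix selected above is
 *
 *	MSI/MSI-X + one-shot:	tg3_msi_1shot
 *	MSI/MSI-X:		tg3_msi
 *	INTx + tagged status:	tg3_interrupt_tagged
 *	INTx:			tg3_interrupt
 *
 * Only the INTx handlers pass IRQF_SHARED, since message-signalled
 * interrupts are never shared with other devices.
 */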
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
	    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
		    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
	       tp->dev->name);

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
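/*
 * Editorial note on the fallback sequence above: if the MSI test fails
 * with -EIO the driver frees the MSI vector, disables MSI at the PCI
 * level, re-requests the legacy INTx line and then fully re-initializes
 * the chip, because a failed MSI cycle may have ended in a Master Abort
 * that leaves the device in an undefined state.  Any error other than
 * -EIO is passed straight up rather than being treated as "no interrupt
 * seen".
 */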
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
		       tp->dev->name, tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
		printk(KERN_ERR "%s: bogus length %d in \"%s\"\n",
		       tp->dev->name, tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc, cpus = num_online_cpus();
	struct msix_entry msix_ent[tp->irq_max];

	if (cpus == 1)
		/* Just fallback to the simpler MSI mode. */
		return false;

	/*
	 * We want as many rx rings enabled as there are cpus.
	 * The first MSIX vector only deals with link interrupts, etc,
	 * so we add one to the number of vectors we are requesting.
	 */
	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc != 0) {
		if (rc < TG3_RSS_MIN_NUM_MSIX_VECS)
			return false;
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		printk(KERN_NOTICE
		       "%s: Requested %d MSI-X vectors, received %d\n",
		       tp->dev->name, tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	tp->dev->real_num_tx_queues = tp->irq_cnt - 1;

	return true;
}
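/*
 * Editorial note: assuming four online CPUs and an irq_max of 5, the
 * code above requests five vectors: vector 0 for link and error events
 * and vectors 1-4 as per-CPU rx/tx rings.  If pci_enable_msix() returns
 * a smaller positive count, the second call retries with that count,
 * and anything below TG3_RSS_MIN_NUM_MSIX_VECS abandons MSI-X in favor
 * of plain MSI.
 */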
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
	    !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
		       "Not using MSI.\n", tp->dev->name);
		goto defcfg;
	}

	if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
		tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
	else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
		 pci_enable_msi(tp->pdev) == 0)
		tp->tg3_flags2 |= TG3_FLG2_USING_MSI;

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		tp->dev->real_num_tx_queues = 1;
	}
}
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
		pci_disable_msix(tp->pdev);
	else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
		pci_disable_msi(tp->pdev);
	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
	tp->tg3_flags3 &= ~TG3_FLG3_ENABLE_RSS;
}
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			printk(KERN_WARNING "%s: TSO capability disabled.\n",
			       tp->dev->name);
			tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
		} else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
			printk(KERN_NOTICE "%s: TSO capability restored.\n",
			       tp->dev->name);
			tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			break;
		}
	}

	if (err)
		goto err_out2;

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
		    (tp->tg3_flags2 & TG3_FLG2_USING_MSI) &&
		    (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);
	return err;
}
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;
	struct tg3_hw_status *sblk = tp->napi[0].hw_status;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk(KERN_DEBUG
	       "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       sblk->status,
	       sblk->status_tag,
	       sblk->rx_jumbo_consumer,
	       sblk->rx_consumer,
	       sblk->rx_mini_consumer,
	       sblk->idx[0].rx_producer,
	       sblk->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		u8 __iomem *txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		u8 __iomem *rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		u8 __iomem *rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
static struct net_device_stats *tg3_get_stats(struct net_device *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);

static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	tg3_napi_disable(tp);
	cancel_work_sync(&tp->reset_task);

	netif_tx_stop_all_queues(dev);

	del_timer_sync(&tp->timer);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
static inline unsigned long get_stat64(tg3_stat64_t *val)
{
	unsigned long ret;

#if (BITS_PER_LONG == 32)
	ret = val->low;
#else
	ret = ((u64)val->high << 32) | ((u64)val->low);
#endif
	return ret;
}

static inline u64 get_estat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_estat64(&hw_stats->member)
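/*
 * Editorial note: the hardware counters reset to zero whenever the chip
 * is reset, so tg3_close() snapshots the running totals into
 * estats_prev (and net_stats_prev).  ESTAT_ADD therefore reports
 * "snapshot + counters since last reset", which keeps the values shown
 * by ethtool -S monotonic across an interface down/up cycle.
 */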
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
8943 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8945 struct tg3 *tp = netdev_priv(dev);
8946 struct net_device_stats *stats = &tp->net_stats;
8947 struct net_device_stats *old_stats = &tp->net_stats_prev;
8948 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8953 stats->rx_packets = old_stats->rx_packets +
8954 get_stat64(&hw_stats->rx_ucast_packets) +
8955 get_stat64(&hw_stats->rx_mcast_packets) +
8956 get_stat64(&hw_stats->rx_bcast_packets);
8958 stats->tx_packets = old_stats->tx_packets +
8959 get_stat64(&hw_stats->tx_ucast_packets) +
8960 get_stat64(&hw_stats->tx_mcast_packets) +
8961 get_stat64(&hw_stats->tx_bcast_packets);
8963 stats->rx_bytes = old_stats->rx_bytes +
8964 get_stat64(&hw_stats->rx_octets);
8965 stats->tx_bytes = old_stats->tx_bytes +
8966 get_stat64(&hw_stats->tx_octets);
8968 stats->rx_errors = old_stats->rx_errors +
8969 get_stat64(&hw_stats->rx_errors);
8970 stats->tx_errors = old_stats->tx_errors +
8971 get_stat64(&hw_stats->tx_errors) +
8972 get_stat64(&hw_stats->tx_mac_errors) +
8973 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8974 get_stat64(&hw_stats->tx_discards);
8976 stats->multicast = old_stats->multicast +
8977 get_stat64(&hw_stats->rx_mcast_packets);
8978 stats->collisions = old_stats->collisions +
8979 get_stat64(&hw_stats->tx_collisions);
8981 stats->rx_length_errors = old_stats->rx_length_errors +
8982 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8983 get_stat64(&hw_stats->rx_undersize_packets);
8985 stats->rx_over_errors = old_stats->rx_over_errors +
8986 get_stat64(&hw_stats->rxbds_empty);
8987 stats->rx_frame_errors = old_stats->rx_frame_errors +
8988 get_stat64(&hw_stats->rx_align_errors);
8989 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8990 get_stat64(&hw_stats->tx_discards);
8991 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8992 get_stat64(&hw_stats->tx_carrier_sense_errors);
8994 stats->rx_crc_errors = old_stats->rx_crc_errors +
8995 calc_crc_errors(tp);
8997 stats->rx_missed_errors = old_stats->rx_missed_errors +
8998 get_stat64(&hw_stats->rx_discards);
9003 static inline u32 calc_crc(unsigned char *buf, int len)
9011 for (j = 0; j < len; j++) {
9014 for (k = 0; k < 8; k++) {
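/* A self-contained sketch of the bit-serial CRC-32 that calc_crc()
 * computes (standard Ethernet polynomial in reflected form,
 * 0xedb88320); the partially shown driver body above is assumed
 * equivalent.  Used below to hash multicast addresses.
 */
static u32 calc_crc_sketch(unsigned char *buf, int len)
{
	u32 reg = 0xffffffff;	/* conventional all-ones preload */
	u32 tmp;
	int j, k;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;
			reg >>= 1;
			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}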
9028 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9030 /* accept or reject all multicast frames */
9031 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9032 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9033 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9034 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9037 static void __tg3_set_rx_mode(struct net_device *dev)
9039 struct tg3 *tp = netdev_priv(dev);
9042 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9043 RX_MODE_KEEP_VLAN_TAG);
9045 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9046  * flag clear.
9047  */
9048 #if TG3_VLAN_TAG_USED
9049 if (!tp->vlgrp &&
9050 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9051 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9052 #else
9053 /* By definition, VLAN is disabled always in this
9054  * case.
9055  */
9056 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9057 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9058 #endif
9060 if (dev->flags & IFF_PROMISC) {
9061 /* Promiscuous mode. */
9062 rx_mode |= RX_MODE_PROMISC;
9063 } else if (dev->flags & IFF_ALLMULTI) {
9064 /* Accept all multicast. */
9065 tg3_set_multi (tp, 1);
9066 } else if (dev->mc_count < 1) {
9067 /* Reject all multicast. */
9068 tg3_set_multi (tp, 0);
9070 /* Accept one or more multicast(s). */
9071 struct dev_mc_list *mclist;
9073 u32 mc_filter[4] = { 0, };
9078 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
9079 i++, mclist = mclist->next) {
9081 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
9082 bit = ~crc & 0x7f;
9083 regidx = (bit & 0x60) >> 5;
9084 bit &= 0x1f;
9085 mc_filter[regidx] |= (1 << bit);
9088 tw32(MAC_HASH_REG_0, mc_filter[0]);
9089 tw32(MAC_HASH_REG_1, mc_filter[1]);
9090 tw32(MAC_HASH_REG_2, mc_filter[2]);
9091 tw32(MAC_HASH_REG_3, mc_filter[3]);
9094 if (rx_mode != tp->rx_mode) {
9095 tp->rx_mode = rx_mode;
9096 tw32_f(MAC_RX_MODE, rx_mode);
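/* Worked example of the hash mapping above (hypothetical CRC value):
 * if calc_crc() returns 0xffffff00 for an address, then
 * ~crc & 0x7f = 0x7f, regidx = (0x7f & 0x60) >> 5 = 3 and
 * bit = 0x7f & 0x1f = 31, so bit 31 of MAC_HASH_REG_3 is set.
 */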
9101 static void tg3_set_rx_mode(struct net_device *dev)
9103 struct tg3 *tp = netdev_priv(dev);
9105 if (!netif_running(dev))
9108 tg3_full_lock(tp, 0);
9109 __tg3_set_rx_mode(dev);
9110 tg3_full_unlock(tp);
9113 #define TG3_REGDUMP_LEN (32 * 1024)
9115 static int tg3_get_regs_len(struct net_device *dev)
9117 return TG3_REGDUMP_LEN;
9120 static void tg3_get_regs(struct net_device *dev,
9121 struct ethtool_regs *regs, void *_p)
9124 struct tg3 *tp = netdev_priv(dev);
9130 memset(p, 0, TG3_REGDUMP_LEN);
9132 if (tp->link_config.phy_is_low_power)
9135 tg3_full_lock(tp, 0);
9137 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
9138 #define GET_REG32_LOOP(base,len) \
9139 do { p = (u32 *)(orig_p + (base)); \
9140 for (i = 0; i < len; i += 4) \
9141 __GET_REG32((base) + i); \
9143 #define GET_REG32_1(reg) \
9144 do { p = (u32 *)(orig_p + (reg)); \
9145 __GET_REG32((reg)); \
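/* For reference, GET_REG32_LOOP(MAC_MODE, 0x4f0) expands roughly to:
 *
 *	p = (u32 *)(orig_p + MAC_MODE);
 *	for (i = 0; i < 0x4f0; i += 4)
 *		*(p)++ = tr32(MAC_MODE + i);
 *
 * so every register lands at its own offset inside the 32 KB dump
 * image, giving a sparse, address-indexed layout.
 */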
9148 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
9149 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
9150 GET_REG32_LOOP(MAC_MODE, 0x4f0);
9151 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
9152 GET_REG32_1(SNDDATAC_MODE);
9153 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
9154 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
9155 GET_REG32_1(SNDBDC_MODE);
9156 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
9157 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
9158 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
9159 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
9160 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
9161 GET_REG32_1(RCVDCC_MODE);
9162 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
9163 GET_REG32_LOOP(RCVCC_MODE, 0x14);
9164 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
9165 GET_REG32_1(MBFREE_MODE);
9166 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
9167 GET_REG32_LOOP(MEMARB_MODE, 0x10);
9168 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
9169 GET_REG32_LOOP(RDMAC_MODE, 0x08);
9170 GET_REG32_LOOP(WDMAC_MODE, 0x08);
9171 GET_REG32_1(RX_CPU_MODE);
9172 GET_REG32_1(RX_CPU_STATE);
9173 GET_REG32_1(RX_CPU_PGMCTR);
9174 GET_REG32_1(RX_CPU_HWBKPT);
9175 GET_REG32_1(TX_CPU_MODE);
9176 GET_REG32_1(TX_CPU_STATE);
9177 GET_REG32_1(TX_CPU_PGMCTR);
9178 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
9179 GET_REG32_LOOP(FTQ_RESET, 0x120);
9180 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
9181 GET_REG32_1(DMAC_MODE);
9182 GET_REG32_LOOP(GRC_MODE, 0x4c);
9183 if (tp->tg3_flags & TG3_FLAG_NVRAM)
9184 GET_REG32_LOOP(NVRAM_CMD, 0x24);
9187 #undef GET_REG32_LOOP
9190 tg3_full_unlock(tp);
9193 static int tg3_get_eeprom_len(struct net_device *dev)
9195 struct tg3 *tp = netdev_priv(dev);
9197 return tp->nvram_size;
9200 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9202 struct tg3 *tp = netdev_priv(dev);
9205 u32 i, offset, len, b_offset, b_count;
9208 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9211 if (tp->link_config.phy_is_low_power)
9214 offset = eeprom->offset;
9218 eeprom->magic = TG3_EEPROM_MAGIC;
9221 /* adjustments to start on required 4 byte boundary */
9222 b_offset = offset & 3;
9223 b_count = 4 - b_offset;
9224 if (b_count > len) {
9225 /* i.e. offset=1 len=2 */
9228 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9231 memcpy(data, ((char*)&val) + b_offset, b_count);
9234 eeprom->len += b_count;
9237 /* read bytes up to the last 4 byte boundary */
9238 pd = &data[eeprom->len];
9239 for (i = 0; i < (len - (len & 3)); i += 4) {
9240 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9245 memcpy(pd + i, &val, 4);
9250 /* read last bytes not ending on 4 byte boundary */
9251 pd = &data[eeprom->len];
9253 b_offset = offset + len - b_count;
9254 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9257 memcpy(pd, &val, b_count);
9258 eeprom->len += b_count;
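/* A minimal sketch (not driver code) of how tg3_get_eeprom() above
 * decomposes an arbitrary (offset, len) request into aligned 32-bit
 * NVRAM accesses: an unaligned head, whole words, then a tail.  The
 * helper name and out-parameters are illustrative only.
 */
static void tg3_nvram_split_example(u32 offset, u32 len,
				    u32 *head, u32 *body, u32 *tail)
{
	u32 b_offset = offset & 3;
	u32 h = 0;

	if (b_offset) {
		h = 4 - b_offset;	/* bytes up to the next boundary */
		if (h > len)
			h = len;	/* e.g. offset=1 len=2: all head */
	}
	*head = h;
	len -= h;
	*body = len & ~3U;		/* whole aligned words */
	*tail = len & 3;		/* trailing partial word */
}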
9263 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9265 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9267 struct tg3 *tp = netdev_priv(dev);
9269 u32 offset, len, b_offset, odd_len;
9273 if (tp->link_config.phy_is_low_power)
9276 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
9277 eeprom->magic != TG3_EEPROM_MAGIC)
9280 offset = eeprom->offset;
9283 if ((b_offset = (offset & 3))) {
9284 /* adjustments to start on required 4 byte boundary */
9285 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9296 /* adjustments to end on required 4 byte boundary */
9298 len = (len + 3) & ~3;
9299 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9305 if (b_offset || odd_len) {
9306 buf = kmalloc(len, GFP_KERNEL);
9310 memcpy(buf, &start, 4);
9312 memcpy(buf+len-4, &end, 4);
9313 memcpy(buf + b_offset, data, eeprom->len);
9316 ret = tg3_nvram_write_block(tp, offset, len, buf);
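/* A hedged sketch of the read-modify-write framing above: when the
 * write is unaligned at either edge, the preserved leading word
 * ("start") and trailing word ("end") that were read back from NVRAM
 * are merged around the caller's bytes so that only whole words get
 * programmed.  Hypothetical helper mirroring the memcpy() sequence
 * in tg3_set_eeprom().
 */
static void tg3_nvram_rmw_frame_example(u8 *buf, u32 len, u32 b_offset,
					int odd_len, __be32 start,
					__be32 end, const u8 *data,
					u32 data_len)
{
	if (b_offset)
		memcpy(buf, &start, 4);		/* keep leading bytes */
	if (odd_len)
		memcpy(buf + len - 4, &end, 4);	/* keep trailing bytes */
	memcpy(buf + b_offset, data, data_len);	/* caller's payload */
}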
9324 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9326 struct tg3 *tp = netdev_priv(dev);
9328 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9329 struct phy_device *phydev;
9330 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9332 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9333 return phy_ethtool_gset(phydev, cmd);
9336 cmd->supported = (SUPPORTED_Autoneg);
9338 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9339 cmd->supported |= (SUPPORTED_1000baseT_Half |
9340 SUPPORTED_1000baseT_Full);
9342 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
9343 cmd->supported |= (SUPPORTED_100baseT_Half |
9344 SUPPORTED_100baseT_Full |
9345 SUPPORTED_10baseT_Half |
9346 SUPPORTED_10baseT_Full |
9347 SUPPORTED_TP);
9348 cmd->port = PORT_TP;
9350 cmd->supported |= SUPPORTED_FIBRE;
9351 cmd->port = PORT_FIBRE;
9354 cmd->advertising = tp->link_config.advertising;
9355 if (netif_running(dev)) {
9356 cmd->speed = tp->link_config.active_speed;
9357 cmd->duplex = tp->link_config.active_duplex;
9359 cmd->phy_address = tp->phy_addr;
9360 cmd->transceiver = XCVR_INTERNAL;
9361 cmd->autoneg = tp->link_config.autoneg;
9367 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9369 struct tg3 *tp = netdev_priv(dev);
9371 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9372 struct phy_device *phydev;
9373 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9375 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9376 return phy_ethtool_sset(phydev, cmd);
9379 if (cmd->autoneg != AUTONEG_ENABLE &&
9380 cmd->autoneg != AUTONEG_DISABLE)
9383 if (cmd->autoneg == AUTONEG_DISABLE &&
9384 cmd->duplex != DUPLEX_FULL &&
9385 cmd->duplex != DUPLEX_HALF)
9388 if (cmd->autoneg == AUTONEG_ENABLE) {
9389 u32 mask = ADVERTISED_Autoneg |
9390 ADVERTISED_Pause |
9391 ADVERTISED_Asym_Pause;
9393 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9394 mask |= ADVERTISED_1000baseT_Half |
9395 ADVERTISED_1000baseT_Full;
9397 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9398 mask |= ADVERTISED_100baseT_Half |
9399 ADVERTISED_100baseT_Full |
9400 ADVERTISED_10baseT_Half |
9401 ADVERTISED_10baseT_Full |
9402 ADVERTISED_TP;
9403 else
9404 mask |= ADVERTISED_FIBRE;
9406 if (cmd->advertising & ~mask)
9409 mask &= (ADVERTISED_1000baseT_Half |
9410 ADVERTISED_1000baseT_Full |
9411 ADVERTISED_100baseT_Half |
9412 ADVERTISED_100baseT_Full |
9413 ADVERTISED_10baseT_Half |
9414 ADVERTISED_10baseT_Full);
9416 cmd->advertising &= mask;
9418 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
9419 if (cmd->speed != SPEED_1000)
9422 if (cmd->duplex != DUPLEX_FULL)
9425 if (cmd->speed != SPEED_100 &&
9426 cmd->speed != SPEED_10)
9431 tg3_full_lock(tp, 0);
9433 tp->link_config.autoneg = cmd->autoneg;
9434 if (cmd->autoneg == AUTONEG_ENABLE) {
9435 tp->link_config.advertising = (cmd->advertising |
9436 ADVERTISED_Autoneg);
9437 tp->link_config.speed = SPEED_INVALID;
9438 tp->link_config.duplex = DUPLEX_INVALID;
9440 tp->link_config.advertising = 0;
9441 tp->link_config.speed = cmd->speed;
9442 tp->link_config.duplex = cmd->duplex;
9445 tp->link_config.orig_speed = tp->link_config.speed;
9446 tp->link_config.orig_duplex = tp->link_config.duplex;
9447 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9449 if (netif_running(dev))
9450 tg3_setup_phy(tp, 1);
9452 tg3_full_unlock(tp);
9457 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9459 struct tg3 *tp = netdev_priv(dev);
9461 strcpy(info->driver, DRV_MODULE_NAME);
9462 strcpy(info->version, DRV_MODULE_VERSION);
9463 strcpy(info->fw_version, tp->fw_ver);
9464 strcpy(info->bus_info, pci_name(tp->pdev));
9467 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9469 struct tg3 *tp = netdev_priv(dev);
9471 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9472 device_can_wakeup(&tp->pdev->dev))
9473 wol->supported = WAKE_MAGIC;
9477 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9478 device_can_wakeup(&tp->pdev->dev))
9479 wol->wolopts = WAKE_MAGIC;
9480 memset(&wol->sopass, 0, sizeof(wol->sopass));
9483 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9485 struct tg3 *tp = netdev_priv(dev);
9486 struct device *dp = &tp->pdev->dev;
9488 if (wol->wolopts & ~WAKE_MAGIC)
9490 if ((wol->wolopts & WAKE_MAGIC) &&
9491 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9494 spin_lock_bh(&tp->lock);
9495 if (wol->wolopts & WAKE_MAGIC) {
9496 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9497 device_set_wakeup_enable(dp, true);
9499 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9500 device_set_wakeup_enable(dp, false);
9502 spin_unlock_bh(&tp->lock);
9507 static u32 tg3_get_msglevel(struct net_device *dev)
9509 struct tg3 *tp = netdev_priv(dev);
9510 return tp->msg_enable;
9513 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9515 struct tg3 *tp = netdev_priv(dev);
9516 tp->msg_enable = value;
9519 static int tg3_set_tso(struct net_device *dev, u32 value)
9521 struct tg3 *tp = netdev_priv(dev);
9523 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9528 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9529 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
9530 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
9532 dev->features |= NETIF_F_TSO6;
9533 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
9534 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9535 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9536 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9537 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9538 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9539 dev->features |= NETIF_F_TSO_ECN;
9541 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9543 return ethtool_op_set_tso(dev, value);
9546 static int tg3_nway_reset(struct net_device *dev)
9548 struct tg3 *tp = netdev_priv(dev);
9551 if (!netif_running(dev))
9554 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9557 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9558 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9560 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9564 spin_lock_bh(&tp->lock);
9566 tg3_readphy(tp, MII_BMCR, &bmcr);
9567 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9568 ((bmcr & BMCR_ANENABLE) ||
9569 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9570 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9574 spin_unlock_bh(&tp->lock);
9580 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9582 struct tg3 *tp = netdev_priv(dev);
9584 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9585 ering->rx_mini_max_pending = 0;
9586 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9587 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9589 ering->rx_jumbo_max_pending = 0;
9591 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9593 ering->rx_pending = tp->rx_pending;
9594 ering->rx_mini_pending = 0;
9595 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9596 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9598 ering->rx_jumbo_pending = 0;
9600 ering->tx_pending = tp->napi[0].tx_pending;
9603 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9605 struct tg3 *tp = netdev_priv(dev);
9606 int i, irq_sync = 0, err = 0;
9608 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9609 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9610 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9611 (ering->tx_pending <= MAX_SKB_FRAGS) ||
9612 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9613 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9616 if (netif_running(dev)) {
9622 tg3_full_lock(tp, irq_sync);
9624 tp->rx_pending = ering->rx_pending;
9626 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9627 tp->rx_pending > 63)
9628 tp->rx_pending = 63;
9629 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9631 for (i = 0; i < TG3_IRQ_MAX_VECS; i++)
9632 tp->napi[i].tx_pending = ering->tx_pending;
9634 if (netif_running(dev)) {
9635 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9636 err = tg3_restart_hw(tp, 1);
9638 tg3_netif_start(tp);
9641 tg3_full_unlock(tp);
9643 if (irq_sync && !err)
9649 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9651 struct tg3 *tp = netdev_priv(dev);
9653 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9655 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
9656 epause->rx_pause = 1;
9658 epause->rx_pause = 0;
9660 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
9661 epause->tx_pause = 1;
9663 epause->tx_pause = 0;
9666 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9668 struct tg3 *tp = netdev_priv(dev);
9671 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9672 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9675 if (epause->autoneg) {
9677 struct phy_device *phydev;
9679 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9681 if (epause->rx_pause) {
9682 if (epause->tx_pause)
9683 newadv = ADVERTISED_Pause;
9685 newadv = ADVERTISED_Pause |
9686 ADVERTISED_Asym_Pause;
9687 } else if (epause->tx_pause) {
9688 newadv = ADVERTISED_Asym_Pause;
9692 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9693 u32 oldadv = phydev->advertising &
9695 ADVERTISED_Asym_Pause);
9696 if (oldadv != newadv) {
9697 phydev->advertising &=
9698 ~(ADVERTISED_Pause |
9699 ADVERTISED_Asym_Pause);
9700 phydev->advertising |= newadv;
9701 err = phy_start_aneg(phydev);
9704 tp->link_config.advertising &=
9705 ~(ADVERTISED_Pause |
9706 ADVERTISED_Asym_Pause);
9707 tp->link_config.advertising |= newadv;
9710 if (epause->rx_pause)
9711 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9713 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
9715 if (epause->tx_pause)
9716 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9718 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9720 if (netif_running(dev))
9721 tg3_setup_flow_control(tp, 0, 0);
9726 if (netif_running(dev)) {
9731 tg3_full_lock(tp, irq_sync);
9733 if (epause->autoneg)
9734 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9736 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9737 if (epause->rx_pause)
9738 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9740 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
9741 if (epause->tx_pause)
9742 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9744 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9746 if (netif_running(dev)) {
9747 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9748 err = tg3_restart_hw(tp, 1);
9750 tg3_netif_start(tp);
9753 tg3_full_unlock(tp);
9759 static u32 tg3_get_rx_csum(struct net_device *dev)
9761 struct tg3 *tp = netdev_priv(dev);
9762 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9765 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9767 struct tg3 *tp = netdev_priv(dev);
9769 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9775 spin_lock_bh(&tp->lock);
9777 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9779 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9780 spin_unlock_bh(&tp->lock);
9785 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9787 struct tg3 *tp = netdev_priv(dev);
9789 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9795 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
9796 ethtool_op_set_tx_ipv6_csum(dev, data);
9798 ethtool_op_set_tx_csum(dev, data);
9803 static int tg3_get_sset_count (struct net_device *dev, int sset)
9807 return TG3_NUM_TEST;
9809 return TG3_NUM_STATS;
9815 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9817 switch (stringset) {
9819 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9822 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9825 WARN_ON(1); /* we need a WARN() */
9830 static int tg3_phys_id(struct net_device *dev, u32 data)
9832 struct tg3 *tp = netdev_priv(dev);
9835 if (!netif_running(tp->dev))
9839 data = UINT_MAX / 2;
9841 for (i = 0; i < (data * 2); i++) {
9843 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9844 LED_CTRL_1000MBPS_ON |
9845 LED_CTRL_100MBPS_ON |
9846 LED_CTRL_10MBPS_ON |
9847 LED_CTRL_TRAFFIC_OVERRIDE |
9848 LED_CTRL_TRAFFIC_BLINK |
9849 LED_CTRL_TRAFFIC_LED);
9852 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9853 LED_CTRL_TRAFFIC_OVERRIDE);
9855 if (msleep_interruptible(500))
9858 tw32(MAC_LED_CTRL, tp->led_ctrl);
9862 static void tg3_get_ethtool_stats (struct net_device *dev,
9863 struct ethtool_stats *estats, u64 *tmp_stats)
9865 struct tg3 *tp = netdev_priv(dev);
9866 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9869 #define NVRAM_TEST_SIZE 0x100
9870 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9871 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9872 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
9873 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9874 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9876 static int tg3_test_nvram(struct tg3 *tp)
9880 int i, j, k, err = 0, size;
9882 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9885 if (tg3_nvram_read(tp, 0, &magic) != 0)
9888 if (magic == TG3_EEPROM_MAGIC)
9889 size = NVRAM_TEST_SIZE;
9890 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9891 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9892 TG3_EEPROM_SB_FORMAT_1) {
9893 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9894 case TG3_EEPROM_SB_REVISION_0:
9895 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9897 case TG3_EEPROM_SB_REVISION_2:
9898 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9900 case TG3_EEPROM_SB_REVISION_3:
9901 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9908 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9909 size = NVRAM_SELFBOOT_HW_SIZE;
9913 buf = kmalloc(size, GFP_KERNEL);
9918 for (i = 0, j = 0; i < size; i += 4, j++) {
9919 err = tg3_nvram_read_be32(tp, i, &buf[j]);
9926 /* Selfboot format */
9927 magic = be32_to_cpu(buf[0]);
9928 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9929 TG3_EEPROM_MAGIC_FW) {
9930 u8 *buf8 = (u8 *) buf, csum8 = 0;
9932 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9933 TG3_EEPROM_SB_REVISION_2) {
9934 /* For rev 2, the csum doesn't include the MBA. */
9935 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9937 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9940 for (i = 0; i < size; i++)
9953 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9954 TG3_EEPROM_MAGIC_HW) {
9955 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9956 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9957 u8 *buf8 = (u8 *) buf;
9959 /* Separate the parity bits and the data bytes. */
9960 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9961 if ((i == 0) || (i == 8)) {
9965 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9966 parity[k++] = buf8[i] & msk;
9973 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9974 parity[k++] = buf8[i] & msk;
9977 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9978 parity[k++] = buf8[i] & msk;
9981 data[j++] = buf8[i];
9985 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9986 u8 hw8 = hweight8(data[i]);
9988 if ((hw8 & 0x1) && parity[i])
9990 else if (!(hw8 & 0x1) && !parity[i])
9997 /* Bootstrap checksum at offset 0x10 */
9998 csum = calc_crc((unsigned char *) buf, 0x10);
9999 if (csum != be32_to_cpu(buf[0x10/4]))
10002 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10003 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10004 if (csum != be32_to_cpu(buf[0xfc/4]))
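/* A condensed sketch of the self-boot parity rule enforced above: a
 * data byte together with its stored parity bit must contain an odd
 * number of ones.  hweight8() is the kernel popcount for a byte; the
 * helper name is illustrative only.
 */
static int tg3_odd_parity_ok_example(u8 data, int parity_bit)
{
	return (hweight8(data) + (parity_bit ? 1 : 0)) & 1;
}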
10014 #define TG3_SERDES_TIMEOUT_SEC 2
10015 #define TG3_COPPER_TIMEOUT_SEC 6
10017 static int tg3_test_link(struct tg3 *tp)
10021 if (!netif_running(tp->dev))
10024 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
10025 max = TG3_SERDES_TIMEOUT_SEC;
10027 max = TG3_COPPER_TIMEOUT_SEC;
10029 for (i = 0; i < max; i++) {
10030 if (netif_carrier_ok(tp->dev))
10033 if (msleep_interruptible(1000))
10040 /* Only test the commonly used registers */
10041 static int tg3_test_registers(struct tg3 *tp)
10043 int i, is_5705, is_5750;
10044 u32 offset, read_mask, write_mask, val, save_val, read_val;
10048 #define TG3_FL_5705 0x1
10049 #define TG3_FL_NOT_5705 0x2
10050 #define TG3_FL_NOT_5788 0x4
10051 #define TG3_FL_NOT_5750 0x8
10055 /* MAC Control Registers */
10056 { MAC_MODE, TG3_FL_NOT_5705,
10057 0x00000000, 0x00ef6f8c },
10058 { MAC_MODE, TG3_FL_5705,
10059 0x00000000, 0x01ef6b8c },
10060 { MAC_STATUS, TG3_FL_NOT_5705,
10061 0x03800107, 0x00000000 },
10062 { MAC_STATUS, TG3_FL_5705,
10063 0x03800100, 0x00000000 },
10064 { MAC_ADDR_0_HIGH, 0x0000,
10065 0x00000000, 0x0000ffff },
10066 { MAC_ADDR_0_LOW, 0x0000,
10067 0x00000000, 0xffffffff },
10068 { MAC_RX_MTU_SIZE, 0x0000,
10069 0x00000000, 0x0000ffff },
10070 { MAC_TX_MODE, 0x0000,
10071 0x00000000, 0x00000070 },
10072 { MAC_TX_LENGTHS, 0x0000,
10073 0x00000000, 0x00003fff },
10074 { MAC_RX_MODE, TG3_FL_NOT_5705,
10075 0x00000000, 0x000007fc },
10076 { MAC_RX_MODE, TG3_FL_5705,
10077 0x00000000, 0x000007dc },
10078 { MAC_HASH_REG_0, 0x0000,
10079 0x00000000, 0xffffffff },
10080 { MAC_HASH_REG_1, 0x0000,
10081 0x00000000, 0xffffffff },
10082 { MAC_HASH_REG_2, 0x0000,
10083 0x00000000, 0xffffffff },
10084 { MAC_HASH_REG_3, 0x0000,
10085 0x00000000, 0xffffffff },
10087 /* Receive Data and Receive BD Initiator Control Registers. */
10088 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10089 0x00000000, 0xffffffff },
10090 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10091 0x00000000, 0xffffffff },
10092 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10093 0x00000000, 0x00000003 },
10094 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10095 0x00000000, 0xffffffff },
10096 { RCVDBDI_STD_BD+0, 0x0000,
10097 0x00000000, 0xffffffff },
10098 { RCVDBDI_STD_BD+4, 0x0000,
10099 0x00000000, 0xffffffff },
10100 { RCVDBDI_STD_BD+8, 0x0000,
10101 0x00000000, 0xffff0002 },
10102 { RCVDBDI_STD_BD+0xc, 0x0000,
10103 0x00000000, 0xffffffff },
10105 /* Receive BD Initiator Control Registers. */
10106 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10107 0x00000000, 0xffffffff },
10108 { RCVBDI_STD_THRESH, TG3_FL_5705,
10109 0x00000000, 0x000003ff },
10110 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10111 0x00000000, 0xffffffff },
10113 /* Host Coalescing Control Registers. */
10114 { HOSTCC_MODE, TG3_FL_NOT_5705,
10115 0x00000000, 0x00000004 },
10116 { HOSTCC_MODE, TG3_FL_5705,
10117 0x00000000, 0x000000f6 },
10118 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10119 0x00000000, 0xffffffff },
10120 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10121 0x00000000, 0x000003ff },
10122 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10123 0x00000000, 0xffffffff },
10124 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10125 0x00000000, 0x000003ff },
10126 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10127 0x00000000, 0xffffffff },
10128 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10129 0x00000000, 0x000000ff },
10130 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10131 0x00000000, 0xffffffff },
10132 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10133 0x00000000, 0x000000ff },
10134 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10135 0x00000000, 0xffffffff },
10136 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10137 0x00000000, 0xffffffff },
10138 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10139 0x00000000, 0xffffffff },
10140 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10141 0x00000000, 0x000000ff },
10142 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10143 0x00000000, 0xffffffff },
10144 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10145 0x00000000, 0x000000ff },
10146 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10147 0x00000000, 0xffffffff },
10148 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10149 0x00000000, 0xffffffff },
10150 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10151 0x00000000, 0xffffffff },
10152 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10153 0x00000000, 0xffffffff },
10154 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10155 0x00000000, 0xffffffff },
10156 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10157 0xffffffff, 0x00000000 },
10158 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10159 0xffffffff, 0x00000000 },
10161 /* Buffer Manager Control Registers. */
10162 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10163 0x00000000, 0x007fff80 },
10164 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10165 0x00000000, 0x007fffff },
10166 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10167 0x00000000, 0x0000003f },
10168 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10169 0x00000000, 0x000001ff },
10170 { BUFMGR_MB_HIGH_WATER, 0x0000,
10171 0x00000000, 0x000001ff },
10172 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10173 0xffffffff, 0x00000000 },
10174 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10175 0xffffffff, 0x00000000 },
10177 /* Mailbox Registers */
10178 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10179 0x00000000, 0x000001ff },
10180 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10181 0x00000000, 0x000001ff },
10182 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10183 0x00000000, 0x000007ff },
10184 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10185 0x00000000, 0x000001ff },
10187 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10190 is_5705 = is_5750 = 0;
10191 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10193 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10197 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10198 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10201 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10204 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10205 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10208 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10211 offset = (u32) reg_tbl[i].offset;
10212 read_mask = reg_tbl[i].read_mask;
10213 write_mask = reg_tbl[i].write_mask;
10215 /* Save the original register content */
10216 save_val = tr32(offset);
10218 /* Determine the read-only value. */
10219 read_val = save_val & read_mask;
10221 /* Write zero to the register, then make sure the read-only bits
10222 * are not changed and the read/write bits are all zeros.
10226 val = tr32(offset);
10228 /* Test the read-only and read/write bits. */
10229 if (((val & read_mask) != read_val) || (val & write_mask))
10232 /* Write ones to all the bits defined by RdMask and WrMask, then
10233 * make sure the read-only bits are not changed and the
10234 * read/write bits are all ones.
10236 tw32(offset, read_mask | write_mask);
10238 val = tr32(offset);
10240 /* Test the read-only bits. */
10241 if ((val & read_mask) != read_val)
10244 /* Test the read/write bits. */
10245 if ((val & write_mask) != write_mask)
10248 tw32(offset, save_val);
10254 if (netif_msg_hw(tp))
10255 printk(KERN_ERR PFX "Register test failed at offset %x\n",
10257 tw32(offset, save_val);
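/* A condensed sketch of the two-pass mask test above: after writing
 * all zeros and then (read_mask | write_mask), the read-only bits
 * must still read back as read_val while the read/write bits take
 * exactly the written value.  val0/val1 are the readbacks from the
 * two passes; the helper is illustrative, not driver code.
 */
static int tg3_regmask_test_example(u32 val0, u32 val1, u32 read_val,
				    u32 read_mask, u32 write_mask)
{
	if (((val0 & read_mask) != read_val) || (val0 & write_mask))
		return -EIO;	/* zero-write pass disturbed a bit */
	if (((val1 & read_mask) != read_val) ||
	    ((val1 & write_mask) != write_mask))
		return -EIO;	/* ones-write pass failed */
	return 0;
}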
10261 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10263 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10267 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10268 for (j = 0; j < len; j += 4) {
10271 tg3_write_mem(tp, offset + j, test_pattern[i]);
10272 tg3_read_mem(tp, offset + j, &val);
10273 if (val != test_pattern[i])
10280 static int tg3_test_memory(struct tg3 *tp)
10282 static struct mem_entry {
10285 } mem_tbl_570x[] = {
10286 { 0x00000000, 0x00b50},
10287 { 0x00002000, 0x1c000},
10288 { 0xffffffff, 0x00000}
10289 }, mem_tbl_5705[] = {
10290 { 0x00000100, 0x0000c},
10291 { 0x00000200, 0x00008},
10292 { 0x00004000, 0x00800},
10293 { 0x00006000, 0x01000},
10294 { 0x00008000, 0x02000},
10295 { 0x00010000, 0x0e000},
10296 { 0xffffffff, 0x00000}
10297 }, mem_tbl_5755[] = {
10298 { 0x00000200, 0x00008},
10299 { 0x00004000, 0x00800},
10300 { 0x00006000, 0x00800},
10301 { 0x00008000, 0x02000},
10302 { 0x00010000, 0x0c000},
10303 { 0xffffffff, 0x00000}
10304 }, mem_tbl_5906[] = {
10305 { 0x00000200, 0x00008},
10306 { 0x00004000, 0x00400},
10307 { 0x00006000, 0x00400},
10308 { 0x00008000, 0x01000},
10309 { 0x00010000, 0x01000},
10310 { 0xffffffff, 0x00000}
10312 struct mem_entry *mem_tbl;
10316 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10317 mem_tbl = mem_tbl_5755;
10318 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10319 mem_tbl = mem_tbl_5906;
10320 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
10321 mem_tbl = mem_tbl_5705;
10323 mem_tbl = mem_tbl_570x;
10325 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10326 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10327 mem_tbl[i].len)) != 0)
10334 #define TG3_MAC_LOOPBACK 0
10335 #define TG3_PHY_LOOPBACK 1
10337 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10339 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10340 u32 desc_idx, coal_now;
10341 struct sk_buff *skb, *rx_skb;
10344 int num_pkts, tx_len, rx_len, i, err;
10345 struct tg3_rx_buffer_desc *desc;
10346 struct tg3_napi *tnapi, *rnapi;
10347 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
10349 if (tp->irq_cnt > 1) {
10350 tnapi = &tp->napi[1];
10351 rnapi = &tp->napi[1];
10353 tnapi = &tp->napi[0];
10354 rnapi = &tp->napi[0];
10356 coal_now = tnapi->coal_now | rnapi->coal_now;
10358 if (loopback_mode == TG3_MAC_LOOPBACK) {
10359 /* HW errata - mac loopback fails in some cases on 5780.
10360 * Normal traffic and PHY loopback are not affected by
10363 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10366 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10367 MAC_MODE_PORT_INT_LPBACK;
10368 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10369 mac_mode |= MAC_MODE_LINK_POLARITY;
10370 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10371 mac_mode |= MAC_MODE_PORT_MODE_MII;
10373 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10374 tw32(MAC_MODE, mac_mode);
10375 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10378 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10379 tg3_phy_fet_toggle_apd(tp, false);
10380 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10382 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10384 tg3_phy_toggle_automdix(tp, 0);
10386 tg3_writephy(tp, MII_BMCR, val);
10389 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10390 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10391 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10392 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800);
10393 mac_mode |= MAC_MODE_PORT_MODE_MII;
10395 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10397 /* reset to prevent losing 1st rx packet intermittently */
10398 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10399 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10401 tw32_f(MAC_RX_MODE, tp->rx_mode);
10403 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10404 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10405 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10406 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10407 mac_mode |= MAC_MODE_LINK_POLARITY;
10408 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10409 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10411 tw32(MAC_MODE, mac_mode);
10419 skb = netdev_alloc_skb(tp->dev, tx_len);
10423 tx_data = skb_put(skb, tx_len);
10424 memcpy(tx_data, tp->dev->dev_addr, 6);
10425 memset(tx_data + 6, 0x0, 8);
10427 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10429 for (i = 14; i < tx_len; i++)
10430 tx_data[i] = (u8) (i & 0xff);
10432 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
10433 dev_kfree_skb(skb);
10437 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10442 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
10446 tg3_set_txd(tnapi, tnapi->tx_prod,
10447 skb_shinfo(skb)->dma_head, tx_len, 0, 1);
10452 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
10453 tr32_mailbox(tnapi->prodmbox);
10457 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
10458 for (i = 0; i < 35; i++) {
10459 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10464 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
10465 rx_idx = rnapi->hw_status->idx[0].rx_producer;
10466 if ((tx_idx == tnapi->tx_prod) &&
10467 (rx_idx == (rx_start_idx + num_pkts)))
10471 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
10472 dev_kfree_skb(skb);
10474 if (tx_idx != tnapi->tx_prod)
10477 if (rx_idx != rx_start_idx + num_pkts)
10480 desc = &rnapi->rx_rcb[rx_start_idx];
10481 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10482 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10483 if (opaque_key != RXD_OPAQUE_RING_STD)
10486 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10487 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10490 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10491 if (rx_len != tx_len)
10494 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
10496 map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
10497 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10499 for (i = 14; i < tx_len; i++) {
10500 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10505 /* tg3_free_rings will unmap and free the rx_skb */
10510 #define TG3_MAC_LOOPBACK_FAILED 1
10511 #define TG3_PHY_LOOPBACK_FAILED 2
10512 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10513 TG3_PHY_LOOPBACK_FAILED)
10515 static int tg3_test_loopback(struct tg3 *tp)
10520 if (!netif_running(tp->dev))
10521 return TG3_LOOPBACK_FAILED;
10523 err = tg3_reset_hw(tp, 1);
10525 return TG3_LOOPBACK_FAILED;
10527 /* Turn off gphy autopowerdown. */
10528 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10529 tg3_phy_toggle_apd(tp, false);
10531 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10535 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10537 /* Wait for up to 40 microseconds to acquire lock. */
10538 for (i = 0; i < 4; i++) {
10539 status = tr32(TG3_CPMU_MUTEX_GNT);
10540 if (status == CPMU_MUTEX_GNT_DRIVER)
10545 if (status != CPMU_MUTEX_GNT_DRIVER)
10546 return TG3_LOOPBACK_FAILED;
10548 /* Turn off link-based power management. */
10549 cpmuctrl = tr32(TG3_CPMU_CTRL);
10550 tw32(TG3_CPMU_CTRL,
10551 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10552 CPMU_CTRL_LINK_AWARE_MODE));
10555 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10556 err |= TG3_MAC_LOOPBACK_FAILED;
10558 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10559 tw32(TG3_CPMU_CTRL, cpmuctrl);
10561 /* Release the mutex */
10562 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10565 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10566 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10567 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10568 err |= TG3_PHY_LOOPBACK_FAILED;
10571 /* Re-enable gphy autopowerdown. */
10572 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10573 tg3_phy_toggle_apd(tp, true);
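/* A minimal sketch of the CPMU hardware-mutex handshake used above:
 * request ownership, poll the grant register within the 4 * 10 usec
 * budget stated in the comment, and give up on timeout.  The helper
 * name is illustrative; the driver performs this inline.
 */
static int tg3_cpmu_mutex_acquire_example(struct tg3 *tp)
{
	int i;

	tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

	for (i = 0; i < 4; i++) {
		if (tr32(TG3_CPMU_MUTEX_GNT) == CPMU_MUTEX_GNT_DRIVER)
			return 0;	/* lock granted to the driver */
		udelay(10);		/* assumed poll interval */
	}

	return -EBUSY;
}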
10578 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10581 struct tg3 *tp = netdev_priv(dev);
10583 if (tp->link_config.phy_is_low_power)
10584 tg3_set_power_state(tp, PCI_D0);
10586 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10588 if (tg3_test_nvram(tp) != 0) {
10589 etest->flags |= ETH_TEST_FL_FAILED;
10592 if (tg3_test_link(tp) != 0) {
10593 etest->flags |= ETH_TEST_FL_FAILED;
10596 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10597 int err, err2 = 0, irq_sync = 0;
10599 if (netif_running(dev)) {
10601 tg3_netif_stop(tp);
10605 tg3_full_lock(tp, irq_sync);
10607 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10608 err = tg3_nvram_lock(tp);
10609 tg3_halt_cpu(tp, RX_CPU_BASE);
10610 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10611 tg3_halt_cpu(tp, TX_CPU_BASE);
10613 tg3_nvram_unlock(tp);
10615 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10618 if (tg3_test_registers(tp) != 0) {
10619 etest->flags |= ETH_TEST_FL_FAILED;
10622 if (tg3_test_memory(tp) != 0) {
10623 etest->flags |= ETH_TEST_FL_FAILED;
10626 if ((data[4] = tg3_test_loopback(tp)) != 0)
10627 etest->flags |= ETH_TEST_FL_FAILED;
10629 tg3_full_unlock(tp);
10631 if (tg3_test_interrupt(tp) != 0) {
10632 etest->flags |= ETH_TEST_FL_FAILED;
10636 tg3_full_lock(tp, 0);
10638 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10639 if (netif_running(dev)) {
10640 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10641 err2 = tg3_restart_hw(tp, 1);
10643 tg3_netif_start(tp);
10646 tg3_full_unlock(tp);
10648 if (irq_sync && !err2)
10651 if (tp->link_config.phy_is_low_power)
10652 tg3_set_power_state(tp, PCI_D3hot);
10656 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10658 struct mii_ioctl_data *data = if_mii(ifr);
10659 struct tg3 *tp = netdev_priv(dev);
10662 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10663 struct phy_device *phydev;
10664 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10666 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10667 return phy_mii_ioctl(phydev, data, cmd);
10672 data->phy_id = tp->phy_addr;
10675 case SIOCGMIIREG: {
10678 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10679 break; /* We have no PHY */
10681 if (tp->link_config.phy_is_low_power)
10684 spin_lock_bh(&tp->lock);
10685 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10686 spin_unlock_bh(&tp->lock);
10688 data->val_out = mii_regval;
10694 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10695 break; /* We have no PHY */
10697 if (tp->link_config.phy_is_low_power)
10700 spin_lock_bh(&tp->lock);
10701 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10702 spin_unlock_bh(&tp->lock);
10710 return -EOPNOTSUPP;
10713 #if TG3_VLAN_TAG_USED
10714 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10716 struct tg3 *tp = netdev_priv(dev);
10718 if (!netif_running(dev)) {
10723 tg3_netif_stop(tp);
10725 tg3_full_lock(tp, 0);
10729 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10730 __tg3_set_rx_mode(dev);
10732 tg3_netif_start(tp);
10734 tg3_full_unlock(tp);
10738 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10740 struct tg3 *tp = netdev_priv(dev);
10742 memcpy(ec, &tp->coal, sizeof(*ec));
10746 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10748 struct tg3 *tp = netdev_priv(dev);
10749 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10750 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10752 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10753 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10754 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10755 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10756 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10759 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10760 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10761 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10762 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10763 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10764 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10765 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10766 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10767 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10768 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10771 /* No rx interrupts will be generated if both are zero */
10772 if ((ec->rx_coalesce_usecs == 0) &&
10773 (ec->rx_max_coalesced_frames == 0))
10776 /* No tx interrupts will be generated if both are zero */
10777 if ((ec->tx_coalesce_usecs == 0) &&
10778 (ec->tx_max_coalesced_frames == 0))
10781 /* Only copy relevant parameters, ignore all others. */
10782 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10783 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10784 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10785 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10786 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10787 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10788 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10789 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10790 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10792 if (netif_running(dev)) {
10793 tg3_full_lock(tp, 0);
10794 __tg3_set_coalesce(tp, &tp->coal);
10795 tg3_full_unlock(tp);
10800 static const struct ethtool_ops tg3_ethtool_ops = {
10801 .get_settings = tg3_get_settings,
10802 .set_settings = tg3_set_settings,
10803 .get_drvinfo = tg3_get_drvinfo,
10804 .get_regs_len = tg3_get_regs_len,
10805 .get_regs = tg3_get_regs,
10806 .get_wol = tg3_get_wol,
10807 .set_wol = tg3_set_wol,
10808 .get_msglevel = tg3_get_msglevel,
10809 .set_msglevel = tg3_set_msglevel,
10810 .nway_reset = tg3_nway_reset,
10811 .get_link = ethtool_op_get_link,
10812 .get_eeprom_len = tg3_get_eeprom_len,
10813 .get_eeprom = tg3_get_eeprom,
10814 .set_eeprom = tg3_set_eeprom,
10815 .get_ringparam = tg3_get_ringparam,
10816 .set_ringparam = tg3_set_ringparam,
10817 .get_pauseparam = tg3_get_pauseparam,
10818 .set_pauseparam = tg3_set_pauseparam,
10819 .get_rx_csum = tg3_get_rx_csum,
10820 .set_rx_csum = tg3_set_rx_csum,
10821 .set_tx_csum = tg3_set_tx_csum,
10822 .set_sg = ethtool_op_set_sg,
10823 .set_tso = tg3_set_tso,
10824 .self_test = tg3_self_test,
10825 .get_strings = tg3_get_strings,
10826 .phys_id = tg3_phys_id,
10827 .get_ethtool_stats = tg3_get_ethtool_stats,
10828 .get_coalesce = tg3_get_coalesce,
10829 .set_coalesce = tg3_set_coalesce,
10830 .get_sset_count = tg3_get_sset_count,
10833 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10835 u32 cursize, val, magic;
10837 tp->nvram_size = EEPROM_CHIP_SIZE;
10839 if (tg3_nvram_read(tp, 0, &magic) != 0)
10842 if ((magic != TG3_EEPROM_MAGIC) &&
10843 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10844 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10848 * Size the chip by reading offsets at increasing powers of two.
10849 * When we encounter our validation signature, we know the addressing
10850 * has wrapped around, and thus have our chip size.
10854 while (cursize < tp->nvram_size) {
10855 if (tg3_nvram_read(tp, cursize, &val) != 0)
10864 tp->nvram_size = cursize;
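/* A minimal sketch of the power-of-two probe above: keep doubling the
 * candidate offset until the validation magic reappears, which marks
 * the point where NVRAM addressing wraps back to offset 0 and hence
 * the device size.  The read callback and the 0x10 starting step are
 * assumptions for illustration.
 */
static u32 tg3_nvram_probe_size_example(u32 (*read32)(u32 off), u32 magic,
					u32 max_size)
{
	u32 cursize = 0x10;

	while (cursize < max_size) {
		if (read32(cursize) == magic)
			break;		/* wrapped around: size found */
		cursize <<= 1;
	}

	return cursize;
}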
10867 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10871 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
10872 tg3_nvram_read(tp, 0, &val) != 0)
10875 /* Selfboot format */
10876 if (val != TG3_EEPROM_MAGIC) {
10877 tg3_get_eeprom_size(tp);
10881 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10883 /* This is confusing. We want to operate on the
10884 * 16-bit value at offset 0xf2. The tg3_nvram_read()
10885 * call will read from NVRAM and byteswap the data
10886 * according to the byteswapping settings for all
10887 * other register accesses. This ensures the data we
10888 * want will always reside in the lower 16-bits.
10889 * However, the data in NVRAM is in LE format, which
10890 * means the data from the NVRAM read will always be
10891 * opposite the endianness of the CPU. The 16-bit
10892 * byteswap then brings the data to CPU endianness.
10894 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
10898 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
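/* Worked example of the swab16() decode above, under the comment's
 * assumption that the extracted halfword arrives opposite to CPU
 * endianness: a stored size of 512 KB (0x0200) reads back as 0x0002,
 * swab16(0x0002) restores 0x0200 = 512, and 512 * 1024 yields the
 * final byte count.
 */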
10901 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10905 nvcfg1 = tr32(NVRAM_CFG1);
10906 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10907 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10909 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10910 tw32(NVRAM_CFG1, nvcfg1);
10913 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10914 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10915 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10916 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10917 tp->nvram_jedecnum = JEDEC_ATMEL;
10918 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10919 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10921 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10922 tp->nvram_jedecnum = JEDEC_ATMEL;
10923 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10925 case FLASH_VENDOR_ATMEL_EEPROM:
10926 tp->nvram_jedecnum = JEDEC_ATMEL;
10927 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10928 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10930 case FLASH_VENDOR_ST:
10931 tp->nvram_jedecnum = JEDEC_ST;
10932 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10933 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10935 case FLASH_VENDOR_SAIFUN:
10936 tp->nvram_jedecnum = JEDEC_SAIFUN;
10937 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10939 case FLASH_VENDOR_SST_SMALL:
10940 case FLASH_VENDOR_SST_LARGE:
10941 tp->nvram_jedecnum = JEDEC_SST;
10942 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10946 tp->nvram_jedecnum = JEDEC_ATMEL;
10947 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10948 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10952 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
10954 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10955 case FLASH_5752PAGE_SIZE_256:
10956 tp->nvram_pagesize = 256;
10958 case FLASH_5752PAGE_SIZE_512:
10959 tp->nvram_pagesize = 512;
10961 case FLASH_5752PAGE_SIZE_1K:
10962 tp->nvram_pagesize = 1024;
10964 case FLASH_5752PAGE_SIZE_2K:
10965 tp->nvram_pagesize = 2048;
10967 case FLASH_5752PAGE_SIZE_4K:
10968 tp->nvram_pagesize = 4096;
10970 case FLASH_5752PAGE_SIZE_264:
10971 tp->nvram_pagesize = 264;
10973 case FLASH_5752PAGE_SIZE_528:
10974 tp->nvram_pagesize = 528;
10979 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10983 nvcfg1 = tr32(NVRAM_CFG1);
10985 /* NVRAM protection for TPM */
10986 if (nvcfg1 & (1 << 27))
10987 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
10989 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10990 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10991 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10992 tp->nvram_jedecnum = JEDEC_ATMEL;
10993 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10995 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10996 tp->nvram_jedecnum = JEDEC_ATMEL;
10997 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10998 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11000 case FLASH_5752VENDOR_ST_M45PE10:
11001 case FLASH_5752VENDOR_ST_M45PE20:
11002 case FLASH_5752VENDOR_ST_M45PE40:
11003 tp->nvram_jedecnum = JEDEC_ST;
11004 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11005 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11009 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
11010 tg3_nvram_get_pagesize(tp, nvcfg1);
11012 /* For eeprom, set pagesize to maximum eeprom size */
11013 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11015 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11016 tw32(NVRAM_CFG1, nvcfg1);
11020 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11022 u32 nvcfg1, protect = 0;
11024 nvcfg1 = tr32(NVRAM_CFG1);
11026 /* NVRAM protection for TPM */
11027 if (nvcfg1 & (1 << 27)) {
11028 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11032 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11034 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11035 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11036 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11037 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11038 tp->nvram_jedecnum = JEDEC_ATMEL;
11039 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11040 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11041 tp->nvram_pagesize = 264;
11042 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11043 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11044 tp->nvram_size = (protect ? 0x3e200 :
11045 TG3_NVRAM_SIZE_512KB);
11046 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11047 tp->nvram_size = (protect ? 0x1f200 :
11048 TG3_NVRAM_SIZE_256KB);
11050 tp->nvram_size = (protect ? 0x1f200 :
11051 TG3_NVRAM_SIZE_128KB);
11053 case FLASH_5752VENDOR_ST_M45PE10:
11054 case FLASH_5752VENDOR_ST_M45PE20:
11055 case FLASH_5752VENDOR_ST_M45PE40:
11056 tp->nvram_jedecnum = JEDEC_ST;
11057 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11058 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11059 tp->nvram_pagesize = 256;
11060 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11061 tp->nvram_size = (protect ?
11062 TG3_NVRAM_SIZE_64KB :
11063 TG3_NVRAM_SIZE_128KB);
11064 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11065 tp->nvram_size = (protect ?
11066 TG3_NVRAM_SIZE_64KB :
11067 TG3_NVRAM_SIZE_256KB);
11069 tp->nvram_size = (protect ?
11070 TG3_NVRAM_SIZE_128KB :
11071 TG3_NVRAM_SIZE_512KB);
11076 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11080 nvcfg1 = tr32(NVRAM_CFG1);
11082 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11083 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11084 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11085 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11086 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11087 tp->nvram_jedecnum = JEDEC_ATMEL;
11088 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11089 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11091 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11092 tw32(NVRAM_CFG1, nvcfg1);
11094 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11095 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11096 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11097 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11098 tp->nvram_jedecnum = JEDEC_ATMEL;
11099 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11100 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11101 tp->nvram_pagesize = 264;
11103 case FLASH_5752VENDOR_ST_M45PE10:
11104 case FLASH_5752VENDOR_ST_M45PE20:
11105 case FLASH_5752VENDOR_ST_M45PE40:
11106 tp->nvram_jedecnum = JEDEC_ST;
11107 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11108 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11109 tp->nvram_pagesize = 256;
11114 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11116 u32 nvcfg1, protect = 0;
11118 nvcfg1 = tr32(NVRAM_CFG1);
11120 /* NVRAM protection for TPM */
11121 if (nvcfg1 & (1 << 27)) {
11122 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11126 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11128 case FLASH_5761VENDOR_ATMEL_ADB021D:
11129 case FLASH_5761VENDOR_ATMEL_ADB041D:
11130 case FLASH_5761VENDOR_ATMEL_ADB081D:
11131 case FLASH_5761VENDOR_ATMEL_ADB161D:
11132 case FLASH_5761VENDOR_ATMEL_MDB021D:
11133 case FLASH_5761VENDOR_ATMEL_MDB041D:
11134 case FLASH_5761VENDOR_ATMEL_MDB081D:
11135 case FLASH_5761VENDOR_ATMEL_MDB161D:
11136 tp->nvram_jedecnum = JEDEC_ATMEL;
11137 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11138 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11139 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11140 tp->nvram_pagesize = 256;
11142 case FLASH_5761VENDOR_ST_A_M45PE20:
11143 case FLASH_5761VENDOR_ST_A_M45PE40:
11144 case FLASH_5761VENDOR_ST_A_M45PE80:
11145 case FLASH_5761VENDOR_ST_A_M45PE16:
11146 case FLASH_5761VENDOR_ST_M_M45PE20:
11147 case FLASH_5761VENDOR_ST_M_M45PE40:
11148 case FLASH_5761VENDOR_ST_M_M45PE80:
11149 case FLASH_5761VENDOR_ST_M_M45PE16:
11150 tp->nvram_jedecnum = JEDEC_ST;
11151 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11152 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11153 tp->nvram_pagesize = 256;
11158 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11161 case FLASH_5761VENDOR_ATMEL_ADB161D:
11162 case FLASH_5761VENDOR_ATMEL_MDB161D:
11163 case FLASH_5761VENDOR_ST_A_M45PE16:
11164 case FLASH_5761VENDOR_ST_M_M45PE16:
11165 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11167 case FLASH_5761VENDOR_ATMEL_ADB081D:
11168 case FLASH_5761VENDOR_ATMEL_MDB081D:
11169 case FLASH_5761VENDOR_ST_A_M45PE80:
11170 case FLASH_5761VENDOR_ST_M_M45PE80:
11171 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11173 case FLASH_5761VENDOR_ATMEL_ADB041D:
11174 case FLASH_5761VENDOR_ATMEL_MDB041D:
11175 case FLASH_5761VENDOR_ST_A_M45PE40:
11176 case FLASH_5761VENDOR_ST_M_M45PE40:
11177 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11179 case FLASH_5761VENDOR_ATMEL_ADB021D:
11180 case FLASH_5761VENDOR_ATMEL_MDB021D:
11181 case FLASH_5761VENDOR_ST_A_M45PE20:
11182 case FLASH_5761VENDOR_ST_M_M45PE20:
11183 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11189 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11191 tp->nvram_jedecnum = JEDEC_ATMEL;
11192 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11193 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
}
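/* Note: 264 and 528 bytes are the non-power-of-two page sizes used by
 * Atmel AT45DB-style DataFlash parts, which are addressed page/offset
 * rather than linearly.  Any other page size is assumed to be linearly
 * addressed, so TG3_FLG3_NO_NVRAM_ADDR_TRANS is set to bypass the
 * address translation performed in tg3_nvram_phys_addr().
 */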
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tp->tg3_flags |= TG3_FLAG_NVRAM;

		if (tg3_nvram_lock(tp)) {
			printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
			       "tg3_nvram_init failed.\n", tp->dev->name);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
			tg3_get_5717_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
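/* The GET_ASIC_REV() dispatch above selects a per-generation NVRAM
 * prober and falls back to tg3_get_nvram_info() for anything it does
 * not recognize; 5700/5701 lack the NVRAM interface entirely and take
 * the serial EEPROM path instead.
 */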
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
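/* Worked example (values invented for illustration): with
 * pagesize = 256, pagemask = 0xff, and offset = 0x1234:
 *
 *	phy_addr = 0x1234 & ~0xff = 0x1200	(page to read/erase/rewrite)
 *	page_off = 0x1234 &  0xff = 0x34	(patch position within tmp)
 *
 * The full 256-byte page is read into tmp, patched at 0x34, erased and
 * rewritten -- the read-modify-write cycle required by flash parts that
 * can only be erased one whole page at a time.
 */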
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))
				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
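/* NVRAM_CMD_FIRST and NVRAM_CMD_LAST bracket each page burst so the
 * controller knows where a program cycle opens and closes.  The extra
 * write-enable (NVRAM_CMD_WREN) issued above for older ST parts
 * reflects those parts' requirement that every page program be
 * preceded by a write-enable command.
 */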
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	}
	else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		}
		else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
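/* On LOM designs GPIO1 doubles as the eeprom write-protect line, which
 * is why tg3_nvram_write_block() drops GRC_LCLCTRL_GPIO_OUTPUT1 around
 * the write and then restores tp->grc_local_ctrl; see the
 * TG3_FLAG_EEPROM_WRITE_PROT handling in tg3_get_eeprom_hw_cfg().
 */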
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },		    /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },		    /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },		/* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },		  /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};

static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
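/* lookup_by_subsys() is the last-resort PHY identification path: it
 * matches only on PCI subsystem vendor/device, so known OEM boards can
 * be identified even when both the MII ID registers and the eeprom
 * signature are unusable.  A phy_id of 0 in the table marks boards
 * (typically fiber/serdes) with no recognizable copper PHY.
 */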
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so we need to make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		} else {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;

		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;

		if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;

		if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;

		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		}

		if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
	}
done:
	device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
	device_set_wakeup_enable(&tp->pdev->dev,
				 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
}
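/* Everything above only gathers configuration into tp->tg3_flags*; the
 * one externally visible side effect is the wakeup capability and
 * enable state pushed to the driver core, which the PM core then
 * exposes through the device's power/wakeup attributes.
 */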
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
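/* 100 polls spaced udelay(10) apart give the 1 ms budget named in the
 * comment above before the command is declared stuck with -EBUSY; OTP
 * commands normally complete well inside that window.
 */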
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
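/* Worked example (values invented for illustration): if the two reads
 * return thalf_otp = 0x1234abcd and bhalf_otp = 0x5678ef01, then
 *
 *	((0x1234abcd & 0x0000ffff) << 16) | (0x5678ef01 >> 16)
 *		= 0xabcd0000 | 0x00005678 = 0xabcd5678
 *
 * i.e. the low half of the first word is spliced to the high half of
 * the second to recover the 32-bit gphy config value that straddles
 * the OTP alignment boundary.
 */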
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
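/* The hw_phy_id packing above mirrors the eeprom_phy_id packing in
 * tg3_get_eeprom_hw_cfg(): MII_PHYSID1 supplies the upper ID bits
 * (shifted left by 10), while MII_PHYSID2 contributes its high vendor
 * bits (0xfc00 << 16) and its model/revision bits (0x03ff).  The
 * result is the driver's internal PHY_ID_* encoding, not the raw IEEE
 * OUI register layout.
 */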
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];   /* in little-endian format */
	unsigned int i;
	u32 magic;

	if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
	    tg3_nvram_read(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, 0x100 + i, &tmp))
				goto out_not_found;

			memcpy(&vpd_data[i], &tmp, sizeof(tmp));
		}
	} else {
		int vpd_cap;

		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			__le32 v;
			u16 tmp16;

			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			v = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &v, sizeof(v));
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
		strcpy(tp->board_part_number, "BCM57780");
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
		strcpy(tp->board_part_number, "BCM57760");
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
		strcpy(tp->board_part_number, "BCM57790");
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
		strcpy(tp->board_part_number, "BCM57788");
	else
		strcpy(tp->board_part_number, "none");
}
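/* The parser above walks standard PCI VPD resource tags: 0x82 (the
 * identifier string) and 0x91 (read/write data) are skipped using
 * their little-endian length bytes, while 0x90 (read-only data) is
 * searched for the two-character 'PN' keyword whose payload, capped at
 * 24 bytes here, is the board part number.
 */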
static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xe0000000) != 0x00000000)
		return 0;

	return 1;
}
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	if (newver) {
		if (tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor);
	}
}
static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	tp->fw_ver[0] = 's';
	tp->fw_ver[1] = 'b';
	tp->fw_ver[2] = '\0';

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);

	if (build > 0) {
		tp->fw_ver[8] = 'a' + build - 1;
		tp->fw_ver[9] = '\0';
	}
}
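/* Illustrative output: with major = 1, minor = 2, build = 1 the code
 * above first yields "sb v1.02" and then appends the build as a
 * letter, giving "sb v1.02a".  fw_ver[8] is 'a' + build - 1, which is
 * why builds are capped at 26.
 */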
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d",
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
		tp->fw_ver[0] = 's';
		tp->fw_ver[1] = 'b';
		tp->fw_ver[2] = '\0';

		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);
	else
		return;

	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	tg3_read_mgmtfw_ver(tp);

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);

static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	static struct pci_device_id write_reorder_chipsets[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
			     PCI_DEVICE_ID_AMD_FE_GATE_700C) },
		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
			     PCI_DEVICE_ID_AMD_8131_BRIDGE) },
		{ PCI_DEVICE(PCI_VENDOR_ID_VIA,
			     PCI_DEVICE_ID_VIA_8385_0) },
		{ },
	};
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
	 * has the register indirect write enable bit set before
	 * we try to access any of the MMIO registers.  It is also
	 * critical that the PCI-X hw workaround situation is decided
	 * before that as well.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 prod_id_asic_rev;

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717S ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718S)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN2_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else
			pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
					      &prod_id_asic_rev);

		tp->pci_chip_rev_id = prod_id_asic_rev;
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {

				tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
				pci_dev_put(bridge);
				break;
			}
		}
	}
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
				pci_dev_put(bridge);
				break;
			}
		}
	}
	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
		tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
	}
	else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}
	/* Initialize misc host control in PCI block. */
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		tp->pdev_peer = tg3_find_peer(tp);

	/* Intentionally exclude ASIC_REV_5906 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
	    (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
		tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
		tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
	else {
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
		tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
			tp->dev->features |= NETIF_F_IPV6_CSUM;
	}

	/* Determine TSO capabilities */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
	else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
	else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
			tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	tp->irq_max = 1;
	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
		tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;

		if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
		}

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
			tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
	else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
		tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
		tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
	if (tp->pcie_cap != 0) {
		u16 lnkctl;

		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;

		pcie_set_readrq(tp->pdev, 4096);

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
				tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
				tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
		}
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
	} else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
		   (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			printk(KERN_ERR PFX "Cannot find PCI-X "
					    "capability, aborting.\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(write_reorder_chipsets) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
			u32 pm_reg;

			tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_32BIT;

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}
	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
		tp->write32 = tg3_write_indirect_reg32;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
	    (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
		tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLG2_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
	/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
	 * GPIO1 driven high will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	/* Force the chip into D0. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
		       pci_name(tp->pdev));
		return err;
	}

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN &&
	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
	} else {
		tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;

	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
	    !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
		} else
			tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;

	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	val &= GRC_MODE_HOST_STACKUP;
	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);
	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tp->tg3_flags2 |= TG3_FLG2_IS_5788;

	if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
		tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}
13109 /* Preserve the APE MAC_MODE bits */
13110 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
13111 tp->mac_mode = tr32(MAC_MODE) |
13112 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13114 tp->mac_mode = TG3_DEF_MAC_MODE;
13116 /* these are limited to 10/100 only */
13117 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13118 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
13119 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13120 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13121 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
13122 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
13123 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
13124 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13125 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
13126 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
13127 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
13128 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
13129 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
13130 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
13132 err = tg3_phy_probe(tp);
13134 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
13135 pci_name(tp->pdev), err);
13136 /* ... but do not return immediately ... */
13140 tg3_read_partno(tp);
13141 tg3_read_fw_ver(tp);
13143 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
13144 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
13146 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13147 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
13149 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
13152 /* 5700 {AX,BX} chips have a broken status block link
13153 * change bit implementation, so we must use the
13154 * status register in those cases.
13156 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13157 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
13159 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
13161 /* The led_ctrl is set during tg3_phy_probe, here we might
13162 * have to force the link status polling mechanism based
13163 * upon subsystem IDs.
13165 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
13166 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13167 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
13168 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
13169 TG3_FLAG_USE_LINKCHG_REG);
13172 /* For all SERDES we poll the MAC status register. */
13173 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
13174 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
13176 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;

	tp->rx_offset = NET_IP_ALIGN;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
		tp->rx_offset = 0;

	tp->rx_std_max_post = TG3_RX_RING_SIZE;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
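
/* On SPARC, prefer the firmware-provided "local-mac-address" property;
 * the IDPROM-based helper further below is the fallback of last resort
 * used by tg3_get_device_address().
 */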
#ifdef CONFIG_SPARC
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		memcpy(dev->perm_addr, dev->dev_addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif
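
/* Derive the permanent station address.  The sources are tried in
 * order: the SRAM mailbox, then NVRAM, then the MAC_ADDR_0 registers.
 * A sketch of the assumed mailbox layout (0x484b is ASCII "HK"):
 *
 *	hi = (0x484b << 16) | (addr[0] << 8) | addr[1];
 *	lo = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
 */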
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC)
			mac_offset = 0xcc;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >> 8) & 0xff;
		dev->dev_addr[1] = (hi >> 0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >> 8) & 0xff;
		dev->dev_addr[5] = (lo >> 0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
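
/* DMA read/write boundary selection.  BOUNDARY_SINGLE_CACHELINE asks
 * the chip to break bursts at every cache line, while
 * BOUNDARY_MULTI_CACHELINE permits longer bursts; the goal is chosen
 * per architecture below.
 */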
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
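
/* Run a single test DMA between the host buffer and chip SRAM at
 * offset 0x2100: one internal buffer descriptor is written into the
 * NIC's descriptor pool through the PCI memory window, the read or
 * write DMA engine is kicked via its FTQ, and the completion FIFO is
 * then polled for the descriptor address.
 */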
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
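
/* Exercise host<->chip DMA using an 8 KiB (0x2000 byte) buffer filled
 * with an incrementing word pattern (p[i] = i), written to the chip
 * and read back.  On a mismatch the write boundary is clamped to 16
 * bytes and the test retried; the result tunes tp->dma_rwctrl.
 */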
#define TEST_BUFFER_SIZE	0x2000

static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		goto out;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;

			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}

static void __devinit tg3_init_link_config(struct tg3 *tp)
{
	tp->link_config.advertising =
		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
		 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
		 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
		 ADVERTISED_Autoneg | ADVERTISED_MII);
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.phy_is_low_power = 0;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}
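
/* Buffer-manager watermark selection: 5705-class chips (excluding the
 * 5717) get the smaller _5705/_5906 mbuf watermarks, all others the
 * full-size defaults; the jumbo watermarks are set alongside.
 */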
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}

static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & PHY_ID_MASK) {
	case PHY_ID_BCM5400:	return "5400";
	case PHY_ID_BCM5401:	return "5401";
	case PHY_ID_BCM5411:	return "5411";
	case PHY_ID_BCM5701:	return "5701";
	case PHY_ID_BCM5703:	return "5703";
	case PHY_ID_BCM5704:	return "5704";
	case PHY_ID_BCM5705:	return "5705";
	case PHY_ID_BCM5750:	return "5750";
	case PHY_ID_BCM5752:	return "5752";
	case PHY_ID_BCM5714:	return "5714";
	case PHY_ID_BCM5780:	return "5780";
	case PHY_ID_BCM5755:	return "5755";
	case PHY_ID_BCM5787:	return "5787";
	case PHY_ID_BCM5784:	return "5784";
	case PHY_ID_BCM5756:	return "5722/5756";
	case PHY_ID_BCM5906:	return "5906";
	case PHY_ID_BCM5761:	return "5761";
	case PHY_ID_BCM5717:	return "5717";
	case PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}

static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
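
/* Find the other function of a dual-port device (e.g. 5704).  devfn
 * carries the function number in its low three bits, so devfn & ~7
 * names the slot and devnr | func walks functions 0-7 looking for a
 * sibling PCI device.
 */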
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}

static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
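
/* Two net_device_ops variants follow; they differ only in the transmit
 * handler.  Chips needing the transmit-side DMA workarounds are given
 * tg3_start_xmit_dma_bug (see the selection logic in tg3_init_one()).
 */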
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats		= tg3_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};

static const struct net_device_ops tg3_netdev_ops_dma_bug = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit_dma_bug,
	.ndo_get_stats		= tg3_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
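
/* Device probe.  The sequence below: enable and map the PCI device,
 * read the chip invariants, size the DMA masks, obtain the MAC
 * address, run the DMA engine test, then register the net_device.
 */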
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i < 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
		dev->netdev_ops = &tg3_netdev_ops;
	else
		dev->netdev_ops = &tg3_netdev_ops_dma_bug;

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Selectively allow TSO based on operating conditions */
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
	    (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	else {
		tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
	    (dev->features & NETIF_F_IP_CSUM))
		dev->features |= NETIF_F_TSO;

	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
	    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
		if (dev->features & NETIF_F_IPV6_CSUM)
			dev->features |= NETIF_F_TSO6;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			dev->features |= NETIF_F_TSO_ECN;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_fw;
	}

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_fw;
		}

		tg3_ape_lock_init(tp);

		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
			tg3_read_dash_ver(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown DMA.
	 * The self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}
14240 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
14242 tp->board_part_number,
14243 tp->pci_chip_rev_id,
14244 tg3_bus_string(tp, str),

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		printk(KERN_INFO
		       "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
		       tp->dev->name, phydev->drv->name,
		       dev_name(&phydev->dev));
	} else
		printk(KERN_INFO
		       "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
		       tp->dev->name, tg3_phy_string(tp),
		       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
			((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
			 "10/100/1000Base-T")),
		       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
14263 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
14265 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
14266 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
14267 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
14268 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
14269 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
14270 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
14271 dev->name, tp->dma_rwctrl,
14272 (pdev->dma_mask == DMA_BIT_MASK(32)) ? 32 :
14273 (((u64) pdev->dma_mask == DMA_BIT_MASK(40)) ? 40 : 64));

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_fw:
	if (tp->fw)
		release_firmware(tp->fw);

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		flush_scheduled_work();

		if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	pci_power_t target_state;
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;

	err = tg3_set_power_state(tp, target_state);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
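
/* Resume path: PCI config state is restored and the chip brought back
 * to D0; if the interface was running, the hardware and timer are
 * restarted before the device is reattached.
 */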
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);