/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2009 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#include "tg3.h"
#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.102"
#define DRV_MODULE_RELDATE	"September 1, 2009"
#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	(((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
	  !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_ext_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
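
/*
 * Editor's note (illustrative example, not in the original source): because
 * TG3_TX_RING_SIZE is a power of two, the AND above is equivalent to a
 * modulo, e.g. NEXT_TX(511) == (512 & 511) == 0, so the producer index
 * wraps back to the start of the ring without a hardware divide.
 */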
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
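
/*
 * Editor's note (worked example, not in the original source): with the
 * default TG3_DEF_TX_RING_PENDING of 511, TG3_TX_WAKEUP_THRESH() is
 * 511 / 4 = 127, i.e. a stopped queue is woken once at least 127
 * descriptors are free again.
 */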
#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS	(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST	6
#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
#define TG3_RSS_MIN_NUM_MSIX_VECS	2

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
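
/*
 * Editor's note (illustrative usage, not in the original source): the
 * parameter is a NETIF_MSG_* bitmask, so e.g.
 *
 *	modprobe tg3 tg3_debug=0x7
 *
 * would enable NETIF_MSG_DRV (0x1), NETIF_MSG_PROBE (0x2) and
 * NETIF_MSG_LINK (0x4); the default of -1 falls back to TG3_DEF_MSG_ENABLE.
 */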
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_full" },
	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)		tp->write32(tp, reg, val)
#define tw32_f(reg, val)	_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)		tp->read32(tp, reg)
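
/*
 * Editor's note (illustrative, not in the original source): tw32() is a
 * plain posted write, tw32_f() reads the register back to flush the write,
 * and tw32_wait_f() additionally guarantees a settling delay, e.g.
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * writes, flushes, and ensures at least 40 usec elapse before the caller
 * proceeds.
 */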
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
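
/*
 * Editor's note (illustrative usage, not in the original source): the two
 * helpers above give word-at-a-time access to NIC SRAM through the memory
 * window, e.g.
 *
 *	u32 fw_cmd;
 *	tg3_read_mem(tp, NIC_SRAM_FW_CMD_MBOX, &fw_cmd);
 *
 * The window base is always restored to zero so that unrelated code never
 * sees a shifted window.
 */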
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}
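
/*
 * Editor's note (illustrative pairing, not in the original source): callers
 * bracket accesses to resources shared with the APE firmware, e.g.
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM) == 0) {
 *		...access the shared resource...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 */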
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;
	u32 coal_now = 0;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
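
/*
 * Editor's note (illustrative, not in the original source): both helpers
 * above build the same MI_COM frame; e.g. for a read of register 1 (BMSR)
 * on PHY address 1:
 *
 *	frame_val = (1 << MI_COM_PHY_ADDR_SHIFT)	// PHY address
 *		  | (1 << MI_COM_REG_ADDR_SHIFT)	// register number
 *		  | MI_COM_CMD_READ | MI_COM_START;
 *
 * after which MI_COM_BUSY is polled until the MAC has clocked the
 * transaction out on the MDIO bus.
 */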
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM50610:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case TG3_PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		u32 funcnum, is_serdes;

		funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC;
		if (funcnum)
			tp->phy_addr = 2;
		else
			tp->phy_addr = 1;

		is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
		       tp->dev->name, i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		break;
	case TG3_PHY_ID_BCM50610:
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case TG3_PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case TG3_PHY_ID_RTL8201E:
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
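
/*
 * Editor's note (worked example, not in the original source): with
 * TG3_FW_EVENT_TIMEOUT_USEC == 2500, delay_cnt becomes (2500 >> 3) + 1 ==
 * 313 polls of ~8 usec each, so the loop above busy-waits for at most
 * roughly the 2.5 ms event timeout.
 */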
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
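
/*
 * Editor's note (summary table, not in the original source): the resolution
 * above implements the 802.3 pause handshake for 1000BASE-X, i.e.
 *
 *	local PAUSE  local ASYM  remote PAUSE  remote ASYM  =>  result
 *	     1           0            1             x           TX+RX
 *	     1           1            1             x           TX+RX
 *	     1           1            0             1           RX only
 *	     0           1            1             1           TX only
 *	     otherwise                                          none
 */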
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
	}
}
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB. */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	} else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		u32 val;

		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

out:
	tg3_phy_apply_otp(tp);

	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	} else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}
static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
			     MII_TG3_AUXCTL_PCTL_100TX_LPWR |
			     MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
			     MII_TG3_AUXCTL_PCTL_VREG_11V);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}
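
/* Usage sketch (hypothetical helper, illustrative only and not part of
 * the driver): the software arbitration lock above is reference
 * counted, so nested pairs are safe as long as every tg3_nvram_lock()
 * that returns 0 is balanced by a tg3_nvram_unlock().  Callers must
 * already hold tp->lock.
 */
static inline int tg3_nvram_lock_example(struct tg3 *tp, u32 *word)
{
	int ret = tg3_nvram_lock(tp);

	if (ret)
		return ret;
	*word = tr32(NVRAM_RDDATA);	/* any arbitrated NVRAM access */
	tg3_nvram_unlock(tp);
	return 0;
}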
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
	    !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
	    !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
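
/* Worked example (assuming the usual 264-byte Atmel AT45DB0X1B page,
 * i.e. nvram_pagesize == 264 and ATMEL_AT45DB0X1B_PAGE_POS == 9):
 * linear offset 600 lives in page 600 / 264 = 2 at byte 600 % 264 = 72,
 * so tg3_nvram_phys_addr() yields (2 << 9) + 72 = 1096.
 * tg3_nvram_logical_addr() inverts this:
 * (1096 >> 9) * 264 + (1096 & 511) = 528 + 72 = 600.
 */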
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}

/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}
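
/* Example (illustrative): if NVRAM holds the byte sequence
 * de ad be ef, tg3_nvram_read() returns the numeric value 0xdeadbeef
 * on both host endiannesses; stored as a raw u32, its memory image is
 * therefore byteswapped on LE hosts.  The cpu_to_be32() above makes
 * the in-memory image de ad be ef everywhere, which is what callers
 * that treat NVRAM as a bytestream (e.g. EEPROM dumps) expect.
 */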
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
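
/* Example (illustrative): for dev_addr 00:10:18:aa:bb:cc the loop
 * above programs addr_high = 0x0010 (bytes 0-1) and
 * addr_low = 0x18aabbcc (bytes 2-5) into each of the four MAC_ADDR_n
 * register slots, and the transmit backoff generator is seeded with
 * (0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc) & TX_BACKOFF_SEED_MASK
 * = 0x259.
 */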
2438 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2441 bool device_should_wake, do_low_power;
2443 /* Make sure register accesses (indirect or otherwise)
2444 * will function correctly.
2446 pci_write_config_dword(tp->pdev,
2447 TG3PCI_MISC_HOST_CTRL,
2448 tp->misc_host_ctrl);
2452 pci_enable_wake(tp->pdev, state, false);
2453 pci_set_power_state(tp->pdev, PCI_D0);
2455 /* Switch out of Vaux if it is a NIC */
2456 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2457 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2467 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2468 tp->dev->name, state);
2472 /* Restore the CLKREQ setting. */
2473 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2476 pci_read_config_word(tp->pdev,
2477 tp->pcie_cap + PCI_EXP_LNKCTL,
2479 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2480 pci_write_config_word(tp->pdev,
2481 tp->pcie_cap + PCI_EXP_LNKCTL,
2485 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2486 tw32(TG3PCI_MISC_HOST_CTRL,
2487 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2489 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2490 device_may_wakeup(&tp->pdev->dev) &&
2491 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2493 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2494 do_low_power = false;
2495 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2496 !tp->link_config.phy_is_low_power) {
2497 struct phy_device *phydev;
2498 u32 phyid, advertising;
2500 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2502 tp->link_config.phy_is_low_power = 1;
2504 tp->link_config.orig_speed = phydev->speed;
2505 tp->link_config.orig_duplex = phydev->duplex;
2506 tp->link_config.orig_autoneg = phydev->autoneg;
2507 tp->link_config.orig_advertising = phydev->advertising;
2509 advertising = ADVERTISED_TP |
2511 ADVERTISED_Autoneg |
2512 ADVERTISED_10baseT_Half;
2514 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2515 device_should_wake) {
2516 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2518 ADVERTISED_100baseT_Half |
2519 ADVERTISED_100baseT_Full |
2520 ADVERTISED_10baseT_Full;
2522 advertising |= ADVERTISED_10baseT_Full;
2525 phydev->advertising = advertising;
2527 phy_start_aneg(phydev);
2529 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2530 if (phyid != TG3_PHY_ID_BCMAC131) {
2531 phyid &= TG3_PHY_OUI_MASK;
2532 if (phyid == TG3_PHY_OUI_1 ||
2533 phyid == TG3_PHY_OUI_2 ||
2534 phyid == TG3_PHY_OUI_3)
2535 do_low_power = true;
2539 do_low_power = true;
2541 if (tp->link_config.phy_is_low_power == 0) {
2542 tp->link_config.phy_is_low_power = 1;
2543 tp->link_config.orig_speed = tp->link_config.speed;
2544 tp->link_config.orig_duplex = tp->link_config.duplex;
2545 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2548 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2549 tp->link_config.speed = SPEED_10;
2550 tp->link_config.duplex = DUPLEX_HALF;
2551 tp->link_config.autoneg = AUTONEG_ENABLE;
2552 tg3_setup_phy(tp, 0);
2556 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2559 val = tr32(GRC_VCPU_EXT_CTRL);
2560 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2561 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2565 for (i = 0; i < 200; i++) {
2566 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2567 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2572 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2573 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2574 WOL_DRV_STATE_SHUTDOWN |
2578 if (device_should_wake) {
2581 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2583 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2587 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2588 mac_mode = MAC_MODE_PORT_MODE_GMII;
2590 mac_mode = MAC_MODE_PORT_MODE_MII;
2592 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2593 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2595 u32 speed = (tp->tg3_flags &
2596 TG3_FLAG_WOL_SPEED_100MB) ?
2597 SPEED_100 : SPEED_10;
2598 if (tg3_5700_link_polarity(tp, speed))
2599 mac_mode |= MAC_MODE_LINK_POLARITY;
2601 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2604 mac_mode = MAC_MODE_PORT_MODE_TBI;
2607 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2608 tw32(MAC_LED_CTRL, tp->led_ctrl);
2610 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2611 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2612 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2613 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2614 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2615 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2617 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2618 mac_mode |= tp->mac_mode &
2619 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2620 if (mac_mode & MAC_MODE_APE_TX_EN)
2621 mac_mode |= MAC_MODE_TDE_ENABLE;
2624 tw32_f(MAC_MODE, mac_mode);
2627 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2631 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2632 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2633 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2636 base_val = tp->pci_clock_ctrl;
2637 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2638 CLOCK_CTRL_TXCLK_DISABLE);
2640 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2641 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2642 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2643 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2644 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2646 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2647 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2648 u32 newbits1, newbits2;
2650 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2651 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2652 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2653 CLOCK_CTRL_TXCLK_DISABLE |
2655 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2656 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2657 newbits1 = CLOCK_CTRL_625_CORE;
2658 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2660 newbits1 = CLOCK_CTRL_ALTCLK;
2661 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2664 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2667 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2670 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2673 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2674 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2675 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2676 CLOCK_CTRL_TXCLK_DISABLE |
2677 CLOCK_CTRL_44MHZ_CORE);
2679 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2682 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2683 tp->pci_clock_ctrl | newbits3, 40);
2687 if (!(device_should_wake) &&
2688 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2689 tg3_power_down_phy(tp, do_low_power);
2691 tg3_frob_aux_power(tp);
	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}
	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	if (device_should_wake)
		pci_enable_wake(tp->pdev, state, true);

	/* Finally, set the new power state. */
	pci_set_power_state(tp->pdev, state);

	return 0;
}
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	}
}
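
/* Example (illustrative): an AUX_STAT value whose speed field decodes
 * to MII_TG3_AUX_STAT_100FULL yields SPEED_100/DUPLEX_FULL above; on
 * FET-style PHYs (the default: branch) speed and duplex come from the
 * separate MII_TG3_AUX_STAT_100 and MII_TG3_AUX_STAT_FULL bits instead.
 */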
2768 static void tg3_phy_copper_begin(struct tg3 *tp)
2773 if (tp->link_config.phy_is_low_power) {
2774 /* Entering low power mode. Disable gigabit and
2775 * 100baseT advertisements.
2777 tg3_writephy(tp, MII_TG3_CTRL, 0);
2779 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2780 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2781 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2782 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2784 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2785 } else if (tp->link_config.speed == SPEED_INVALID) {
2786 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2787 tp->link_config.advertising &=
2788 ~(ADVERTISED_1000baseT_Half |
2789 ADVERTISED_1000baseT_Full);
2791 new_adv = ADVERTISE_CSMA;
2792 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2793 new_adv |= ADVERTISE_10HALF;
2794 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2795 new_adv |= ADVERTISE_10FULL;
2796 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2797 new_adv |= ADVERTISE_100HALF;
2798 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2799 new_adv |= ADVERTISE_100FULL;
2801 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2803 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2805 if (tp->link_config.advertising &
2806 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2808 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2809 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2810 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2811 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2812 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2813 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2814 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2815 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2816 MII_TG3_CTRL_ENABLE_AS_MASTER);
2817 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2819 tg3_writephy(tp, MII_TG3_CTRL, 0);
2822 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2823 new_adv |= ADVERTISE_CSMA;
2825 /* Asking for a specific link mode. */
2826 if (tp->link_config.speed == SPEED_1000) {
2827 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2829 if (tp->link_config.duplex == DUPLEX_FULL)
2830 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2832 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2833 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2834 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2835 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2836 MII_TG3_CTRL_ENABLE_AS_MASTER);
2838 if (tp->link_config.speed == SPEED_100) {
2839 if (tp->link_config.duplex == DUPLEX_FULL)
2840 new_adv |= ADVERTISE_100FULL;
2842 new_adv |= ADVERTISE_100HALF;
2844 if (tp->link_config.duplex == DUPLEX_FULL)
2845 new_adv |= ADVERTISE_10FULL;
2847 new_adv |= ADVERTISE_10HALF;
2849 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2854 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2857 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2858 tp->link_config.speed != SPEED_INVALID) {
2859 u32 bmcr, orig_bmcr;
2861 tp->link_config.active_speed = tp->link_config.speed;
2862 tp->link_config.active_duplex = tp->link_config.duplex;
2865 switch (tp->link_config.speed) {
2871 bmcr |= BMCR_SPEED100;
2875 bmcr |= TG3_BMCR_SPEED1000;
2879 if (tp->link_config.duplex == DUPLEX_FULL)
2880 bmcr |= BMCR_FULLDPLX;
2882 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2883 (bmcr != orig_bmcr)) {
2884 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2885 for (i = 0; i < 1500; i++) {
2889 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2890 tg3_readphy(tp, MII_BMSR, &tmp))
2892 if (!(tmp & BMSR_LSTATUS)) {
2897 tg3_writephy(tp, MII_BMCR, bmcr);
2901 tg3_writephy(tp, MII_BMCR,
2902 BMCR_ANENABLE | BMCR_ANRESTART);
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);

	udelay(40);

	return err;
}
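
/* The pairs above use the PHY's indirect DSP access: a write to
 * MII_TG3_DSP_ADDRESS selects an internal coefficient register and the
 * following MII_TG3_DSP_RW_PORT write supplies its value, e.g. the
 * final pair loads DSP register 0x201f with 0x0a20.
 */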
static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
{
	u32 adv_reg, all_mask = 0;

	if (mask & ADVERTISED_10baseT_Half)
		all_mask |= ADVERTISE_10HALF;
	if (mask & ADVERTISED_10baseT_Full)
		all_mask |= ADVERTISE_10FULL;
	if (mask & ADVERTISED_100baseT_Half)
		all_mask |= ADVERTISE_100HALF;
	if (mask & ADVERTISED_100baseT_Full)
		all_mask |= ADVERTISE_100FULL;

	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
		return 0;

	if ((adv_reg & all_mask) != all_mask)
		return 0;
	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
		u32 tg3_ctrl;

		all_mask = 0;
		if (mask & ADVERTISED_1000baseT_Half)
			all_mask |= ADVERTISE_1000HALF;
		if (mask & ADVERTISED_1000baseT_Full)
			all_mask |= ADVERTISE_1000FULL;

		if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
			return 0;

		if ((tg3_ctrl & all_mask) != all_mask)
			return 0;
	}
	return 1;
}
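
/* Example (illustrative): with mask = ADVERTISED_10baseT_Full |
 * ADVERTISED_100baseT_Full, all_mask becomes ADVERTISE_10FULL |
 * ADVERTISE_100FULL and the function returns 1 only if MII_ADVERTISE
 * already has both bits set (the 1000BASE-T check is then vacuous,
 * since its all_mask is 0).
 */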
static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
{
	u32 curadv, reqadv;

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return 1;

	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		if (curadv != reqadv)
			return 0;

		if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
			tg3_readphy(tp, MII_LPA, rmtadv);
	} else {
		/* Reprogram the advertisement register, even if it
		 * does not affect the current link.  If the link
		 * gets renegotiated in the future, we can save an
		 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
		if (curadv != reqadv) {
			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
				     ADVERTISE_PAUSE_ASYM);
			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
		}
	}

	return 1;
}
3003 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3005 int current_link_up;
3007 u32 lcl_adv, rmt_adv;
3015 (MAC_STATUS_SYNC_CHANGED |
3016 MAC_STATUS_CFG_CHANGED |
3017 MAC_STATUS_MI_COMPLETION |
3018 MAC_STATUS_LNKSTATE_CHANGED));
3021 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3023 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3027 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
3029 /* Some third-party PHYs need to be reset on link going
3032 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3033 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3034 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3035 netif_carrier_ok(tp->dev)) {
3036 tg3_readphy(tp, MII_BMSR, &bmsr);
3037 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3038 !(bmsr & BMSR_LSTATUS))
3044 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
3045 tg3_readphy(tp, MII_BMSR, &bmsr);
3046 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3047 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3050 if (!(bmsr & BMSR_LSTATUS)) {
3051 err = tg3_init_5401phy_dsp(tp);
3055 tg3_readphy(tp, MII_BMSR, &bmsr);
3056 for (i = 0; i < 1000; i++) {
3058 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3059 (bmsr & BMSR_LSTATUS)) {
3065 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
3066 !(bmsr & BMSR_LSTATUS) &&
3067 tp->link_config.active_speed == SPEED_1000) {
3068 err = tg3_phy_reset(tp);
3070 err = tg3_init_5401phy_dsp(tp);
3075 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3076 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3077 /* 5701 {A0,B0} CRC bug workaround */
3078 tg3_writephy(tp, 0x15, 0x0a75);
3079 tg3_writephy(tp, 0x1c, 0x8c68);
3080 tg3_writephy(tp, 0x1c, 0x8d68);
3081 tg3_writephy(tp, 0x1c, 0x8c68);
3084 /* Clear pending interrupts... */
3085 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3086 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3088 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
3089 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3090 else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
3091 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3093 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3094 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3095 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3096 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3097 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3099 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3102 current_link_up = 0;
3103 current_speed = SPEED_INVALID;
3104 current_duplex = DUPLEX_INVALID;
3106 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
3109 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3110 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3111 if (!(val & (1 << 10))) {
3113 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3119 for (i = 0; i < 100; i++) {
3120 tg3_readphy(tp, MII_BMSR, &bmsr);
3121 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3122 (bmsr & BMSR_LSTATUS))
3127 if (bmsr & BMSR_LSTATUS) {
3130 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3131 for (i = 0; i < 2000; i++) {
3133 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3138 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3143 for (i = 0; i < 200; i++) {
3144 tg3_readphy(tp, MII_BMCR, &bmcr);
3145 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3147 if (bmcr && bmcr != 0x7fff)
3155 tp->link_config.active_speed = current_speed;
3156 tp->link_config.active_duplex = current_duplex;
3158 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3159 if ((bmcr & BMCR_ANENABLE) &&
3160 tg3_copper_is_advertising_all(tp,
3161 tp->link_config.advertising)) {
3162 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3164 current_link_up = 1;
3167 if (!(bmcr & BMCR_ANENABLE) &&
3168 tp->link_config.speed == current_speed &&
3169 tp->link_config.duplex == current_duplex &&
3170 tp->link_config.flowctrl ==
3171 tp->link_config.active_flowctrl) {
3172 current_link_up = 1;
3176 if (current_link_up == 1 &&
3177 tp->link_config.active_duplex == DUPLEX_FULL)
3178 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3182 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
3185 tg3_phy_copper_begin(tp);
3187 tg3_readphy(tp, MII_BMSR, &tmp);
3188 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3189 (tmp & BMSR_LSTATUS))
3190 current_link_up = 1;
3193 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3194 if (current_link_up == 1) {
3195 if (tp->link_config.active_speed == SPEED_100 ||
3196 tp->link_config.active_speed == SPEED_10)
3197 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3199 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3200 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
3201 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3203 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3205 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3206 if (tp->link_config.active_duplex == DUPLEX_HALF)
3207 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3209 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3210 if (current_link_up == 1 &&
3211 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3212 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3214 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3217 /* ??? Without this setting Netgear GA302T PHY does not
3218 * ??? send/receive packets...
3220 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
3221 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3222 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3223 tw32_f(MAC_MI_MODE, tp->mi_mode);
3227 tw32_f(MAC_MODE, tp->mac_mode);
3230 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3231 /* Polled via timer. */
3232 tw32_f(MAC_EVENT, 0);
3234 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3238 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3239 current_link_up == 1 &&
3240 tp->link_config.active_speed == SPEED_1000 &&
3241 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3242 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3245 (MAC_STATUS_SYNC_CHANGED |
3246 MAC_STATUS_CFG_CHANGED));
3249 NIC_SRAM_FIRMWARE_MBOX,
3250 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3253 /* Prevent send BD corruption. */
3254 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3255 u16 oldlnkctl, newlnkctl;
3257 pci_read_config_word(tp->pdev,
3258 tp->pcie_cap + PCI_EXP_LNKCTL,
3260 if (tp->link_config.active_speed == SPEED_100 ||
3261 tp->link_config.active_speed == SPEED_10)
3262 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3264 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3265 if (newlnkctl != oldlnkctl)
3266 pci_write_config_word(tp->pdev,
3267 tp->pcie_cap + PCI_EXP_LNKCTL,
3271 if (current_link_up != netif_carrier_ok(tp->dev)) {
3272 if (current_link_up)
3273 netif_carrier_on(tp->dev);
3275 netif_carrier_off(tp->dev);
3276 tg3_link_report(tp);
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};

#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
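
/* Example (illustrative): a received rxconfig of
 * (ANEG_CFG_FD | ANEG_CFG_PS1 | ANEG_CFG_ACK) means the link partner
 * advertises full duplex with symmetric pause and has acknowledged our
 * ability word; the state machine below would then set
 * MR_LP_ADV_FULL_DUPLEX and MR_LP_ADV_SYM_PAUSE.
 */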
3346 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3347 struct tg3_fiber_aneginfo *ap)
3350 unsigned long delta;
3354 if (ap->state == ANEG_STATE_UNKNOWN) {
3358 ap->ability_match_cfg = 0;
3359 ap->ability_match_count = 0;
3360 ap->ability_match = 0;
3366 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3367 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3369 if (rx_cfg_reg != ap->ability_match_cfg) {
3370 ap->ability_match_cfg = rx_cfg_reg;
3371 ap->ability_match = 0;
3372 ap->ability_match_count = 0;
3374 if (++ap->ability_match_count > 1) {
3375 ap->ability_match = 1;
3376 ap->ability_match_cfg = rx_cfg_reg;
3379 if (rx_cfg_reg & ANEG_CFG_ACK)
3387 ap->ability_match_cfg = 0;
3388 ap->ability_match_count = 0;
3389 ap->ability_match = 0;
3395 ap->rxconfig = rx_cfg_reg;
3399 case ANEG_STATE_UNKNOWN:
3400 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3401 ap->state = ANEG_STATE_AN_ENABLE;
3404 case ANEG_STATE_AN_ENABLE:
3405 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3406 if (ap->flags & MR_AN_ENABLE) {
3409 ap->ability_match_cfg = 0;
3410 ap->ability_match_count = 0;
3411 ap->ability_match = 0;
3415 ap->state = ANEG_STATE_RESTART_INIT;
3417 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3421 case ANEG_STATE_RESTART_INIT:
3422 ap->link_time = ap->cur_time;
3423 ap->flags &= ~(MR_NP_LOADED);
3425 tw32(MAC_TX_AUTO_NEG, 0);
3426 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3427 tw32_f(MAC_MODE, tp->mac_mode);
3430 ret = ANEG_TIMER_ENAB;
3431 ap->state = ANEG_STATE_RESTART;
3434 case ANEG_STATE_RESTART:
3435 delta = ap->cur_time - ap->link_time;
3436 if (delta > ANEG_STATE_SETTLE_TIME) {
3437 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3439 ret = ANEG_TIMER_ENAB;
3443 case ANEG_STATE_DISABLE_LINK_OK:
3447 case ANEG_STATE_ABILITY_DETECT_INIT:
3448 ap->flags &= ~(MR_TOGGLE_TX);
3449 ap->txconfig = ANEG_CFG_FD;
3450 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3451 if (flowctrl & ADVERTISE_1000XPAUSE)
3452 ap->txconfig |= ANEG_CFG_PS1;
3453 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3454 ap->txconfig |= ANEG_CFG_PS2;
3455 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3456 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3457 tw32_f(MAC_MODE, tp->mac_mode);
3460 ap->state = ANEG_STATE_ABILITY_DETECT;
3463 case ANEG_STATE_ABILITY_DETECT:
3464 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3465 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3469 case ANEG_STATE_ACK_DETECT_INIT:
3470 ap->txconfig |= ANEG_CFG_ACK;
3471 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3472 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3473 tw32_f(MAC_MODE, tp->mac_mode);
3476 ap->state = ANEG_STATE_ACK_DETECT;
3479 case ANEG_STATE_ACK_DETECT:
3480 if (ap->ack_match != 0) {
3481 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3482 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3483 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3485 ap->state = ANEG_STATE_AN_ENABLE;
3487 } else if (ap->ability_match != 0 &&
3488 ap->rxconfig == 0) {
3489 ap->state = ANEG_STATE_AN_ENABLE;
3493 case ANEG_STATE_COMPLETE_ACK_INIT:
3494 if (ap->rxconfig & ANEG_CFG_INVAL) {
3498 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3499 MR_LP_ADV_HALF_DUPLEX |
3500 MR_LP_ADV_SYM_PAUSE |
3501 MR_LP_ADV_ASYM_PAUSE |
3502 MR_LP_ADV_REMOTE_FAULT1 |
3503 MR_LP_ADV_REMOTE_FAULT2 |
3504 MR_LP_ADV_NEXT_PAGE |
3507 if (ap->rxconfig & ANEG_CFG_FD)
3508 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3509 if (ap->rxconfig & ANEG_CFG_HD)
3510 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3511 if (ap->rxconfig & ANEG_CFG_PS1)
3512 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3513 if (ap->rxconfig & ANEG_CFG_PS2)
3514 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3515 if (ap->rxconfig & ANEG_CFG_RF1)
3516 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3517 if (ap->rxconfig & ANEG_CFG_RF2)
3518 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3519 if (ap->rxconfig & ANEG_CFG_NP)
3520 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3522 ap->link_time = ap->cur_time;
3524 ap->flags ^= (MR_TOGGLE_TX);
3525 if (ap->rxconfig & 0x0008)
3526 ap->flags |= MR_TOGGLE_RX;
3527 if (ap->rxconfig & ANEG_CFG_NP)
3528 ap->flags |= MR_NP_RX;
3529 ap->flags |= MR_PAGE_RX;
3531 ap->state = ANEG_STATE_COMPLETE_ACK;
3532 ret = ANEG_TIMER_ENAB;
3535 case ANEG_STATE_COMPLETE_ACK:
3536 if (ap->ability_match != 0 &&
3537 ap->rxconfig == 0) {
3538 ap->state = ANEG_STATE_AN_ENABLE;
3541 delta = ap->cur_time - ap->link_time;
3542 if (delta > ANEG_STATE_SETTLE_TIME) {
3543 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3544 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3546 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3547 !(ap->flags & MR_NP_RX)) {
3548 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3556 case ANEG_STATE_IDLE_DETECT_INIT:
3557 ap->link_time = ap->cur_time;
3558 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3559 tw32_f(MAC_MODE, tp->mac_mode);
3562 ap->state = ANEG_STATE_IDLE_DETECT;
3563 ret = ANEG_TIMER_ENAB;
3566 case ANEG_STATE_IDLE_DETECT:
3567 if (ap->ability_match != 0 &&
3568 ap->rxconfig == 0) {
3569 ap->state = ANEG_STATE_AN_ENABLE;
3572 delta = ap->cur_time - ap->link_time;
3573 if (delta > ANEG_STATE_SETTLE_TIME) {
3574 /* XXX another gem from the Broadcom driver :( */
3575 ap->state = ANEG_STATE_LINK_OK;
3579 case ANEG_STATE_LINK_OK:
3580 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3584 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3585 /* ??? unimplemented */
3588 case ANEG_STATE_NEXT_PAGE_WAIT:
3589 /* ??? unimplemented */
3600 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3603 struct tg3_fiber_aneginfo aninfo;
3604 int status = ANEG_FAILED;
3608 tw32_f(MAC_TX_AUTO_NEG, 0);
3610 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3611 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3614 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3617 memset(&aninfo, 0, sizeof(aninfo));
3618 aninfo.flags |= MR_AN_ENABLE;
3619 aninfo.state = ANEG_STATE_UNKNOWN;
3620 aninfo.cur_time = 0;
3622 while (++tick < 195000) {
3623 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3624 if (status == ANEG_DONE || status == ANEG_FAILED)
3630 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3631 tw32_f(MAC_MODE, tp->mac_mode);
3634 *txflags = aninfo.txconfig;
3635 *rxflags = aninfo.flags;
3637 if (status == ANEG_DONE &&
3638 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3639 MR_LP_ADV_FULL_DUPLEX)))
3645 static void tg3_init_bcm8002(struct tg3 *tp)
3647 u32 mac_status = tr32(MAC_STATUS);
3650 /* Reset when initting first time or we have a link. */
3651 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3652 !(mac_status & MAC_STATUS_PCS_SYNCED))
3655 /* Set PLL lock range. */
3656 tg3_writephy(tp, 0x16, 0x8007);
3659 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3661 /* Wait for reset to complete. */
3662 /* XXX schedule_timeout() ... */
3663 for (i = 0; i < 500; i++)
3666 /* Config mode; select PMA/Ch 1 regs. */
3667 tg3_writephy(tp, 0x10, 0x8411);
3669 /* Enable auto-lock and comdet, select txclk for tx. */
3670 tg3_writephy(tp, 0x11, 0x0a10);
3672 tg3_writephy(tp, 0x18, 0x00a0);
3673 tg3_writephy(tp, 0x16, 0x41ff);
3675 /* Assert and deassert POR. */
3676 tg3_writephy(tp, 0x13, 0x0400);
3678 tg3_writephy(tp, 0x13, 0x0000);
3680 tg3_writephy(tp, 0x11, 0x0a50);
3682 tg3_writephy(tp, 0x11, 0x0a10);
3684 /* Wait for signal to stabilize */
3685 /* XXX schedule_timeout() ... */
3686 for (i = 0; i < 15000; i++)
3689 /* Deselect the channel register so we can read the PHYID
3692 tg3_writephy(tp, 0x10, 0x8011);
3695 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3698 u32 sg_dig_ctrl, sg_dig_status;
3699 u32 serdes_cfg, expected_sg_dig_ctrl;
3700 int workaround, port_a;
3701 int current_link_up;
3704 expected_sg_dig_ctrl = 0;
3707 current_link_up = 0;
3709 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3710 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3712 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3715 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3716 /* preserve bits 20-23 for voltage regulator */
3717 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3720 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3722 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3723 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3725 u32 val = serdes_cfg;
3731 tw32_f(MAC_SERDES_CFG, val);
3734 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3736 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3737 tg3_setup_flow_control(tp, 0, 0);
3738 current_link_up = 1;
3743 /* Want auto-negotiation. */
3744 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3746 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3747 if (flowctrl & ADVERTISE_1000XPAUSE)
3748 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3749 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3750 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3752 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3753 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3754 tp->serdes_counter &&
3755 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3756 MAC_STATUS_RCVD_CFG)) ==
3757 MAC_STATUS_PCS_SYNCED)) {
3758 tp->serdes_counter--;
3759 current_link_up = 1;
3764 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3765 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3767 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3769 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3770 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3771 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3772 MAC_STATUS_SIGNAL_DET)) {
3773 sg_dig_status = tr32(SG_DIG_STATUS);
3774 mac_status = tr32(MAC_STATUS);
3776 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3777 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3778 u32 local_adv = 0, remote_adv = 0;
3780 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3781 local_adv |= ADVERTISE_1000XPAUSE;
3782 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3783 local_adv |= ADVERTISE_1000XPSE_ASYM;
3785 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3786 remote_adv |= LPA_1000XPAUSE;
3787 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3788 remote_adv |= LPA_1000XPAUSE_ASYM;
3790 tg3_setup_flow_control(tp, local_adv, remote_adv);
3791 current_link_up = 1;
3792 tp->serdes_counter = 0;
3793 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3794 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3795 if (tp->serdes_counter)
3796 tp->serdes_counter--;
3799 u32 val = serdes_cfg;
3806 tw32_f(MAC_SERDES_CFG, val);
3809 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3812 /* Link parallel detection - link is up */
3813 /* only if we have PCS_SYNC and not */
3814 /* receiving config code words */
3815 mac_status = tr32(MAC_STATUS);
3816 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3817 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3818 tg3_setup_flow_control(tp, 0, 0);
3819 current_link_up = 1;
3821 TG3_FLG2_PARALLEL_DETECT;
3822 tp->serdes_counter =
3823 SERDES_PARALLEL_DET_TIMEOUT;
3825 goto restart_autoneg;
3829 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3830 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3834 return current_link_up;
3837 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3839 int current_link_up = 0;
3841 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3844 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3845 u32 txflags, rxflags;
3848 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3849 u32 local_adv = 0, remote_adv = 0;
3851 if (txflags & ANEG_CFG_PS1)
3852 local_adv |= ADVERTISE_1000XPAUSE;
3853 if (txflags & ANEG_CFG_PS2)
3854 local_adv |= ADVERTISE_1000XPSE_ASYM;
3856 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3857 remote_adv |= LPA_1000XPAUSE;
3858 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3859 remote_adv |= LPA_1000XPAUSE_ASYM;
3861 tg3_setup_flow_control(tp, local_adv, remote_adv);
3863 current_link_up = 1;
3865 for (i = 0; i < 30; i++) {
3868 (MAC_STATUS_SYNC_CHANGED |
3869 MAC_STATUS_CFG_CHANGED));
3871 if ((tr32(MAC_STATUS) &
3872 (MAC_STATUS_SYNC_CHANGED |
3873 MAC_STATUS_CFG_CHANGED)) == 0)
3877 mac_status = tr32(MAC_STATUS);
3878 if (current_link_up == 0 &&
3879 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3880 !(mac_status & MAC_STATUS_RCVD_CFG))
3881 current_link_up = 1;
3883 tg3_setup_flow_control(tp, 0, 0);
3885 /* Forcing 1000FD link up. */
3886 current_link_up = 1;
3888 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3891 tw32_f(MAC_MODE, tp->mac_mode);
3896 return current_link_up;
3899 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3902 u16 orig_active_speed;
3903 u8 orig_active_duplex;
3905 int current_link_up;
3908 orig_pause_cfg = tp->link_config.active_flowctrl;
3909 orig_active_speed = tp->link_config.active_speed;
3910 orig_active_duplex = tp->link_config.active_duplex;
3912 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3913 netif_carrier_ok(tp->dev) &&
3914 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3915 mac_status = tr32(MAC_STATUS);
3916 mac_status &= (MAC_STATUS_PCS_SYNCED |
3917 MAC_STATUS_SIGNAL_DET |
3918 MAC_STATUS_CFG_CHANGED |
3919 MAC_STATUS_RCVD_CFG);
3920 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3921 MAC_STATUS_SIGNAL_DET)) {
3922 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3923 MAC_STATUS_CFG_CHANGED));
3928 tw32_f(MAC_TX_AUTO_NEG, 0);
3930 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3931 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3932 tw32_f(MAC_MODE, tp->mac_mode);
3935 if (tp->phy_id == PHY_ID_BCM8002)
3936 tg3_init_bcm8002(tp);
3938 /* Enable link change event even when serdes polling. */
3939 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3942 current_link_up = 0;
3943 mac_status = tr32(MAC_STATUS);
3945 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3946 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3948 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3950 tp->napi[0].hw_status->status =
3951 (SD_STATUS_UPDATED |
3952 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
3954 for (i = 0; i < 100; i++) {
3955 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3956 MAC_STATUS_CFG_CHANGED));
3958 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3959 MAC_STATUS_CFG_CHANGED |
3960 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3964 mac_status = tr32(MAC_STATUS);
3965 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3966 current_link_up = 0;
3967 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3968 tp->serdes_counter == 0) {
3969 tw32_f(MAC_MODE, (tp->mac_mode |
3970 MAC_MODE_SEND_CONFIGS));
3972 tw32_f(MAC_MODE, tp->mac_mode);
3976 if (current_link_up == 1) {
3977 tp->link_config.active_speed = SPEED_1000;
3978 tp->link_config.active_duplex = DUPLEX_FULL;
3979 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3980 LED_CTRL_LNKLED_OVERRIDE |
3981 LED_CTRL_1000MBPS_ON));
3983 tp->link_config.active_speed = SPEED_INVALID;
3984 tp->link_config.active_duplex = DUPLEX_INVALID;
3985 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3986 LED_CTRL_LNKLED_OVERRIDE |
3987 LED_CTRL_TRAFFIC_OVERRIDE));
3990 if (current_link_up != netif_carrier_ok(tp->dev)) {
3991 if (current_link_up)
3992 netif_carrier_on(tp->dev);
3994 netif_carrier_off(tp->dev);
3995 tg3_link_report(tp);
3997 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3998 if (orig_pause_cfg != now_pause_cfg ||
3999 orig_active_speed != tp->link_config.active_speed ||
4000 orig_active_duplex != tp->link_config.active_duplex)
4001 tg3_link_report(tp);
4007 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4009 int current_link_up, err = 0;
4013 u32 local_adv, remote_adv;
4015 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4016 tw32_f(MAC_MODE, tp->mac_mode);
4022 (MAC_STATUS_SYNC_CHANGED |
4023 MAC_STATUS_CFG_CHANGED |
4024 MAC_STATUS_MI_COMPLETION |
4025 MAC_STATUS_LNKSTATE_CHANGED));
4031 current_link_up = 0;
4032 current_speed = SPEED_INVALID;
4033 current_duplex = DUPLEX_INVALID;
4035 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4036 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4037 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4038 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4039 bmsr |= BMSR_LSTATUS;
4041 bmsr &= ~BMSR_LSTATUS;
4044 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4046 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4047 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4048 /* do nothing, just check for link up at the end */
4049 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4052 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4053 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4054 ADVERTISE_1000XPAUSE |
4055 ADVERTISE_1000XPSE_ASYM |
4058 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4060 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4061 new_adv |= ADVERTISE_1000XHALF;
4062 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4063 new_adv |= ADVERTISE_1000XFULL;
4065 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4066 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4067 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4068 tg3_writephy(tp, MII_BMCR, bmcr);
4070 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4071 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4072 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4079 bmcr &= ~BMCR_SPEED1000;
4080 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4082 if (tp->link_config.duplex == DUPLEX_FULL)
4083 new_bmcr |= BMCR_FULLDPLX;
4085 if (new_bmcr != bmcr) {
4086 /* BMCR_SPEED1000 is a reserved bit that needs
4087 * to be set on write.
4089 new_bmcr |= BMCR_SPEED1000;
4091 /* Force a linkdown */
4092 if (netif_carrier_ok(tp->dev)) {
4095 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4096 adv &= ~(ADVERTISE_1000XFULL |
4097 ADVERTISE_1000XHALF |
4099 tg3_writephy(tp, MII_ADVERTISE, adv);
4100 tg3_writephy(tp, MII_BMCR, bmcr |
4104 netif_carrier_off(tp->dev);
4106 tg3_writephy(tp, MII_BMCR, new_bmcr);
4108 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4109 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4110 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4112 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4113 bmsr |= BMSR_LSTATUS;
4115 bmsr &= ~BMSR_LSTATUS;
4117 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4121 if (bmsr & BMSR_LSTATUS) {
4122 current_speed = SPEED_1000;
4123 current_link_up = 1;
4124 if (bmcr & BMCR_FULLDPLX)
4125 current_duplex = DUPLEX_FULL;
4127 current_duplex = DUPLEX_HALF;
4132 if (bmcr & BMCR_ANENABLE) {
4135 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4136 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4137 common = local_adv & remote_adv;
4138 if (common & (ADVERTISE_1000XHALF |
4139 ADVERTISE_1000XFULL)) {
4140 if (common & ADVERTISE_1000XFULL)
4141 current_duplex = DUPLEX_FULL;
4143 current_duplex = DUPLEX_HALF;
4146 current_link_up = 0;
4150 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4151 tg3_setup_flow_control(tp, local_adv, remote_adv);
4153 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4154 if (tp->link_config.active_duplex == DUPLEX_HALF)
4155 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4157 tw32_f(MAC_MODE, tp->mac_mode);
4160 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4162 tp->link_config.active_speed = current_speed;
4163 tp->link_config.active_duplex = current_duplex;
4165 if (current_link_up != netif_carrier_ok(tp->dev)) {
4166 if (current_link_up)
4167 netif_carrier_on(tp->dev);
4169 netif_carrier_off(tp->dev);
4170 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4172 tg3_link_report(tp);
4177 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4179 if (tp->serdes_counter) {
4180 /* Give autoneg time to complete. */
4181 tp->serdes_counter--;
4184 if (!netif_carrier_ok(tp->dev) &&
4185 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4188 tg3_readphy(tp, MII_BMCR, &bmcr);
4189 if (bmcr & BMCR_ANENABLE) {
4192 /* Select shadow register 0x1f */
4193 tg3_writephy(tp, 0x1c, 0x7c00);
4194 tg3_readphy(tp, 0x1c, &phy1);
4196 /* Select expansion interrupt status register */
4197 tg3_writephy(tp, 0x17, 0x0f01);
4198 tg3_readphy(tp, 0x15, &phy2);
4199 tg3_readphy(tp, 0x15, &phy2);
4201 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4202 /* We have signal detect and not receiving
4203 * config code words, link is up by parallel
4207 bmcr &= ~BMCR_ANENABLE;
4208 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4209 tg3_writephy(tp, MII_BMCR, bmcr);
4210 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
4214 else if (netif_carrier_ok(tp->dev) &&
4215 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4216 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4219 /* Select expansion interrupt status register */
4220 tg3_writephy(tp, 0x17, 0x0f01);
4221 tg3_readphy(tp, 0x15, &phy2);
4225 /* Config code words received, turn on autoneg. */
4226 tg3_readphy(tp, MII_BMCR, &bmcr);
4227 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4229 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4235 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4239 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4240 err = tg3_setup_fiber_phy(tp, force_reset);
4241 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4242 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4244 err = tg3_setup_copper_phy(tp, force_reset);
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 val, scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}
4263 if (tp->link_config.active_speed == SPEED_1000 &&
4264 tp->link_config.active_duplex == DUPLEX_HALF)
4265 tw32(MAC_TX_LENGTHS,
4266 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4267 (6 << TX_LENGTHS_IPG_SHIFT) |
4268 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4270 tw32(MAC_TX_LENGTHS,
4271 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4272 (6 << TX_LENGTHS_IPG_SHIFT) |
4273 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4275 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4276 if (netif_carrier_ok(tp->dev)) {
4277 tw32(HOSTCC_STAT_COAL_TICKS,
4278 tp->coal.stats_block_coalesce_usecs);
4280 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4284 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4285 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4286 if (!netif_carrier_ok(tp->dev))
4287 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4290 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4291 tw32(PCIE_PWR_MGMT_THRESH, val);
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}

static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	smp_mb();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
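
/* Worked example (illustrative): TG3_TX_RING_SIZE is a power of two,
 * so the masked difference above handles producer wrap-around; with
 * tx_prod = 3 and tx_cons = 510, (3 - 510) & 511 = 5 descriptors are
 * in flight, leaving tx_pending - 5 available.
 */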
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
4329 static void tg3_tx(struct tg3_napi *tnapi)
4331 struct tg3 *tp = tnapi->tp;
4332 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4333 u32 sw_idx = tnapi->tx_cons;
4334 struct netdev_queue *txq;
4335 int index = tnapi - tp->napi;
4337 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
4340 txq = netdev_get_tx_queue(tp->dev, index);
4342 while (sw_idx != hw_idx) {
4343 struct tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
4344 struct sk_buff *skb = ri->skb;
4347 if (unlikely(skb == NULL)) {
4352 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4356 sw_idx = NEXT_TX(sw_idx);
4358 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4359 ri = &tnapi->tx_buffers[sw_idx];
4360 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4362 sw_idx = NEXT_TX(sw_idx);
4367 if (unlikely(tx_bug)) {
4373 tnapi->tx_cons = sw_idx;
	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();
4382 if (unlikely(netif_tx_queue_stopped(txq) &&
4383 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4384 __netif_tx_lock(txq, smp_processor_id());
4385 if (netif_tx_queue_stopped(txq) &&
4386 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4387 netif_tx_wake_queue(txq);
4388 __netif_tx_unlock(txq);
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
4403 static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
4404 int src_idx, u32 dest_idx_unmasked)
4406 struct tg3 *tp = tnapi->tp;
4407 struct tg3_rx_buffer_desc *desc;
4408 struct ring_info *map, *src_map;
4409 struct sk_buff *skb;
4411 int skb_size, dest_idx;
4412 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4415 switch (opaque_key) {
4416 case RXD_OPAQUE_RING_STD:
4417 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4418 desc = &tpr->rx_std[dest_idx];
4419 map = &tpr->rx_std_buffers[dest_idx];
4421 src_map = &tpr->rx_std_buffers[src_idx];
4422 skb_size = tp->rx_pkt_map_sz;
4425 case RXD_OPAQUE_RING_JUMBO:
4426 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4427 desc = &tpr->rx_jmb[dest_idx].std;
4428 map = &tpr->rx_jmb_buffers[dest_idx];
4430 src_map = &tpr->rx_jmb_buffers[src_idx];
4431 skb_size = TG3_RX_JMB_MAP_SZ;
	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
4444 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4448 skb_reserve(skb, tp->rx_offset);
4450 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4451 PCI_DMA_FROMDEVICE);
4452 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4458 pci_unmap_addr_set(map, mapping, mapping);
4460 if (src_map != NULL)
4461 src_map->skb = NULL;
4463 desc->addr_hi = ((u64)mapping >> 32);
4464 desc->addr_lo = ((u64)mapping & 0xffffffff);
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 */
4473 static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
4474 int src_idx, u32 dest_idx_unmasked)
4476 struct tg3 *tp = tnapi->tp;
4477 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4478 struct ring_info *src_map, *dest_map;
4480 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4482 switch (opaque_key) {
4483 case RXD_OPAQUE_RING_STD:
4484 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4485 dest_desc = &tpr->rx_std[dest_idx];
4486 dest_map = &tpr->rx_std_buffers[dest_idx];
4487 src_desc = &tpr->rx_std[src_idx];
4488 src_map = &tpr->rx_std_buffers[src_idx];
4491 case RXD_OPAQUE_RING_JUMBO:
4492 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4493 dest_desc = &tpr->rx_jmb[dest_idx].std;
4494 dest_map = &tpr->rx_jmb_buffers[dest_idx];
4495 src_desc = &tpr->rx_jmb[src_idx].std;
4496 src_map = &tpr->rx_jmb_buffers[src_idx];
4503 dest_map->skb = src_map->skb;
4504 pci_unmap_addr_set(dest_map, mapping,
4505 pci_unmap_addr(src_map, mapping));
4506 dest_desc->addr_hi = src_desc->addr_hi;
4507 dest_desc->addr_lo = src_desc->addr_lo;
4509 src_map->skb = NULL;
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
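
/* Example (illustrative): a status ring entry whose opaque field is
 * (RXD_OPAQUE_RING_STD | 37) tells the host that the packet landed in
 * the buffer it posted at index 37 of the standard ring; tg3_rx()
 * below recovers the index with RXD_OPAQUE_INDEX_MASK and the source
 * ring with RXD_OPAQUE_RING_MASK.
 */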
4536 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4538 struct tg3 *tp = tnapi->tp;
4539 u32 work_mask, rx_std_posted = 0;
4540 u32 sw_idx = tnapi->rx_rcb_ptr;
4543 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4545 hw_idx = *(tnapi->rx_rcb_prod_idx);
4547 * We need to order the read of hw_idx and the read of
4548 * the opaque cookie.
4553 while (sw_idx != hw_idx && budget > 0) {
4554 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4556 struct sk_buff *skb;
4557 dma_addr_t dma_addr;
4558 u32 opaque_key, desc_idx, *post_ptr;
4560 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4561 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4562 if (opaque_key == RXD_OPAQUE_RING_STD) {
4563 struct ring_info *ri = &tpr->rx_std_buffers[desc_idx];
4564 dma_addr = pci_unmap_addr(ri, mapping);
4566 post_ptr = &tpr->rx_std_ptr;
4568 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4569 struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx];
4570 dma_addr = pci_unmap_addr(ri, mapping);
4572 post_ptr = &tpr->rx_jmb_ptr;
4574 goto next_pkt_nopost;
4576 work_mask |= opaque_key;
4578 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4579 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4581 tg3_recycle_rx(tnapi, opaque_key,
4582 desc_idx, *post_ptr);
4584 /* Other statistics are kept track of by the card. */
4585 tp->net_stats.rx_dropped++;
4589 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
ETH_FCS_LEN; /* omit crc */
4592 if (len > RX_COPY_THRESHOLD
4593 && tp->rx_offset == NET_IP_ALIGN
4594 /* rx_offset will likely not equal NET_IP_ALIGN
4595 * if this is a 5701 card running in PCI-X mode
4596 * [see tg3_get_invariants()]
4601 skb_size = tg3_alloc_rx_skb(tnapi, opaque_key,
4602 desc_idx, *post_ptr);
4606 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4607 PCI_DMA_FROMDEVICE);
4611 struct sk_buff *copy_skb;
4613 tg3_recycle_rx(tnapi, opaque_key,
4614 desc_idx, *post_ptr);
4616 copy_skb = netdev_alloc_skb(tp->dev,
4617 len + TG3_RAW_IP_ALIGN);
4618 if (copy_skb == NULL)
4619 goto drop_it_no_recycle;
4621 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4622 skb_put(copy_skb, len);
4623 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4624 skb_copy_from_linear_data(skb, copy_skb->data, len);
4625 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4627 /* We'll reuse the original ring buffer. */
4631 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4632 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4633 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4634 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4635 skb->ip_summed = CHECKSUM_UNNECESSARY;
4637 skb->ip_summed = CHECKSUM_NONE;
4639 skb->protocol = eth_type_trans(skb, tp->dev);
4641 if (len > (tp->dev->mtu + ETH_HLEN) &&
4642 skb->protocol != htons(ETH_P_8021Q)) {
4647 #if TG3_VLAN_TAG_USED
4648 if (tp->vlgrp != NULL &&
4649 desc->type_flags & RXD_FLAG_VLAN) {
4650 vlan_gro_receive(&tnapi->napi, tp->vlgrp,
4651 desc->err_vlan & RXD_VLAN_MASK, skb);
4654 napi_gro_receive(&tnapi->napi, skb);
4662 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4663 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4665 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4666 TG3_64BIT_REG_LOW, idx);
4667 work_mask &= ~RXD_OPAQUE_RING_STD;
4672 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4674 /* Refresh hw_idx to see if there is new work */
4675 if (sw_idx == hw_idx) {
4676 hw_idx = *(tnapi->rx_rcb_prod_idx);
4681 /* ACK the status ring. */
4682 tnapi->rx_rcb_ptr = sw_idx;
4683 tw32_rx_mbox(tnapi->consmbox, sw_idx);
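/* Writing the consumer index back to the mailbox tells the chip
 * which status entries we have digested, so it may overwrite them
 * with new completions.
 */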
4685 /* Refill RX ring(s). */
4686 if (work_mask & RXD_OPAQUE_RING_STD) {
4687 sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE;
4688 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4691 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4692 sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE;
4693 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4701 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4703 struct tg3 *tp = tnapi->tp;
4704 struct tg3_hw_status *sblk = tnapi->hw_status;
4706 /* handle link change and other phy events */
4707 if (!(tp->tg3_flags &
4708 (TG3_FLAG_USE_LINKCHG_REG |
4709 TG3_FLAG_POLL_SERDES))) {
4710 if (sblk->status & SD_STATUS_LINK_CHG) {
4711 sblk->status = SD_STATUS_UPDATED |
4712 (sblk->status & ~SD_STATUS_LINK_CHG);
4713 spin_lock(&tp->lock);
4714 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4716 (MAC_STATUS_SYNC_CHANGED |
4717 MAC_STATUS_CFG_CHANGED |
4718 MAC_STATUS_MI_COMPLETION |
4719 MAC_STATUS_LNKSTATE_CHANGED));
4722 tg3_setup_phy(tp, 0);
4723 spin_unlock(&tp->lock);
4727 /* run TX completion thread */
4728 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
4730 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4734 /* run RX thread, within the bounds set by NAPI.
4735 * All RX "locking" is done by ensuring outside
4736 * code synchronizes with tg3->napi.poll()
4738 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4739 work_done += tg3_rx(tnapi, budget - work_done);
4744 static int tg3_poll(struct napi_struct *napi, int budget)
4746 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4747 struct tg3 *tp = tnapi->tp;
4749 struct tg3_hw_status *sblk = tnapi->hw_status;
4752 work_done = tg3_poll_work(tnapi, work_done, budget);
4754 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4757 if (unlikely(work_done >= budget))
4760 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4761 /* tnapi->last_tag is used in tg3_int_reenable() below
4762 * to tell the hw how much work has been processed,
4763 * so we must read it before checking for more work.
4765 tnapi->last_tag = sblk->status_tag;
4766 tnapi->last_irq_tag = tnapi->last_tag;
4769 sblk->status &= ~SD_STATUS_UPDATED;
4771 if (likely(!tg3_has_work(tnapi))) {
4772 napi_complete(napi);
4773 tg3_int_reenable(tnapi);
4781 /* work_done is guaranteed to be less than budget. */
4782 napi_complete(napi);
4783 schedule_work(&tp->reset_task);
4787 static void tg3_irq_quiesce(struct tg3 *tp)
4791 BUG_ON(tp->irq_sync);
4796 for (i = 0; i < tp->irq_cnt; i++)
4797 synchronize_irq(tp->napi[i].irq_vec);
4800 static inline int tg3_irq_sync(struct tg3 *tp)
4802 return tp->irq_sync;
4805 /* Fully shutdown all tg3 driver activity elsewhere in the system.
4806 * If irq_sync is non-zero, then the IRQ handler must be synchronized
4807 * with as well. Most of the time, this is not necessary except when
4808 * shutting down the device.
4810 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4812 spin_lock_bh(&tp->lock);
4814 tg3_irq_quiesce(tp);
4817 static inline void tg3_full_unlock(struct tg3 *tp)
4819 spin_unlock_bh(&tp->lock);
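/* Typical usage of the full lock, as seen in the configuration
 * paths later in this file (e.g. tg3_change_mtu):
 *
 *   tg3_full_lock(tp, 1);    (irq_sync=1 also quiesces the IRQs)
 *   tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *   err = tg3_restart_hw(tp, 0);
 *   tg3_full_unlock(tp);
 */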
4822 /* One-shot MSI handler - Chip automatically disables interrupt
4823 * after sending MSI so driver doesn't have to do it.
4825 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4827 struct tg3_napi *tnapi = dev_id;
4828 struct tg3 *tp = tnapi->tp;
4830 prefetch(tnapi->hw_status);
4832 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4834 if (likely(!tg3_irq_sync(tp)))
4835 napi_schedule(&tnapi->napi);
4840 /* MSI ISR - No need to check for interrupt sharing and no need to
4841 * flush status block and interrupt mailbox. PCI ordering rules
4842 * guarantee that MSI will arrive after the status block.
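/* Contrast this with tg3_interrupt() below: the INTx handler has to
 * read TG3PCI_PCISTATE to flush the status block, while an MSI write
 * is already ordered behind the status block DMA.
 */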
4844 static irqreturn_t tg3_msi(int irq, void *dev_id)
4846 struct tg3_napi *tnapi = dev_id;
4847 struct tg3 *tp = tnapi->tp;
4849 prefetch(tnapi->hw_status);
4851 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4853 * Writing any value to intr-mbox-0 clears PCI INTA# and
4854 * chip-internal interrupt pending events.
4855 * Writing non-zero to intr-mbox-0 additionally tells the
4856 * NIC to stop sending us irqs, engaging "in-intr-handler"
4859 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4860 if (likely(!tg3_irq_sync(tp)))
4861 napi_schedule(&tnapi->napi);
4863 return IRQ_RETVAL(1);
4866 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4868 struct tg3_napi *tnapi = dev_id;
4869 struct tg3 *tp = tnapi->tp;
4870 struct tg3_hw_status *sblk = tnapi->hw_status;
4871 unsigned int handled = 1;
4873 /* In INTx mode, it is possible for the interrupt to arrive at
4874 * the CPU before the status block that was posted prior to the interrupt.
4875 * Reading the PCI State register will confirm whether the
4876 * interrupt is ours and will flush the status block.
4878 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4879 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4880 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4887 * Writing any value to intr-mbox-0 clears PCI INTA# and
4888 * chip-internal interrupt pending events.
4889 * Writing non-zero to intr-mbox-0 additionally tells the
4890 * NIC to stop sending us irqs, engaging "in-intr-handler"
4893 * Flush the mailbox to de-assert the IRQ immediately to prevent
4894 * spurious interrupts. The flush impacts performance but
4895 * excessive spurious interrupts can be worse in some cases.
4897 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4898 if (tg3_irq_sync(tp))
4900 sblk->status &= ~SD_STATUS_UPDATED;
4901 if (likely(tg3_has_work(tnapi))) {
4902 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4903 napi_schedule(&tnapi->napi);
4905 /* No work, shared interrupt perhaps? re-enable
4906 * interrupts, and flush that PCI write
4908 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4912 return IRQ_RETVAL(handled);
4915 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4917 struct tg3_napi *tnapi = dev_id;
4918 struct tg3 *tp = tnapi->tp;
4919 struct tg3_hw_status *sblk = tnapi->hw_status;
4920 unsigned int handled = 1;
4922 /* In INTx mode, it is possible for the interrupt to arrive at
4923 * the CPU before the status block that was posted prior to the interrupt.
4924 * Reading the PCI State register will confirm whether the
4925 * interrupt is ours and will flush the status block.
4927 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
4928 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4929 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4936 * Writing any value to intr-mbox-0 clears PCI INTA# and
4937 * chip-internal interrupt pending events.
4938 * Writing non-zero to intr-mbox-0 additionally tells the
4939 * NIC to stop sending us irqs, engaging "in-intr-handler"
4942 * Flush the mailbox to de-assert the IRQ immediately to prevent
4943 * spurious interrupts. The flush impacts performance but
4944 * excessive spurious interrupts can be worse in some cases.
4946 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4949 * In a shared interrupt configuration, sometimes other devices'
4950 * interrupts will scream. We record the current status tag here
4951 * so that the above check can report that the screaming interrupts
4952 * are unhandled. Eventually they will be silenced.
4954 tnapi->last_irq_tag = sblk->status_tag;
4956 if (tg3_irq_sync(tp))
4959 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4961 napi_schedule(&tnapi->napi);
4964 return IRQ_RETVAL(handled);
4967 /* ISR for interrupt test */
4968 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4970 struct tg3_napi *tnapi = dev_id;
4971 struct tg3 *tp = tnapi->tp;
4972 struct tg3_hw_status *sblk = tnapi->hw_status;
4974 if ((sblk->status & SD_STATUS_UPDATED) ||
4975 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4976 tg3_disable_ints(tp);
4977 return IRQ_RETVAL(1);
4979 return IRQ_RETVAL(0);
4982 static int tg3_init_hw(struct tg3 *, int);
4983 static int tg3_halt(struct tg3 *, int, int);
4985 /* Restart hardware after configuration changes, self-test, etc.
4986 * Invoked with tp->lock held.
4988 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4989 __releases(tp->lock)
4990 __acquires(tp->lock)
4994 err = tg3_init_hw(tp, reset_phy);
4996 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4997 "aborting.\n", tp->dev->name);
4998 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4999 tg3_full_unlock(tp);
5000 del_timer_sync(&tp->timer);
5002 tg3_napi_enable(tp);
5004 tg3_full_lock(tp, 0);
5009 #ifdef CONFIG_NET_POLL_CONTROLLER
5010 static void tg3_poll_controller(struct net_device *dev)
5013 struct tg3 *tp = netdev_priv(dev);
5015 for (i = 0; i < tp->irq_cnt; i++)
5016 tg3_interrupt(tp->napi[i].irq_vec, dev);
5020 static void tg3_reset_task(struct work_struct *work)
5022 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5024 unsigned int restart_timer;
5026 tg3_full_lock(tp, 0);
5028 if (!netif_running(tp->dev)) {
5029 tg3_full_unlock(tp);
5033 tg3_full_unlock(tp);
5039 tg3_full_lock(tp, 1);
5041 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5042 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5044 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5045 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5046 tp->write32_rx_mbox = tg3_write_flush_reg32;
5047 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5048 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5051 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5052 err = tg3_init_hw(tp, 1);
5056 tg3_netif_start(tp);
5059 mod_timer(&tp->timer, jiffies + 1);
5062 tg3_full_unlock(tp);
5068 static void tg3_dump_short_state(struct tg3 *tp)
5070 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5071 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5072 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5073 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5076 static void tg3_tx_timeout(struct net_device *dev)
5078 struct tg3 *tp = netdev_priv(dev);
5080 if (netif_msg_tx_err(tp)) {
5081 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
5083 tg3_dump_short_state(tp);
5086 schedule_work(&tp->reset_task);
5089 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
5090 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5092 u32 base = (u32) mapping & 0xffffffff;
5094 return ((base > 0xffffdcc0) &&
5095 (base + len + 8 < base));
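/* Worked example (illustrative values): mapping = 0xfffff000 and
 * len = 0x2000 give base = 0xfffff000 > 0xffffdcc0, and
 * base + len + 8 wraps to 0x00001008 < base, so the buffer crosses
 * a 4GB boundary and the test fires. The 0xffffdcc0 cutoff appears
 * to be a cheap pre-filter: only a buffer starting within ~9KB of
 * the boundary (about one jumbo frame plus slack) can overflow it.
 */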
5098 /* Test for DMA addresses > 40-bit */
5099 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5102 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5103 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5104 return (((u64) mapping + len) > DMA_BIT_MASK(40));
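/* Illustrative: with TG3_FLAG_40BIT_DMA_BUG set, a mapping of
 * 0xffffffff00ULL with len = 0x200 sums past DMA_BIT_MASK(40),
 * i.e. 2^40 - 1, and fails the test, forcing the workaround path.
 */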
5111 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5113 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5114 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
5115 u32 last_plus_one, u32 *start,
5116 u32 base_flags, u32 mss)
5118 struct tg3_napi *tnapi = &tp->napi[0];
5119 struct sk_buff *new_skb;
5120 dma_addr_t new_addr = 0;
5124 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5125 new_skb = skb_copy(skb, GFP_ATOMIC);
5127 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5129 new_skb = skb_copy_expand(skb,
5130 skb_headroom(skb) + more_headroom,
5131 skb_tailroom(skb), GFP_ATOMIC);
5137 /* New SKB is guaranteed to be linear. */
5139 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
5140 new_addr = skb_shinfo(new_skb)->dma_head;
5142 /* Make sure new skb does not cross any 4G boundaries.
5143 * Drop the packet if it does.
5145 if (ret || ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5146 tg3_4g_overflow_test(new_addr, new_skb->len))) {
5148 skb_dma_unmap(&tp->pdev->dev, new_skb,
5151 dev_kfree_skb(new_skb);
5154 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5155 base_flags, 1 | (mss << 1));
5156 *start = NEXT_TX(entry);
5160 /* Now clean up the sw ring entries. */
5162 while (entry != last_plus_one) {
5164 tnapi->tx_buffers[entry].skb = new_skb;
5166 tnapi->tx_buffers[entry].skb = NULL;
5167 entry = NEXT_TX(entry);
5171 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5177 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5178 dma_addr_t mapping, int len, u32 flags,
5181 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5182 int is_end = (mss_and_is_end & 0x1);
5183 u32 mss = (mss_and_is_end >> 1);
5187 flags |= TXD_FLAG_END;
5188 if (flags & TXD_FLAG_VLAN) {
5189 vlan_tag = flags >> 16;
5192 vlan_tag |= (mss << TXD_MSS_SHIFT);
5194 txd->addr_hi = ((u64) mapping >> 32);
5195 txd->addr_lo = ((u64) mapping & 0xffffffff);
5196 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5197 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
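/* Sketch of the packing above (values illustrative): for the last
 * fragment of a TSO packet with mss 1448 and no VLAN, the caller
 * passes mss_and_is_end = (1448 << 1) | 1, TXD_FLAG_END is set, and
 * the MSS lands in the upper bits of the vlan_tag word via
 * TXD_MSS_SHIFT.
 */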
5200 /* hard_start_xmit for devices that don't have any bugs and
5201 * support TG3_FLG2_HW_TSO_2 only.
5203 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5204 struct net_device *dev)
5206 struct tg3 *tp = netdev_priv(dev);
5207 u32 len, entry, base_flags, mss;
5208 struct skb_shared_info *sp;
5210 struct tg3_napi *tnapi;
5211 struct netdev_queue *txq;
5213 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5214 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5215 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
5218 /* We are running in BH disabled context with netif_tx_lock
5219 * and TX reclaim runs via tp->napi.poll inside of a software
5220 * interrupt. Furthermore, IRQ processing runs lockless so we have
5221 * no IRQ context deadlocks to worry about either. Rejoice!
5223 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5224 if (!netif_tx_queue_stopped(txq)) {
5225 netif_tx_stop_queue(txq);
5227 /* This is a hard error, log it. */
5228 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5229 "queue awake!\n", dev->name);
5231 return NETDEV_TX_BUSY;
5234 entry = tnapi->tx_prod;
5237 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5238 int tcp_opt_len, ip_tcp_len;
5241 if (skb_header_cloned(skb) &&
5242 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5247 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
5248 hdrlen = skb_headlen(skb) - ETH_HLEN;
5250 struct iphdr *iph = ip_hdr(skb);
5252 tcp_opt_len = tcp_optlen(skb);
5253 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5256 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5257 hdrlen = ip_tcp_len + tcp_opt_len;
5260 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
5261 mss |= (hdrlen & 0xc) << 12;
5263 base_flags |= 0x00000010;
5264 base_flags |= (hdrlen & 0x3e0) << 5;
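/* The 5717 appears to scatter the TSO header length across the
 * descriptor: bits 2-3 ride in the MSS field (the << 12 above) and
 * bits 5-9 in base_flags (the << 5), with 0x10 flagging the
 * extended encoding.
 */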
5268 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5269 TXD_FLAG_CPU_POST_DMA);
5271 tcp_hdr(skb)->check = 0;
5274 else if (skb->ip_summed == CHECKSUM_PARTIAL)
5275 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5276 #if TG3_VLAN_TAG_USED
5277 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5278 base_flags |= (TXD_FLAG_VLAN |
5279 (vlan_tx_tag_get(skb) << 16));
5282 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5287 sp = skb_shinfo(skb);
5289 mapping = sp->dma_head;
5291 tnapi->tx_buffers[entry].skb = skb;
5293 len = skb_headlen(skb);
5295 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
5296 !mss && skb->len > ETH_DATA_LEN)
5297 base_flags |= TXD_FLAG_JMB_PKT;
5299 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5300 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5302 entry = NEXT_TX(entry);
5304 /* Now loop through additional data fragments, and queue them. */
5305 if (skb_shinfo(skb)->nr_frags > 0) {
5306 unsigned int i, last;
5308 last = skb_shinfo(skb)->nr_frags - 1;
5309 for (i = 0; i <= last; i++) {
5310 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5313 mapping = sp->dma_maps[i];
5314 tnapi->tx_buffers[entry].skb = NULL;
5316 tg3_set_txd(tnapi, entry, mapping, len,
5317 base_flags, (i == last) | (mss << 1));
5319 entry = NEXT_TX(entry);
5323 /* Packets are ready, update Tx producer idx local and on card. */
5324 tw32_tx_mbox(tnapi->prodmbox, entry);
5326 tnapi->tx_prod = entry;
5327 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5328 netif_tx_stop_queue(txq);
5329 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5330 netif_tx_wake_queue(txq);
5336 return NETDEV_TX_OK;
5339 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5340 struct net_device *);
5342 /* Use GSO to work around a rare TSO bug that may be triggered when the
5343 * TSO header is greater than 80 bytes.
5345 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5347 struct sk_buff *segs, *nskb;
5348 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5350 /* Estimate the number of fragments in the worst case */
5351 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5352 netif_stop_queue(tp->dev);
5353 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5354 return NETDEV_TX_BUSY;
5356 netif_wake_queue(tp->dev);
5359 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5361 goto tg3_tso_bug_end;
5367 tg3_start_xmit_dma_bug(nskb, tp->dev);
5373 return NETDEV_TX_OK;
5376 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5377 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5379 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5380 struct net_device *dev)
5382 struct tg3 *tp = netdev_priv(dev);
5383 u32 len, entry, base_flags, mss;
5384 struct skb_shared_info *sp;
5385 int would_hit_hwbug;
5387 struct tg3_napi *tnapi = &tp->napi[0];
5389 len = skb_headlen(skb);
5391 /* We are running in BH disabled context with netif_tx_lock
5392 * and TX reclaim runs via tp->napi.poll inside of a software
5393 * interrupt. Furthermore, IRQ processing runs lockless so we have
5394 * no IRQ context deadlocks to worry about either. Rejoice!
5396 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5397 if (!netif_queue_stopped(dev)) {
5398 netif_stop_queue(dev);
5400 /* This is a hard error, log it. */
5401 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5402 "queue awake!\n", dev->name);
5404 return NETDEV_TX_BUSY;
5407 entry = tnapi->tx_prod;
5409 if (skb->ip_summed == CHECKSUM_PARTIAL)
5410 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5412 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5414 u32 tcp_opt_len, ip_tcp_len, hdr_len;
5416 if (skb_header_cloned(skb) &&
5417 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5422 tcp_opt_len = tcp_optlen(skb);
5423 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5425 hdr_len = ip_tcp_len + tcp_opt_len;
5426 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5427 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5428 return (tg3_tso_bug(tp, skb));
5430 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5431 TXD_FLAG_CPU_POST_DMA);
5435 iph->tot_len = htons(mss + hdr_len);
5436 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5437 tcp_hdr(skb)->check = 0;
5438 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5440 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5445 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5446 mss |= hdr_len << 9;
5447 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5448 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5449 if (tcp_opt_len || iph->ihl > 5) {
5452 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5453 mss |= (tsflags << 11);
5456 if (tcp_opt_len || iph->ihl > 5) {
5459 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5460 base_flags |= tsflags << 12;
5464 #if TG3_VLAN_TAG_USED
5465 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5466 base_flags |= (TXD_FLAG_VLAN |
5467 (vlan_tx_tag_get(skb) << 16));
5470 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5475 sp = skb_shinfo(skb);
5477 mapping = sp->dma_head;
5479 tnapi->tx_buffers[entry].skb = skb;
5481 would_hit_hwbug = 0;
5483 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
5484 would_hit_hwbug = 1;
5486 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5487 tg3_4g_overflow_test(mapping, len))
5488 would_hit_hwbug = 1;
5490 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5491 tg3_40bit_overflow_test(tp, mapping, len))
5492 would_hit_hwbug = 1;
5494 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5495 would_hit_hwbug = 1;
5497 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5498 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5500 entry = NEXT_TX(entry);
5502 /* Now loop through additional data fragments, and queue them. */
5503 if (skb_shinfo(skb)->nr_frags > 0) {
5504 unsigned int i, last;
5506 last = skb_shinfo(skb)->nr_frags - 1;
5507 for (i = 0; i <= last; i++) {
5508 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5511 mapping = sp->dma_maps[i];
5513 tnapi->tx_buffers[entry].skb = NULL;
5515 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
5517 would_hit_hwbug = 1;
5519 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5520 tg3_4g_overflow_test(mapping, len))
5521 would_hit_hwbug = 1;
5523 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5524 tg3_40bit_overflow_test(tp, mapping, len))
5525 would_hit_hwbug = 1;
5527 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5528 tg3_set_txd(tnapi, entry, mapping, len,
5529 base_flags, (i == last)|(mss << 1));
5531 tg3_set_txd(tnapi, entry, mapping, len,
5532 base_flags, (i == last));
5534 entry = NEXT_TX(entry);
5538 if (would_hit_hwbug) {
5539 u32 last_plus_one = entry;
5542 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5543 start &= (TG3_TX_RING_SIZE - 1);
5545 /* If the workaround fails due to memory/mapping
5546 * failure, silently drop this packet.
5548 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
5549 &start, base_flags, mss))
5555 /* Packets are ready, update Tx producer idx local and on card. */
5556 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, entry);
5558 tnapi->tx_prod = entry;
5559 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5560 netif_stop_queue(dev);
5561 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5562 netif_wake_queue(tp->dev);
5568 return NETDEV_TX_OK;
5571 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5576 if (new_mtu > ETH_DATA_LEN) {
5577 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5578 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5579 ethtool_op_set_tso(dev, 0);
5582 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5584 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5585 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5586 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5590 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5592 struct tg3 *tp = netdev_priv(dev);
5595 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5598 if (!netif_running(dev)) {
5599 /* We'll just catch it later when the
5602 tg3_set_mtu(dev, tp, new_mtu);
5610 tg3_full_lock(tp, 1);
5612 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5614 tg3_set_mtu(dev, tp, new_mtu);
5616 err = tg3_restart_hw(tp, 0);
5619 tg3_netif_start(tp);
5621 tg3_full_unlock(tp);
5629 static void tg3_rx_prodring_free(struct tg3 *tp,
5630 struct tg3_rx_prodring_set *tpr)
5633 struct ring_info *rxp;
5635 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5636 rxp = &tpr->rx_std_buffers[i];
5638 if (rxp->skb == NULL)
5641 pci_unmap_single(tp->pdev,
5642 pci_unmap_addr(rxp, mapping),
5644 PCI_DMA_FROMDEVICE);
5645 dev_kfree_skb_any(rxp->skb);
5649 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5650 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5651 rxp = &tpr->rx_jmb_buffers[i];
5653 if (rxp->skb == NULL)
5656 pci_unmap_single(tp->pdev,
5657 pci_unmap_addr(rxp, mapping),
5659 PCI_DMA_FROMDEVICE);
5660 dev_kfree_skb_any(rxp->skb);
5666 /* Initialize rx rings for packet processing.
5668 * The chip has been shut down and the driver detached from
5669 * the networking, so no interrupts or new tx packets will
5670 * end up in the driver. tp->{tx,}lock are held and thus
* we may not sleep.
5673 static int tg3_rx_prodring_alloc(struct tg3 *tp,
5674 struct tg3_rx_prodring_set *tpr)
5676 u32 i, rx_pkt_dma_sz;
5677 struct tg3_napi *tnapi = &tp->napi[0];
5679 /* Zero out all descriptors. */
5680 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
5682 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
5683 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5684 tp->dev->mtu > ETH_DATA_LEN)
5685 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
5686 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
5688 /* Initialize the invariants of the rings; we only set this
5689 * stuff once. This works because the card does not
5690 * write into the rx buffer posting rings.
5692 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5693 struct tg3_rx_buffer_desc *rxd;
5695 rxd = &tpr->rx_std[i];
5696 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
5697 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5698 rxd->opaque = (RXD_OPAQUE_RING_STD |
5699 (i << RXD_OPAQUE_INDEX_SHIFT));
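/* Example: std ring entry 5 carries the cookie
 * RXD_OPAQUE_RING_STD | (5 << RXD_OPAQUE_INDEX_SHIFT). The chip
 * echoes the cookie back in the status ring, and tg3_rx() splits it
 * into ring type and descriptor index with the OPAQUE masks.
 */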
5702 /* Now allocate fresh SKBs for each rx ring. */
5703 for (i = 0; i < tp->rx_pending; i++) {
5704 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5705 printk(KERN_WARNING PFX
5706 "%s: Using a smaller RX standard ring, "
5707 "only %d out of %d buffers were allocated "
5709 tp->dev->name, i, tp->rx_pending);
5717 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
5720 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
5722 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5723 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5724 struct tg3_rx_buffer_desc *rxd;
5726 rxd = &tpr->rx_jmb[i].std;
5727 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
5728 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5730 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5731 (i << RXD_OPAQUE_INDEX_SHIFT));
5734 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5735 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO,
5737 printk(KERN_WARNING PFX
5738 "%s: Using a smaller RX jumbo ring, "
5739 "only %d out of %d buffers were "
5740 "allocated successfully.\n",
5741 tp->dev->name, i, tp->rx_jumbo_pending);
5744 tp->rx_jumbo_pending = i;
5754 tg3_rx_prodring_free(tp, tpr);
5758 static void tg3_rx_prodring_fini(struct tg3 *tp,
5759 struct tg3_rx_prodring_set *tpr)
5761 kfree(tpr->rx_std_buffers);
5762 tpr->rx_std_buffers = NULL;
5763 kfree(tpr->rx_jmb_buffers);
5764 tpr->rx_jmb_buffers = NULL;
5766 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5767 tpr->rx_std, tpr->rx_std_mapping);
5771 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5772 tpr->rx_jmb, tpr->rx_jmb_mapping);
5777 static int tg3_rx_prodring_init(struct tg3 *tp,
5778 struct tg3_rx_prodring_set *tpr)
5780 tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) *
5781 TG3_RX_RING_SIZE, GFP_KERNEL);
5782 if (!tpr->rx_std_buffers)
5785 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5786 &tpr->rx_std_mapping);
5790 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5791 tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) *
5792 TG3_RX_JUMBO_RING_SIZE,
5794 if (!tpr->rx_jmb_buffers)
5797 tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
5798 TG3_RX_JUMBO_RING_BYTES,
5799 &tpr->rx_jmb_mapping);
5807 tg3_rx_prodring_fini(tp, tpr);
5811 /* Free up pending packets in all rx/tx rings.
5813 * The chip has been shut down and the driver detached from
5814 * the networking, so no interrupts or new tx packets will
5815 * end up in the driver. tp->{tx,}lock is not held and we are not
5816 * in an interrupt context and thus may sleep.
5818 static void tg3_free_rings(struct tg3 *tp)
5822 for (j = 0; j < tp->irq_cnt; j++) {
5823 struct tg3_napi *tnapi = &tp->napi[j];
5825 if (!tnapi->tx_buffers)
5828 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5829 struct tx_ring_info *txp;
5830 struct sk_buff *skb;
5832 txp = &tnapi->tx_buffers[i];
5840 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5844 i += skb_shinfo(skb)->nr_frags + 1;
5846 dev_kfree_skb_any(skb);
5850 tg3_rx_prodring_free(tp, &tp->prodring[0]);
5853 /* Initialize tx/rx rings for packet processing.
5855 * The chip has been shut down and the driver detached from
5856 * the networking, so no interrupts or new tx packets will
5857 * end up in the driver. tp->{tx,}lock are held and thus
* we may not sleep.
5860 static int tg3_init_rings(struct tg3 *tp)
5864 /* Free up all the SKBs. */
5867 for (i = 0; i < tp->irq_cnt; i++) {
5868 struct tg3_napi *tnapi = &tp->napi[i];
5870 tnapi->last_tag = 0;
5871 tnapi->last_irq_tag = 0;
5872 tnapi->hw_status->status = 0;
5873 tnapi->hw_status->status_tag = 0;
5874 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
5879 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
5881 tnapi->rx_rcb_ptr = 0;
5883 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5886 return tg3_rx_prodring_alloc(tp, &tp->prodring[0]);
5890 * Must not be invoked with interrupt sources disabled and
5891 * the hardware shut down.
5893 static void tg3_free_consistent(struct tg3 *tp)
5897 for (i = 0; i < tp->irq_cnt; i++) {
5898 struct tg3_napi *tnapi = &tp->napi[i];
5900 if (tnapi->tx_ring) {
5901 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5902 tnapi->tx_ring, tnapi->tx_desc_mapping);
5903 tnapi->tx_ring = NULL;
5906 kfree(tnapi->tx_buffers);
5907 tnapi->tx_buffers = NULL;
5909 if (tnapi->rx_rcb) {
5910 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5912 tnapi->rx_rcb_mapping);
5913 tnapi->rx_rcb = NULL;
5916 if (tnapi->hw_status) {
5917 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5919 tnapi->status_mapping);
5920 tnapi->hw_status = NULL;
5925 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5926 tp->hw_stats, tp->stats_mapping);
5927 tp->hw_stats = NULL;
5930 tg3_rx_prodring_fini(tp, &tp->prodring[0]);
5934 * Must not be invoked with interrupt sources disabled and
5935 * the hardware shut down. Can sleep.
5937 static int tg3_alloc_consistent(struct tg3 *tp)
5941 if (tg3_rx_prodring_init(tp, &tp->prodring[0]))
5944 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5945 sizeof(struct tg3_hw_stats),
5946 &tp->stats_mapping);
5950 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5952 for (i = 0; i < tp->irq_cnt; i++) {
5953 struct tg3_napi *tnapi = &tp->napi[i];
5954 struct tg3_hw_status *sblk;
5956 tnapi->hw_status = pci_alloc_consistent(tp->pdev,
5958 &tnapi->status_mapping);
5959 if (!tnapi->hw_status)
5962 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
5963 sblk = tnapi->hw_status;
5966 * When RSS is enabled, the status block format changes
5967 * slightly. The "rx_jumbo_consumer", "reserved",
5968 * and "rx_mini_consumer" members get mapped to the
5969 * other three rx return ring producer indexes.
5973 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
5976 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
5979 tnapi->rx_rcb_prod_idx = &sblk->reserved;
5982 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
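/* Resulting mapping, as suggested by the assignments above: vector 1
 * uses idx[0].rx_producer, vector 2 rx_jumbo_consumer, vector 3 the
 * reserved field, and vector 4 rx_mini_consumer, each field
 * repurposed as an rx return ring producer index under RSS.
 */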
5987 * If multivector RSS is enabled, vector 0 does not handle
5988 * rx or tx interrupts. Don't allocate any resources for it.
5990 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
5993 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
5994 TG3_RX_RCB_RING_BYTES(tp),
5995 &tnapi->rx_rcb_mapping);
5999 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6001 tnapi->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
6002 TG3_TX_RING_SIZE, GFP_KERNEL);
6003 if (!tnapi->tx_buffers)
6006 tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
6008 &tnapi->tx_desc_mapping);
6009 if (!tnapi->tx_ring)
6016 tg3_free_consistent(tp);
6020 #define MAX_WAIT_CNT 1000
6022 /* To stop a block, clear the enable bit and poll till it
6023 * clears. tp->lock is held.
6025 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6030 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6037 /* We can't enable/disable these bits of the
6038 * 5705/5750, just say success.
6051 for (i = 0; i < MAX_WAIT_CNT; i++) {
6054 if ((val & enable_bit) == 0)
6058 if (i == MAX_WAIT_CNT && !silent) {
6059 printk(KERN_ERR PFX "tg3_stop_block timed out, "
6060 "ofs=%lx enable_bit=%x\n",
6068 /* tp->lock is held. */
6069 static int tg3_abort_hw(struct tg3 *tp, int silent)
6073 tg3_disable_ints(tp);
6075 tp->rx_mode &= ~RX_MODE_ENABLE;
6076 tw32_f(MAC_RX_MODE, tp->rx_mode);
6079 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6080 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6081 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6082 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6083 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6084 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6086 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6087 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6088 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6089 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6090 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6091 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6092 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6094 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6095 tw32_f(MAC_MODE, tp->mac_mode);
6098 tp->tx_mode &= ~TX_MODE_ENABLE;
6099 tw32_f(MAC_TX_MODE, tp->tx_mode);
6101 for (i = 0; i < MAX_WAIT_CNT; i++) {
6103 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6106 if (i >= MAX_WAIT_CNT) {
6107 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
6108 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
6109 tp->dev->name, tr32(MAC_TX_MODE));
6113 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6114 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6115 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6117 tw32(FTQ_RESET, 0xffffffff);
6118 tw32(FTQ_RESET, 0x00000000);
6120 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6121 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6123 for (i = 0; i < tp->irq_cnt; i++) {
6124 struct tg3_napi *tnapi = &tp->napi[i];
6125 if (tnapi->hw_status)
6126 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6129 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6134 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6139 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6140 if (apedata != APE_SEG_SIG_MAGIC)
6143 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6144 if (!(apedata & APE_FW_STATUS_READY))
6147 /* Wait for up to 1 millisecond for APE to service previous event. */
6148 for (i = 0; i < 10; i++) {
6149 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6152 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6154 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6155 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6156 event | APE_EVENT_STATUS_EVENT_PENDING);
6158 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6160 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6166 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6167 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6170 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6175 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6179 case RESET_KIND_INIT:
6180 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6181 APE_HOST_SEG_SIG_MAGIC);
6182 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6183 APE_HOST_SEG_LEN_MAGIC);
6184 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6185 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6186 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6187 APE_HOST_DRIVER_ID_MAGIC);
6188 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6189 APE_HOST_BEHAV_NO_PHYLOCK);
6191 event = APE_EVENT_STATUS_STATE_START;
6193 case RESET_KIND_SHUTDOWN:
6194 /* With the interface we are currently using,
6195 * APE does not track driver state. Wiping
6196 * out the HOST SEGMENT SIGNATURE forces
6197 * the APE to assume OS absent status.
6199 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6201 event = APE_EVENT_STATUS_STATE_UNLOAD;
6203 case RESET_KIND_SUSPEND:
6204 event = APE_EVENT_STATUS_STATE_SUSPEND;
6210 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6212 tg3_ape_send_event(tp, event);
6215 /* tp->lock is held. */
6216 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6218 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6219 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6221 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6223 case RESET_KIND_INIT:
6224 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6228 case RESET_KIND_SHUTDOWN:
6229 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6233 case RESET_KIND_SUSPEND:
6234 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6243 if (kind == RESET_KIND_INIT ||
6244 kind == RESET_KIND_SUSPEND)
6245 tg3_ape_driver_state_change(tp, kind);
6248 /* tp->lock is held. */
6249 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6251 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6253 case RESET_KIND_INIT:
6254 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6255 DRV_STATE_START_DONE);
6258 case RESET_KIND_SHUTDOWN:
6259 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6260 DRV_STATE_UNLOAD_DONE);
6268 if (kind == RESET_KIND_SHUTDOWN)
6269 tg3_ape_driver_state_change(tp, kind);
6272 /* tp->lock is held. */
6273 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6275 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6277 case RESET_KIND_INIT:
6278 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6282 case RESET_KIND_SHUTDOWN:
6283 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6287 case RESET_KIND_SUSPEND:
6288 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6298 static int tg3_poll_fw(struct tg3 *tp)
6303 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6304 /* Wait up to 20ms for init done. */
6305 for (i = 0; i < 200; i++) {
6306 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6313 /* Wait for firmware initialization to complete. */
6314 for (i = 0; i < 100000; i++) {
6315 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6316 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6321 /* Chip might not be fitted with firmware. Some Sun onboard
6322 * parts are configured like that. So don't signal the timeout
6323 * of the above loop as an error, but do report the lack of
6324 * running firmware once.
6327 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6328 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6330 printk(KERN_INFO PFX "%s: No firmware running.\n",
6337 /* Save PCI command register before chip reset */
6338 static void tg3_save_pci_state(struct tg3 *tp)
6340 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6343 /* Restore PCI state after chip reset */
6344 static void tg3_restore_pci_state(struct tg3 *tp)
6348 /* Re-enable indirect register accesses. */
6349 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6350 tp->misc_host_ctrl);
6352 /* Set MAX PCI retry to zero. */
6353 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6354 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6355 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6356 val |= PCISTATE_RETRY_SAME_DMA;
6357 /* Allow reads and writes to the APE register and memory space. */
6358 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6359 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6360 PCISTATE_ALLOW_APE_SHMEM_WR;
6361 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6363 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6365 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6366 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6367 pcie_set_readrq(tp->pdev, 4096);
6369 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6370 tp->pci_cacheline_sz);
6371 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6376 /* Make sure PCI-X relaxed ordering bit is clear. */
6377 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6380 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6382 pcix_cmd &= ~PCI_X_CMD_ERO;
6383 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6387 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6389 /* Chip reset on 5780 will reset MSI enable bit,
6390 * so need to restore it.
6392 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6395 pci_read_config_word(tp->pdev,
6396 tp->msi_cap + PCI_MSI_FLAGS,
6398 pci_write_config_word(tp->pdev,
6399 tp->msi_cap + PCI_MSI_FLAGS,
6400 ctrl | PCI_MSI_FLAGS_ENABLE);
6401 val = tr32(MSGINT_MODE);
6402 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
6407 static void tg3_stop_fw(struct tg3 *);
6409 /* tp->lock is held. */
6410 static int tg3_chip_reset(struct tg3 *tp)
6413 void (*write_op)(struct tg3 *, u32, u32);
6418 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6420 /* No matching tg3_nvram_unlock() after this because
6421 * chip reset below will undo the nvram lock.
6423 tp->nvram_lock_cnt = 0;
6425 /* GRC_MISC_CFG core clock reset will clear the memory
6426 * enable bit in PCI register 4 and the MSI enable bit
6427 * on some chips, so we save relevant registers here.
6429 tg3_save_pci_state(tp);
6431 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
6432 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
6433 tw32(GRC_FASTBOOT_PC, 0);
6436 * We must avoid the readl() that normally takes place.
6437 * It locks machines, causes machine checks, and other
6438 * fun things. So, temporarily disable the 5701
6439 * hardware workaround, while we do the reset.
6441 write_op = tp->write32;
6442 if (write_op == tg3_write_flush_reg32)
6443 tp->write32 = tg3_write32;
6445 /* Prevent the irq handler from reading or writing PCI registers
6446 * during chip reset when the memory enable bit in the PCI command
6447 * register may be cleared. The chip does not generate interrupts
6448 * at this time, but the irq handler may still be called due to irq
6449 * sharing or irqpoll.
6451 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6452 for (i = 0; i < tp->irq_cnt; i++) {
6453 struct tg3_napi *tnapi = &tp->napi[i];
6454 if (tnapi->hw_status) {
6455 tnapi->hw_status->status = 0;
6456 tnapi->hw_status->status_tag = 0;
6458 tnapi->last_tag = 0;
6459 tnapi->last_irq_tag = 0;
6463 for (i = 0; i < tp->irq_cnt; i++)
6464 synchronize_irq(tp->napi[i].irq_vec);
6466 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6467 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
6468 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
6472 val = GRC_MISC_CFG_CORECLK_RESET;
6474 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6475 if (tr32(0x7e2c) == 0x60) {
6478 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6479 tw32(GRC_MISC_CFG, (1 << 29));
6484 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6485 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6486 tw32(GRC_VCPU_EXT_CTRL,
6487 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6490 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6491 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6492 tw32(GRC_MISC_CFG, val);
6494 /* restore 5701 hardware bug workaround write method */
6495 tp->write32 = write_op;
6497 /* Unfortunately, we have to delay before the PCI read back.
6498 * Some 575X chips will not even respond to a PCI cfg access
6499 * when the reset command is given to the chip.
6501 * How do these hardware designers expect things to work
6502 * properly if the PCI write is posted for a long period
6503 * of time? It is always necessary to have some method by
6504 * which a register read back can occur to push the write
6505 * out which does the reset.
6507 * For most tg3 variants the trick below has worked.
6512 /* Flush PCI posted writes. The normal MMIO registers
6513 * are inaccessible at this time so this is the only
6514 * way to do this reliably (actually, this is no longer
6515 * the case, see above). I tried to use indirect
6516 * register read/write but this upset some 5701 variants.
6518 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6522 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6525 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6529 /* Wait for link training to complete. */
6530 for (i = 0; i < 5000; i++)
6533 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6534 pci_write_config_dword(tp->pdev, 0xc4,
6535 cfg_val | (1 << 15));
6538 /* Clear the "no snoop" and "relaxed ordering" bits. */
6539 pci_read_config_word(tp->pdev,
6540 tp->pcie_cap + PCI_EXP_DEVCTL,
6542 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
6543 PCI_EXP_DEVCTL_NOSNOOP_EN);
6545 * Older PCIe devices only support the 128 byte
6546 * MPS setting. Enforce the restriction.
6548 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
6549 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
6550 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
6551 pci_write_config_word(tp->pdev,
6552 tp->pcie_cap + PCI_EXP_DEVCTL,
6555 pcie_set_readrq(tp->pdev, 4096);
6557 /* Clear error status */
6558 pci_write_config_word(tp->pdev,
6559 tp->pcie_cap + PCI_EXP_DEVSTA,
6560 PCI_EXP_DEVSTA_CED |
6561 PCI_EXP_DEVSTA_NFED |
6562 PCI_EXP_DEVSTA_FED |
6563 PCI_EXP_DEVSTA_URD);
6566 tg3_restore_pci_state(tp);
6568 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6571 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6572 val = tr32(MEMARB_MODE);
6573 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
6575 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6577 tw32(0x5000, 0x400);
6580 tw32(GRC_MODE, tp->grc_mode);
6582 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
6585 tw32(0xc4, val | (1 << 15));
6588 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6589 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6590 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6591 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6592 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6593 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6596 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6597 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6598 tw32_f(MAC_MODE, tp->mac_mode);
6599 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6600 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6601 tw32_f(MAC_MODE, tp->mac_mode);
6602 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6603 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6604 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6605 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6606 tw32_f(MAC_MODE, tp->mac_mode);
6608 tw32_f(MAC_MODE, 0);
6611 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6613 err = tg3_poll_fw(tp);
6619 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6622 phy_addr = tp->phy_addr;
6623 tp->phy_addr = TG3_PHY_PCIE_ADDR;
6625 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
6626 TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT);
6627 val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL |
6628 TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL |
6629 TG3_PCIEPHY_TX0CTRL1_NB_EN;
6630 tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val);
6633 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
6634 TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT);
6635 val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN |
6636 TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN;
6637 tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val);
6640 tp->phy_addr = phy_addr;
6643 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6644 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
6645 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
6646 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
6649 tw32(0x7c00, val | (1 << 25));
6652 /* Reprobe ASF enable state. */
6653 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6654 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6655 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6656 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6659 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6660 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6661 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6662 tp->last_event_jiffies = jiffies;
6663 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
6664 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6671 /* tp->lock is held. */
6672 static void tg3_stop_fw(struct tg3 *tp)
6674 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6675 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
6676 /* Wait for RX cpu to ACK the previous event. */
6677 tg3_wait_for_event_ack(tp);
6679 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
6681 tg3_generate_fw_event(tp);
6683 /* Wait for RX cpu to ACK this event. */
6684 tg3_wait_for_event_ack(tp);
6688 /* tp->lock is held. */
6689 static int tg3_halt(struct tg3 *tp, int kind, int silent)
6695 tg3_write_sig_pre_reset(tp, kind);
6697 tg3_abort_hw(tp, silent);
6698 err = tg3_chip_reset(tp);
6700 __tg3_set_mac_addr(tp, 0);
6702 tg3_write_sig_legacy(tp, kind);
6703 tg3_write_sig_post_reset(tp, kind);
6711 #define RX_CPU_SCRATCH_BASE 0x30000
6712 #define RX_CPU_SCRATCH_SIZE 0x04000
6713 #define TX_CPU_SCRATCH_BASE 0x34000
6714 #define TX_CPU_SCRATCH_SIZE 0x04000
6716 /* tp->lock is held. */
6717 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6721 BUG_ON(offset == TX_CPU_BASE &&
6722 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6724 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6725 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6727 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6730 if (offset == RX_CPU_BASE) {
6731 for (i = 0; i < 10000; i++) {
6732 tw32(offset + CPU_STATE, 0xffffffff);
6733 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6734 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6738 tw32(offset + CPU_STATE, 0xffffffff);
6739 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
6742 for (i = 0; i < 10000; i++) {
6743 tw32(offset + CPU_STATE, 0xffffffff);
6744 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6745 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6751 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6754 (offset == RX_CPU_BASE ? "RX" : "TX"));
6758 /* Clear firmware's nvram arbitration. */
6759 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6760 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6765 unsigned int fw_base;
6766 unsigned int fw_len;
6767 const __be32 *fw_data;
6770 /* tp->lock is held. */
6771 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6772 int cpu_scratch_size, struct fw_info *info)
6774 int err, lock_err, i;
6775 void (*write_op)(struct tg3 *, u32, u32);
6777 if (cpu_base == TX_CPU_BASE &&
6778 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6779 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6780 "TX cpu firmware on %s which is 5705.\n",
6785 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6786 write_op = tg3_write_mem;
6788 write_op = tg3_write_indirect_reg32;
6790 /* It is possible that bootcode is still loading at this point.
6791 * Get the nvram lock before halting the cpu.
6793 lock_err = tg3_nvram_lock(tp);
6794 err = tg3_halt_cpu(tp, cpu_base);
6796 tg3_nvram_unlock(tp);
6800 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6801 write_op(tp, cpu_scratch_base + i, 0);
6802 tw32(cpu_base + CPU_STATE, 0xffffffff);
6803 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6804 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
6805 write_op(tp, (cpu_scratch_base +
6806 (info->fw_base & 0xffff) +
6808 be32_to_cpu(info->fw_data[i]));
6816 /* tp->lock is held. */
6817 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6819 struct fw_info info;
6820 const __be32 *fw_data;
6823 fw_data = (void *)tp->fw->data;
6825 /* Firmware blob starts with version numbers, followed by
6826 start address and length. We are setting complete length.
6827 length = end_address_of_bss - start_address_of_text.
6828 Remainder is the blob to be loaded contiguously
6829 from start address. */
6831 info.fw_base = be32_to_cpu(fw_data[1]);
6832 info.fw_len = tp->fw->size - 12;
6833 info.fw_data = &fw_data[3];
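/* Assumed blob layout, matching the parsing above:
 *   fw_data[0]      firmware version
 *   fw_data[1]      load address in CPU scratch space (fw_base)
 *   fw_data[2]      image length (text start to bss end)
 *   fw_data[3]...   the image itself, tp->fw->size - 12 bytes
 */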
6835 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6836 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6841 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6842 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6847 /* Now startup only the RX cpu. */
6848 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6849 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
6851 for (i = 0; i < 5; i++) {
6852 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
6854 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6855 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6856 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
6860 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6861 "to set RX CPU PC, is %08x should be %08x\n",
6862 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6866 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6867 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6872 /* 5705 needs a special version of the TSO firmware. */
6874 /* tp->lock is held. */
6875 static int tg3_load_tso_firmware(struct tg3 *tp)
6877 struct fw_info info;
6878 const __be32 *fw_data;
6879 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6882 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6885 fw_data = (void *)tp->fw->data;
6887 /* Firmware blob starts with version numbers, followed by
6888 start address and length. We are setting complete length.
6889 length = end_address_of_bss - start_address_of_text.
6890 Remainder is the blob to be loaded contiguously
6891 from start address. */
6893 info.fw_base = be32_to_cpu(fw_data[1]);
6894 cpu_scratch_size = tp->fw_len;
6895 info.fw_len = tp->fw->size - 12;
6896 info.fw_data = &fw_data[3];
6898 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6899 cpu_base = RX_CPU_BASE;
6900 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6902 cpu_base = TX_CPU_BASE;
6903 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6904 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6907 err = tg3_load_firmware_cpu(tp, cpu_base,
6908 cpu_scratch_base, cpu_scratch_size,
6913 /* Now startup the cpu. */
6914 tw32(cpu_base + CPU_STATE, 0xffffffff);
6915 tw32_f(cpu_base + CPU_PC, info.fw_base);
6917 for (i = 0; i < 5; i++) {
6918 if (tr32(cpu_base + CPU_PC) == info.fw_base)
6920 tw32(cpu_base + CPU_STATE, 0xffffffff);
6921 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6922 tw32_f(cpu_base + CPU_PC, info.fw_base);
6926 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6927 "to set CPU PC, is %08x should be %08x\n",
6928 tp->dev->name, tr32(cpu_base + CPU_PC),
6932 tw32(cpu_base + CPU_STATE, 0xffffffff);
6933 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6938 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6940 struct tg3 *tp = netdev_priv(dev);
6941 struct sockaddr *addr = p;
6942 int err = 0, skip_mac_1 = 0;
6944 if (!is_valid_ether_addr(addr->sa_data))
6947 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6949 if (!netif_running(dev))
6952 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6953 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6955 addr0_high = tr32(MAC_ADDR_0_HIGH);
6956 addr0_low = tr32(MAC_ADDR_0_LOW);
6957 addr1_high = tr32(MAC_ADDR_1_HIGH);
6958 addr1_low = tr32(MAC_ADDR_1_LOW);
6960 /* Skip MAC addr 1 if ASF is using it. */
6961 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6962 !(addr1_high == 0 && addr1_low == 0))
6963 skip_mac_1 = 1;
6964 }
6965 spin_lock_bh(&tp->lock);
6966 __tg3_set_mac_addr(tp, skip_mac_1);
6967 spin_unlock_bh(&tp->lock);
6969 return err;
6970 }
6972 /* tp->lock is held. */
6973 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6974 dma_addr_t mapping, u32 maxlen_flags,
6975 u32 nic_addr)
6976 {
6977 tg3_write_mem(tp,
6978 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6979 ((u64) mapping >> 32));
6980 tg3_write_mem(tp,
6981 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6982 ((u64) mapping & 0xffffffff));
6983 tg3_write_mem(tp,
6984 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6985 maxlen_flags);
6987 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6988 tg3_write_mem(tp,
6989 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6990 nic_addr);
6991 }
6993 static void __tg3_set_rx_mode(struct net_device *);
6994 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6995 {
6996 int i = 0;
6998 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
6999 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7000 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7001 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7003 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7004 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7005 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7006 } else {
7007 tw32(HOSTCC_TXCOL_TICKS, 0);
7008 tw32(HOSTCC_TXMAX_FRAMES, 0);
7009 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7011 tw32(HOSTCC_RXCOL_TICKS, 0);
7012 tw32(HOSTCC_RXMAX_FRAMES, 0);
7013 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7014 }
7016 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7017 u32 val = ec->stats_block_coalesce_usecs;
7019 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7020 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7022 if (!netif_carrier_ok(tp->dev))
7023 val = 0;
7025 tw32(HOSTCC_STAT_COAL_TICKS, val);
7026 }
7028 for (i = 0; i < tp->irq_cnt - 1; i++) {
7029 u32 reg;
7031 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7032 tw32(reg, ec->rx_coalesce_usecs);
7033 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7034 tw32(reg, ec->tx_coalesce_usecs);
7035 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7036 tw32(reg, ec->rx_max_coalesced_frames);
7037 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7038 tw32(reg, ec->tx_max_coalesced_frames);
7039 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7040 tw32(reg, ec->rx_max_coalesced_frames_irq);
7041 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7042 tw32(reg, ec->tx_max_coalesced_frames_irq);
7043 }
7045 for (; i < tp->irq_max - 1; i++) {
7046 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7047 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7048 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7049 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7050 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7051 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7052 }
7053 }
7055 /* tp->lock is held. */
7056 static void tg3_rings_reset(struct tg3 *tp)
7057 {
7058 int i;
7059 u32 stblk, txrcb, rxrcb, limit;
7060 struct tg3_napi *tnapi = &tp->napi[0];
7062 /* Disable all transmit rings but the first. */
7063 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7064 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7065 else
7066 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7068 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7069 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7070 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7071 BDINFO_FLAGS_DISABLED);
7074 /* Disable all receive return rings but the first. */
7075 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
7076 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7077 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7078 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7079 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7080 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7081 else
7082 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7084 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7085 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7086 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7087 BDINFO_FLAGS_DISABLED);
7089 /* Disable interrupts */
7090 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7092 /* Zero mailbox registers. */
7093 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7094 for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
7095 tp->napi[i].tx_prod = 0;
7096 tp->napi[i].tx_cons = 0;
7097 tw32_mailbox(tp->napi[i].prodmbox, 0);
7098 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7099 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7100 }
7101 } else {
7102 tp->napi[0].tx_prod = 0;
7103 tp->napi[0].tx_cons = 0;
7104 tw32_mailbox(tp->napi[0].prodmbox, 0);
7105 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7106 }
7108 /* Make sure the NIC-based send BD rings are disabled. */
7109 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7110 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7111 for (i = 0; i < 16; i++)
7112 tw32_tx_mbox(mbox + i * 8, 0);
7113 }
7115 txrcb = NIC_SRAM_SEND_RCB;
7116 rxrcb = NIC_SRAM_RCV_RET_RCB;
7118 /* Clear status block in ram. */
7119 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7121 /* Set status block DMA address */
7122 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7123 ((u64) tnapi->status_mapping >> 32));
7124 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7125 ((u64) tnapi->status_mapping & 0xffffffff));
7127 if (tnapi->tx_ring) {
7128 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7129 (TG3_TX_RING_SIZE <<
7130 BDINFO_FLAGS_MAXLEN_SHIFT),
7131 NIC_SRAM_TX_BUFFER_DESC);
7132 txrcb += TG3_BDINFO_SIZE;
7133 }
7135 if (tnapi->rx_rcb) {
7136 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7137 (TG3_RX_RCB_RING_SIZE(tp) <<
7138 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7139 rxrcb += TG3_BDINFO_SIZE;
7140 }
7142 stblk = HOSTCC_STATBLCK_RING1;
7144 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7145 u64 mapping = (u64)tnapi->status_mapping;
7146 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7147 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7149 /* Clear status block in ram. */
7150 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7152 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7153 (TG3_TX_RING_SIZE <<
7154 BDINFO_FLAGS_MAXLEN_SHIFT),
7155 NIC_SRAM_TX_BUFFER_DESC);
7157 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7158 (TG3_RX_RCB_RING_SIZE(tp) <<
7159 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7161 stblk += 8;
7162 txrcb += TG3_BDINFO_SIZE;
7163 rxrcb += TG3_BDINFO_SIZE;
7164 }
7165 }
7167 /* tp->lock is held. */
7168 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7169 {
7170 u32 val, rdmac_mode;
7171 int i, err, limit;
7172 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
7174 tg3_disable_ints(tp);
7178 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7180 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7181 tg3_abort_hw(tp, 1);
7182 }
7184 if (reset_phy &&
7185 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7186 tg3_phy_reset(tp);
7188 err = tg3_chip_reset(tp);
7189 if (err)
7190 return err;
7192 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7194 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7195 val = tr32(TG3_CPMU_CTRL);
7196 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7197 tw32(TG3_CPMU_CTRL, val);
7199 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7200 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7201 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7202 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7204 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7205 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7206 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7207 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7209 val = tr32(TG3_CPMU_HST_ACC);
7210 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7211 val |= CPMU_HST_ACC_MACCLK_6_25;
7212 tw32(TG3_CPMU_HST_ACC, val);
7213 }
7215 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7216 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7217 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7218 PCIE_PWR_MGMT_L1_THRESH_4MS;
7219 tw32(PCIE_PWR_MGMT_THRESH, val);
7221 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7222 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7224 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7226 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7227 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7228 }
7230 /* This works around an issue with Athlon chipsets on
7231 * B3 tigon3 silicon. This bit has no effect on any
7232 * other revision. But do not set this on PCI Express
7233 * chips and don't even touch the clocks if the CPMU is present.
7234 */
7235 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7236 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7237 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7238 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7239 }
7241 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7242 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7243 val = tr32(TG3PCI_PCISTATE);
7244 val |= PCISTATE_RETRY_SAME_DMA;
7245 tw32(TG3PCI_PCISTATE, val);
7246 }
7248 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7249 /* Allow reads and writes to the
7250 * APE register and memory space.
7251 */
7252 val = tr32(TG3PCI_PCISTATE);
7253 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7254 PCISTATE_ALLOW_APE_SHMEM_WR;
7255 tw32(TG3PCI_PCISTATE, val);
7256 }
7258 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7259 /* Enable some hw fixes. */
7260 val = tr32(TG3PCI_MSI_DATA);
7261 val |= (1 << 26) | (1 << 28) | (1 << 29);
7262 tw32(TG3PCI_MSI_DATA, val);
7263 }
7265 /* Descriptor ring init may make accesses to the
7266 * NIC SRAM area to setup the TX descriptors, so we
7267 * can only do this after the hardware has been
7268 * successfully reset.
7269 */
7270 err = tg3_init_rings(tp);
7271 if (err)
7272 return err;
7274 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7275 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
7276 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
7277 /* This value is determined during the probe time DMA
7278 * engine test, tg3_test_dma.
7279 */
7280 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7281 }
7283 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7284 GRC_MODE_4X_NIC_SEND_RINGS |
7285 GRC_MODE_NO_TX_PHDR_CSUM |
7286 GRC_MODE_NO_RX_PHDR_CSUM);
7287 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7289 /* Pseudo-header checksum is done by hardware logic and not
7290 * the offload processers, so make the chip do the pseudo-
7291 * header checksums on receive. For transmit it is more
7292 * convenient to do the pseudo-header checksum in software
7293 * as Linux does that on transmit for us in all cases.
7294 */
7295 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7297 tw32(GRC_MODE,
7298 tp->grc_mode |
7299 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
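/* For reference, the TCP/UDP pseudo-header checksum the hardware folds
 * in on receive (and the driver computes in software on transmit) is the
 * 16-bit ones' complement sum over { src IP, dst IP, zero:8, proto:8,
 * L4 length:16 }. A minimal host-side sketch, illustrative only:
 *
 *	u32 sum = (saddr >> 16) + (saddr & 0xffff) +
 *		  (daddr >> 16) + (daddr & 0xffff) +
 *		  IPPROTO_TCP + l4len;
 *	while (sum >> 16)
 *		sum = (sum & 0xffff) + (sum >> 16);
 *
 * In-kernel code would normally use csum_tcpudp_nofold() rather than
 * open-coding this.
 */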
7301 /* Setup the timer prescalar register. Clock is always 66Mhz. */
7302 val = tr32(GRC_MISC_CFG);
7303 val &= ~GRC_MISC_CFG_PRESCALAR_MASK;
7304 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7305 tw32(GRC_MISC_CFG, val);
7307 /* Initialize MBUF/DESC pool. */
7308 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7309 /* Do nothing. */
7310 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7311 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7312 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7313 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7314 else
7315 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7316 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7317 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7318 }
7319 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7320 int fw_len;
7322 fw_len = tp->fw_len;
7323 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7324 tw32(BUFMGR_MB_POOL_ADDR,
7325 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7326 tw32(BUFMGR_MB_POOL_SIZE,
7327 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7328 }
7330 if (tp->dev->mtu <= ETH_DATA_LEN) {
7331 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7332 tp->bufmgr_config.mbuf_read_dma_low_water);
7333 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7334 tp->bufmgr_config.mbuf_mac_rx_low_water);
7335 tw32(BUFMGR_MB_HIGH_WATER,
7336 tp->bufmgr_config.mbuf_high_water);
7337 } else {
7338 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7339 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7340 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7341 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7342 tw32(BUFMGR_MB_HIGH_WATER,
7343 tp->bufmgr_config.mbuf_high_water_jumbo);
7344 }
7345 tw32(BUFMGR_DMA_LOW_WATER,
7346 tp->bufmgr_config.dma_low_water);
7347 tw32(BUFMGR_DMA_HIGH_WATER,
7348 tp->bufmgr_config.dma_high_water);
7350 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7351 for (i = 0; i < 2000; i++) {
7352 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7353 break;
7354 udelay(10);
7355 }
7356 if (i >= 2000) {
7357 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7358 tp->dev->name);
7359 return -ENODEV;
7360 }
7362 /* Setup replenish threshold. */
7363 val = tp->rx_pending / 8;
7364 if (val == 0)
7365 val = 1;
7366 else if (val > tp->rx_std_max_post)
7367 val = tp->rx_std_max_post;
7368 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7369 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7370 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7372 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7373 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7374 }
7376 tw32(RCVBDI_STD_THRESH, val);
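/* Worked example of the clamp above: rx_pending = 200 gives a replenish
 * threshold of 200 / 8 = 25 buffer descriptors, i.e. the chip requests
 * more standard ring buffers once 25 have been consumed; an rx_pending
 * below 8 would yield 0 and is bumped to 1.
 */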
7378 /* Initialize TG3_BDINFO's at:
7379 * RCVDBDI_STD_BD: standard eth size rx ring
7380 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7381 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7383 * like so:
7384 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7385 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7386 * ring attribute flags
7387 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7389 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7390 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7392 * The size of each ring is fixed in the firmware, but the location is
7393 * configurable.
7394 */
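/* Concretely, each TG3_BDINFO control block in NIC SRAM is a small fixed
 * record of TG3_BDINFO_SIZE bytes; the offsets below are an illustration
 * of the layout named above, not authoritative values:
 *
 *	+0x0  host ring DMA address, high 32 bits
 *	+0x4  host ring DMA address, low 32 bits
 *	+0x8  (max buffer length << 16) | attribute flags
 *	+0xc  NIC SRAM address of the descriptors
 */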
7395 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7396 ((u64) tpr->rx_std_mapping >> 32));
7397 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7398 ((u64) tpr->rx_std_mapping & 0xffffffff));
7399 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7400 NIC_SRAM_RX_BUFFER_DESC);
7402 /* Disable the mini ring */
7403 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7404 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7405 BDINFO_FLAGS_DISABLED);
7407 /* Program the jumbo buffer descriptor ring control
7408 * blocks on those devices that have them.
7410 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
7411 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
7412 /* Setup replenish threshold. */
7413 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7415 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7416 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7417 ((u64) tpr->rx_jmb_mapping >> 32));
7418 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7419 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
7420 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7421 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7422 BDINFO_FLAGS_USE_EXT_RECV);
7423 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7424 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7425 } else {
7426 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7427 BDINFO_FLAGS_DISABLED);
7428 }
7429 }
7430 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
7431 val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
7432 (RX_STD_MAX_SIZE << 2);
7433 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7434 val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT;
7435 else
7436 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
7438 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
7440 tpr->rx_std_ptr = tp->rx_pending;
7441 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7442 tpr->rx_std_ptr);
7444 tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7445 tp->rx_jumbo_pending : 0;
7446 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7447 tpr->rx_jmb_ptr);
7449 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
7450 tw32(STD_REPLENISH_LWM, 32);
7451 tw32(JMB_REPLENISH_LWM, 16);
7452 }
7454 tg3_rings_reset(tp);
7456 /* Initialize MAC address and backoff seed. */
7457 __tg3_set_mac_addr(tp, 0);
7459 /* MTU + ethernet header + FCS + optional VLAN tag */
7460 tw32(MAC_RX_MTU_SIZE,
7461 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
7463 /* The slot time is changed by tg3_setup_phy if we
7464 * run at gigabit with half duplex.
7465 */
7466 tw32(MAC_TX_LENGTHS,
7467 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7468 (6 << TX_LENGTHS_IPG_SHIFT) |
7469 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
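/* The write above packs three independent fields into one register: the
 * inter-packet gap during carrier sense (2), the back-to-back IPG (6)
 * and the slot time (32, in 512-bit times). If, for instance, those
 * shifts were 12, 8 and 0 the programmed word would be
 * (2 << 12) | (6 << 8) | 32; the real bit positions live in tg3.h, so
 * treat this arithmetic purely as an illustration.
 */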
7471 /* Receive rules. */
7472 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7473 tw32(RCVLPC_CONFIG, 0x0181);
7475 /* Calculate RDMAC_MODE setting early, we need it to determine
7476 * the RCVLPC_STATE_ENABLE mask.
7477 */
7478 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7479 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7480 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7481 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7482 RDMAC_MODE_LNGREAD_ENAB);
7484 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7485 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7486 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7487 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7488 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7489 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7491 /* If statement applies to 5705 and 5750 PCI devices only */
7492 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7493 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7494 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7495 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7496 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7497 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7498 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7499 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7500 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7501 }
7502 }
7504 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7505 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7507 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7508 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
7510 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7511 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7512 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
7514 /* Receive/send statistics. */
7515 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7516 val = tr32(RCVLPC_STATS_ENABLE);
7517 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7518 tw32(RCVLPC_STATS_ENABLE, val);
7519 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7520 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7521 val = tr32(RCVLPC_STATS_ENABLE);
7522 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7523 tw32(RCVLPC_STATS_ENABLE, val);
7525 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7527 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7528 tw32(SNDDATAI_STATSENAB, 0xffffff);
7529 tw32(SNDDATAI_STATSCTRL,
7530 (SNDDATAI_SCTRL_ENABLE |
7531 SNDDATAI_SCTRL_FASTUPD));
7533 /* Setup host coalescing engine. */
7534 tw32(HOSTCC_MODE, 0);
7535 for (i = 0; i < 2000; i++) {
7536 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7537 break;
7538 udelay(10);
7539 }
7541 __tg3_set_coalesce(tp, &tp->coal);
7543 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7544 /* Status/statistics block address. See tg3_timer,
7545 * the tg3_periodic_fetch_stats call there, and
7546 * tg3_get_stats to see how this works for 5705/5750 chips.
7547 */
7548 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7549 ((u64) tp->stats_mapping >> 32));
7550 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7551 ((u64) tp->stats_mapping & 0xffffffff));
7552 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7554 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7556 /* Clear statistics and status block memory areas */
7557 for (i = NIC_SRAM_STATS_BLK;
7558 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7559 i += sizeof(u32)) {
7560 tg3_write_mem(tp, i, 0);
7561 udelay(40);
7562 }
7563 }
7565 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7567 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7568 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7569 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7570 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7572 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7573 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7574 /* reset to prevent losing 1st rx packet intermittently */
7575 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7576 udelay(10);
7577 }
7579 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7580 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7581 else
7582 tp->mac_mode = 0;
7583 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7584 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7585 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7586 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7587 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7588 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7589 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7590 udelay(40);
7592 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7593 * If TG3_FLG2_IS_NIC is zero, we should read the
7594 * register to preserve the GPIO settings for LOMs. The GPIOs,
7595 * whether used as inputs or outputs, are set by boot code after
7596 * reset.
7597 */
7598 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7599 u32 gpio_mask;
7601 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7602 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7603 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7605 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7606 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7607 GRC_LCLCTRL_GPIO_OUTPUT3;
7609 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7610 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7612 tp->grc_local_ctrl &= ~gpio_mask;
7613 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7614 }
7615 /* GPIO1 must be driven high for eeprom write protect */
7616 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7617 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7618 GRC_LCLCTRL_GPIO_OUTPUT1);
7620 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7621 udelay(100);
7623 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
7624 val = tr32(MSGINT_MODE);
7625 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
7626 tw32(MSGINT_MODE, val);
7627 }
7629 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7630 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7631 udelay(40);
7632 }
7634 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7635 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7636 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7637 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7638 WDMAC_MODE_LNGREAD_ENAB);
7640 /* If statement applies to 5705 and 5750 PCI devices only */
7641 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7642 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7643 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7644 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7645 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7646 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7647 /* nothing */
7648 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7649 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7650 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7651 val |= WDMAC_MODE_RX_ACCEL;
7652 }
7653 }
7655 /* Enable host coalescing bug fix */
7656 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7657 val |= WDMAC_MODE_STATUS_TAG_FIX;
7659 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7660 val |= WDMAC_MODE_BURST_ALL_DATA;
7662 tw32_f(WDMAC_MODE, val);
7663 udelay(40);
7665 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7666 u16 pcix_cmd;
7668 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7669 &pcix_cmd);
7670 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7671 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7672 pcix_cmd |= PCI_X_CMD_READ_2K;
7673 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7674 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7675 pcix_cmd |= PCI_X_CMD_READ_2K;
7676 }
7677 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7678 pcix_cmd);
7679 }
7681 tw32_f(RDMAC_MODE, rdmac_mode);
7682 udelay(40);
7684 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7685 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7686 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7688 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7689 tw32(SNDDATAC_MODE,
7690 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7691 else
7692 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7694 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7695 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7696 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7697 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7698 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7699 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7700 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
7701 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
7702 val |= SNDBDI_MODE_MULTI_TXQ_EN;
7703 tw32(SNDBDI_MODE, val);
7704 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7706 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7707 err = tg3_load_5701_a0_firmware_fix(tp);
7708 if (err)
7709 return err;
7710 }
7712 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7713 err = tg3_load_tso_firmware(tp);
7714 if (err)
7715 return err;
7716 }
7718 tp->tx_mode = TX_MODE_ENABLE;
7719 tw32_f(MAC_TX_MODE, tp->tx_mode);
7720 udelay(100);
7722 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
7723 u32 reg = MAC_RSS_INDIR_TBL_0;
7724 u8 *ent = (u8 *)&val;
7726 /* Setup the indirection table */
7727 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
7728 int idx = i % sizeof(val);
7730 ent[idx] = i % (tp->irq_cnt - 1);
7731 if (idx == sizeof(val) - 1) {
7732 tw32(reg, val);
7733 reg += 4;
7734 }
7735 }
7737 /* Setup the "secret" hash key. */
7738 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
7739 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
7740 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
7741 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
7742 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
7743 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
7744 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
7745 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
7746 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
7747 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
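/* End to end, the RSS path set up here works roughly like this: the MAC
 * hashes each flow with the Toeplitz-style key programmed above, the low
 * 7 bits of the hash (RX_MODE_RSS_ITBL_HASH_BITS_7) index the 128-entry
 * indirection table, and the entry selects one of the irq_cnt - 1 rx
 * return rings. Illustrative lookup, not driver code:
 *
 *	ring = indir_tbl[hash(flow) & 0x7f];	// 0 .. irq_cnt - 2
 */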
7748 }
7750 tp->rx_mode = RX_MODE_ENABLE;
7751 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7752 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7754 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
7755 tp->rx_mode |= RX_MODE_RSS_ENABLE |
7756 RX_MODE_RSS_ITBL_HASH_BITS_7 |
7757 RX_MODE_RSS_IPV6_HASH_EN |
7758 RX_MODE_RSS_TCP_IPV6_HASH_EN |
7759 RX_MODE_RSS_IPV4_HASH_EN |
7760 RX_MODE_RSS_TCP_IPV4_HASH_EN;
7762 tw32_f(MAC_RX_MODE, tp->rx_mode);
7763 udelay(10);
7765 tw32(MAC_LED_CTRL, tp->led_ctrl);
7767 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7768 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7769 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7770 udelay(10);
7772 tw32_f(MAC_RX_MODE, tp->rx_mode);
7773 udelay(10);
7774 }
7775 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7776 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7777 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7778 /* Set drive transmission level to 1.2V */
7779 /* only if the signal pre-emphasis bit is not set */
7780 val = tr32(MAC_SERDES_CFG);
7781 val &= 0xfffff000;
7782 val |= 0x880;
7783 tw32(MAC_SERDES_CFG, val);
7785 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7786 tw32(MAC_SERDES_CFG, 0x616000);
7787 }
7789 /* Prevent chip from dropping frames when flow control
7790 * is enabled.
7791 */
7792 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7794 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7795 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7796 /* Use hardware link auto-negotiation */
7797 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7798 }
7800 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7801 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7802 u32 tmp;
7804 tmp = tr32(SERDES_RX_CTRL);
7805 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7806 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7807 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7808 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7809 }
7811 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7812 if (tp->link_config.phy_is_low_power) {
7813 tp->link_config.phy_is_low_power = 0;
7814 tp->link_config.speed = tp->link_config.orig_speed;
7815 tp->link_config.duplex = tp->link_config.orig_duplex;
7816 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7817 }
7819 err = tg3_setup_phy(tp, 0);
7820 if (err)
7821 return err;
7823 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7824 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) {
7825 u32 tmp;
7827 /* Clear CRC stats. */
7828 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7829 tg3_writephy(tp, MII_TG3_TEST1,
7830 tmp | MII_TG3_TEST1_CRC_EN);
7831 tg3_readphy(tp, 0x14, &tmp);
7832 }
7833 }
7834 }
7836 __tg3_set_rx_mode(tp->dev);
7838 /* Initialize receive rules. */
7839 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7840 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7841 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7842 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7844 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7845 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7846 limit = 8;
7847 else
7848 limit = 16;
7849 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7850 limit -= 4;
7851 switch (limit) {
7852 case 16:
7853 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7854 case 15:
7855 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7856 case 14:
7857 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7858 case 13:
7859 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7860 case 12:
7861 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7862 case 11:
7863 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7864 case 10:
7865 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7866 case 9:
7867 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7868 case 8:
7869 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7870 case 7:
7871 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7872 case 6:
7873 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7874 case 5:
7875 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7876 case 4:
7877 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7878 case 3:
7879 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7880 case 2:
7881 case 1:
7882 default:
7883 break;
7884 }
7887 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7888 /* Write our heartbeat update interval to APE. */
7889 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7890 APE_HOST_HEARTBEAT_INT_DISABLE);
7892 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7894 return 0;
7895 }
7897 /* Called at device open time to get the chip ready for
7898 * packet processing. Invoked with tp->lock held.
7899 */
7900 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7901 {
7902 tg3_switch_clocks(tp);
7904 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7906 return tg3_reset_hw(tp, reset_phy);
7907 }
7909 #define TG3_STAT_ADD32(PSTAT, REG) \
7910 do { u32 __val = tr32(REG); \
7911 (PSTAT)->low += __val; \
7912 if ((PSTAT)->low < __val) \
7913 (PSTAT)->high += 1; \
7914 } while (0)
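/* The macro above folds a 32-bit hardware counter into a 64-bit software
 * counter: the register value is added to the low word, and an unsigned
 * wrap (low ending up smaller than the value just added) carries one into
 * the high word. A stand-alone sketch of the same idea; the helper name
 * and the u64 return are inventions of this example:
 */
static inline u64 tg3_example_stat_fold(tg3_stat64_t *stat, u32 hw_val)
{
	stat->low += hw_val;
	if (stat->low < hw_val)	/* 32-bit overflow occurred */
		stat->high += 1;
	return ((u64)stat->high << 32) | stat->low;
}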
7916 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7917 {
7918 struct tg3_hw_stats *sp = tp->hw_stats;
7920 if (!netif_carrier_ok(tp->dev))
7921 return;
7923 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7924 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7925 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7926 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7927 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7928 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7929 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7930 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7931 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7932 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7933 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7934 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7935 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7937 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7938 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7939 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7940 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7941 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7942 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7943 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7944 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7945 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7946 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7947 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7948 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7949 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7950 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7952 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7953 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7954 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7955 }
7957 static void tg3_timer(unsigned long __opaque)
7958 {
7959 struct tg3 *tp = (struct tg3 *) __opaque;
7961 if (tp->irq_sync)
7962 goto restart_timer;
7964 spin_lock(&tp->lock);
7966 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7967 /* All of this garbage is because when using non-tagged
7968 * IRQ status the mailbox/status_block protocol the chip
7969 * uses with the cpu is race prone.
7970 */
7971 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
7972 tw32(GRC_LOCAL_CTRL,
7973 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7974 } else {
7975 tw32(HOSTCC_MODE, tp->coalesce_mode |
7976 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
7977 }
7979 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7980 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7981 spin_unlock(&tp->lock);
7982 schedule_work(&tp->reset_task);
7983 return;
7984 }
7985 }
7987 /* This part only runs once per second. */
7988 if (!--tp->timer_counter) {
7989 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7990 tg3_periodic_fetch_stats(tp);
7992 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7993 u32 mac_stat;
7994 int phy_event;
7996 mac_stat = tr32(MAC_STATUS);
7998 phy_event = 0;
7999 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
8000 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8001 phy_event = 1;
8002 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8003 phy_event = 1;
8005 if (phy_event)
8006 tg3_setup_phy(tp, 0);
8007 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
8008 u32 mac_stat = tr32(MAC_STATUS);
8009 int need_setup = 0;
8011 if (netif_carrier_ok(tp->dev) &&
8012 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8013 need_setup = 1;
8014 }
8015 if (! netif_carrier_ok(tp->dev) &&
8016 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8017 MAC_STATUS_SIGNAL_DET))) {
8018 need_setup = 1;
8019 }
8020 if (need_setup) {
8021 if (!tp->serdes_counter) {
8022 tw32_f(MAC_MODE,
8023 (tp->mac_mode &
8024 ~MAC_MODE_PORT_MODE_MASK));
8025 udelay(40);
8026 tw32_f(MAC_MODE, tp->mac_mode);
8027 }
8029 tg3_setup_phy(tp, 0);
8030 }
8031 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8032 tg3_serdes_parallel_detect(tp);
8034 tp->timer_counter = tp->timer_multiplier;
8035 }
8037 /* Heartbeat is only sent once every 2 seconds.
8039 * The heartbeat is to tell the ASF firmware that the host
8040 * driver is still alive. In the event that the OS crashes,
8041 * ASF needs to reset the hardware to free up the FIFO space
8042 * that may be filled with rx packets destined for the host.
8043 * If the FIFO is full, ASF will no longer function properly.
8045 * Unintended resets have been reported on real time kernels
8046 * where the timer doesn't run on time. Netpoll will also have
8047 * the same problem.
8049 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8050 * to check the ring condition when the heartbeat is expiring
8051 * before doing the reset. This will prevent most unintended
8052 * resets.
8053 */
8054 if (!--tp->asf_counter) {
8055 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
8056 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
8057 tg3_wait_for_event_ack(tp);
8059 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8060 FWCMD_NICDRV_ALIVE3);
8061 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8062 /* 5 seconds timeout */
8063 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
8065 tg3_generate_fw_event(tp);
8066 }
8067 tp->asf_counter = tp->asf_multiplier;
8068 }
8070 spin_unlock(&tp->lock);
8072 restart_timer:
8073 tp->timer.expires = jiffies + tp->timer_offset;
8074 add_timer(&tp->timer);
8075 }
8077 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8078 {
8079 irq_handler_t fn;
8080 unsigned long flags;
8081 char *name;
8082 struct tg3_napi *tnapi = &tp->napi[irq_num];
8084 if (tp->irq_cnt == 1)
8085 name = tp->dev->name;
8086 else {
8087 name = &tnapi->irq_lbl[0];
8088 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8089 name[IFNAMSIZ-1] = 0;
8090 }
8092 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8093 fn = tg3_msi;
8094 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8095 fn = tg3_msi_1shot;
8096 flags = IRQF_SAMPLE_RANDOM;
8097 } else {
8098 fn = tg3_interrupt;
8099 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8100 fn = tg3_interrupt_tagged;
8101 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
8102 }
8104 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8105 }
8107 static int tg3_test_interrupt(struct tg3 *tp)
8108 {
8109 struct tg3_napi *tnapi = &tp->napi[0];
8110 struct net_device *dev = tp->dev;
8111 int err, i, intr_ok = 0;
8112 u32 val;
8114 if (!netif_running(dev))
8115 return -ENODEV;
8117 tg3_disable_ints(tp);
8119 free_irq(tnapi->irq_vec, tnapi);
8121 /*
8122 * Turn off MSI one shot mode. Otherwise this test has no
8123 * observable way to know whether the interrupt was delivered.
8124 */
8125 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
8126 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8127 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8128 tw32(MSGINT_MODE, val);
8129 }
8131 err = request_irq(tnapi->irq_vec, tg3_test_isr,
8132 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8133 if (err)
8134 return err;
8136 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8137 tg3_enable_ints(tp);
8139 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8140 HOSTCC_MODE_NOW);
8142 for (i = 0; i < 5; i++) {
8143 u32 int_mbox, misc_host_ctrl;
8145 int_mbox = tr32_mailbox(tnapi->int_mbox);
8146 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8148 if ((int_mbox != 0) ||
8149 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8150 intr_ok = 1;
8151 break;
8152 }
8154 msleep(10);
8155 }
8157 tg3_disable_ints(tp);
8159 free_irq(tnapi->irq_vec, tnapi);
8161 err = tg3_request_irq(tp, 0);
8163 if (err)
8164 return err;
8166 if (intr_ok) {
8167 /* Reenable MSI one shot mode. */
8168 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
8169 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8170 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
8171 tw32(MSGINT_MODE, val);
8172 }
8173 return 0;
8174 }
8176 return -EIO;
8177 }
8179 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
8180 * successfully restored
8181 */
8182 static int tg3_test_msi(struct tg3 *tp)
8183 {
8184 int err;
8185 u16 pci_cmd;
8187 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8188 return 0;
8190 /* Turn off SERR reporting in case MSI terminates with Master
8191 * Abort.
8192 */
8193 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8194 pci_write_config_word(tp->pdev, PCI_COMMAND,
8195 pci_cmd & ~PCI_COMMAND_SERR);
8197 err = tg3_test_interrupt(tp);
8199 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8201 if (err == 0)
8202 return 0;
8204 /* other failures */
8205 if (err != -EIO)
8206 return err;
8208 /* MSI test failed, go back to INTx mode */
8209 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
8210 "switching to INTx mode. Please report this failure to "
8211 "the PCI maintainer and include system chipset information.\n",
8214 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8216 pci_disable_msi(tp->pdev);
8218 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8220 err = tg3_request_irq(tp, 0);
8221 if (err)
8222 return err;
8224 /* Need to reset the chip because the MSI cycle may have terminated
8225 * with Master Abort.
8226 */
8227 tg3_full_lock(tp, 1);
8229 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8230 err = tg3_init_hw(tp, 1);
8232 tg3_full_unlock(tp);
8234 if (err)
8235 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8237 return err;
8238 }
8240 static int tg3_request_firmware(struct tg3 *tp)
8241 {
8242 const __be32 *fw_data;
8244 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
8245 printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
8246 tp->dev->name, tp->fw_needed);
8247 return -ENOENT;
8248 }
8250 fw_data = (void *)tp->fw->data;
8252 /* Firmware blob starts with version numbers, followed by
8253 * start address and _full_ length including BSS sections
8254 * (which must be longer than the actual data, of course). */
8257 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
8258 if (tp->fw_len < (tp->fw->size - 12)) {
8259 printk(KERN_ERR "%s: bogus length %d in \"%s\"\n",
8260 tp->dev->name, tp->fw_len, tp->fw_needed);
8261 release_firmware(tp->fw);
8262 tp->fw = NULL;
8263 return -EINVAL;
8264 }
8266 /* We no longer need firmware; we have it. */
8267 tp->fw_needed = NULL;
8268 return 0;
8269 }
8271 static bool tg3_enable_msix(struct tg3 *tp)
8272 {
8273 int i, rc, cpus = num_online_cpus();
8274 struct msix_entry msix_ent[tp->irq_max];
8276 if (cpus == 1)
8277 /* Just fallback to the simpler MSI mode. */
8278 return false;
8280 /*
8281 * We want as many rx rings enabled as there are cpus.
8282 * The first MSIX vector only deals with link interrupts, etc,
8283 * so we add one to the number of vectors we are requesting.
8284 */
8285 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
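/* Worked example of the sizing above: on a 4-CPU machine with, say, an
 * irq_max of 5 this requests min(4 + 1, 5) = 5 vectors -- one for link
 * and other misc events plus four rx rings, matching the
 * one-ring-per-cpu goal described in the comment.
 */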
8287 for (i = 0; i < tp->irq_max; i++) {
8288 msix_ent[i].entry = i;
8289 msix_ent[i].vector = 0;
8290 }
8292 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
8293 if (rc != 0) {
8294 if (rc < TG3_RSS_MIN_NUM_MSIX_VECS)
8295 return false;
8296 if (pci_enable_msix(tp->pdev, msix_ent, rc))
8297 return false;
8298 printk(KERN_NOTICE
8299 "%s: Requested %d MSI-X vectors, received %d\n",
8300 tp->dev->name, tp->irq_cnt, rc);
8301 tp->irq_cnt = rc;
8302 }
8304 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
8306 for (i = 0; i < tp->irq_max; i++)
8307 tp->napi[i].irq_vec = msix_ent[i].vector;
8309 tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
8311 return true;
8312 }
8314 static void tg3_ints_init(struct tg3 *tp)
8315 {
8316 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
8317 !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8318 /* All MSI supporting chips should support tagged
8319 * status. Assert that this is the case.
8320 */
8321 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8322 "Not using MSI.\n", tp->dev->name);
8323 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
8324 }
8326 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
8327 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
8328 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
8329 pci_enable_msi(tp->pdev) == 0)
8330 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8332 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8333 u32 msi_mode = tr32(MSGINT_MODE);
8334 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8335 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
8336 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8337 }
8339 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
8340 tp->irq_cnt = 1;
8341 tp->napi[0].irq_vec = tp->pdev->irq;
8342 tp->dev->real_num_tx_queues = 1;
8343 }
8344 }
8346 static void tg3_ints_fini(struct tg3 *tp)
8347 {
8348 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8349 pci_disable_msix(tp->pdev);
8350 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
8351 pci_disable_msi(tp->pdev);
8352 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
8353 tp->tg3_flags3 &= ~TG3_FLG3_ENABLE_RSS;
8354 }
8356 static int tg3_open(struct net_device *dev)
8357 {
8358 struct tg3 *tp = netdev_priv(dev);
8359 int i, err;
8361 if (tp->fw_needed) {
8362 err = tg3_request_firmware(tp);
8363 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8364 if (err)
8365 return err;
8366 } else if (err) {
8367 printk(KERN_WARNING "%s: TSO capability disabled.\n",
8368 tp->dev->name);
8369 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8370 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8371 printk(KERN_NOTICE "%s: TSO capability restored.\n",
8372 tp->dev->name);
8373 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8374 }
8375 }
8377 netif_carrier_off(tp->dev);
8379 err = tg3_set_power_state(tp, PCI_D0);
8380 if (err)
8381 return err;
8383 tg3_full_lock(tp, 0);
8385 tg3_disable_ints(tp);
8386 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8388 tg3_full_unlock(tp);
8390 /*
8391 * Setup interrupts first so we know how
8392 * many NAPI resources to allocate
8393 */
8394 tg3_ints_init(tp);
8396 /* The placement of this call is tied
8397 * to the setup and use of Host TX descriptors.
8398 */
8399 err = tg3_alloc_consistent(tp);
8400 if (err)
8401 goto err_out1;
8403 tg3_napi_enable(tp);
8405 for (i = 0; i < tp->irq_cnt; i++) {
8406 struct tg3_napi *tnapi = &tp->napi[i];
8407 err = tg3_request_irq(tp, i);
8408 if (err) {
8409 for (i--; i >= 0; i--)
8410 free_irq(tnapi->irq_vec, tnapi);
8411 break;
8412 }
8413 }
8415 if (err)
8416 goto err_out2;
8418 tg3_full_lock(tp, 0);
8420 err = tg3_init_hw(tp, 1);
8421 if (err) {
8422 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8423 tg3_free_rings(tp);
8424 } else {
8425 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8426 tp->timer_offset = HZ;
8427 else
8428 tp->timer_offset = HZ / 10;
8430 BUG_ON(tp->timer_offset > HZ);
8431 tp->timer_counter = tp->timer_multiplier =
8432 (HZ / tp->timer_offset);
8433 tp->asf_counter = tp->asf_multiplier =
8434 ((HZ / tp->timer_offset) * 2);
8436 init_timer(&tp->timer);
8437 tp->timer.expires = jiffies + tp->timer_offset;
8438 tp->timer.data = (unsigned long) tp;
8439 tp->timer.function = tg3_timer;
8440 }
8442 tg3_full_unlock(tp);
8444 if (err)
8445 goto err_out3;
8447 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8448 err = tg3_test_msi(tp);
8450 if (err) {
8451 tg3_full_lock(tp, 0);
8452 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8453 tg3_free_rings(tp);
8454 tg3_full_unlock(tp);
8456 goto err_out2;
8457 }
8459 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8460 (tp->tg3_flags2 & TG3_FLG2_USING_MSI) &&
8461 (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) {
8462 u32 val = tr32(PCIE_TRANSACTION_CFG);
8464 tw32(PCIE_TRANSACTION_CFG,
8465 val | PCIE_TRANS_CFG_1SHOT_MSI);
8466 }
8467 }
8469 tg3_phy_start(tp);
8471 tg3_full_lock(tp, 0);
8473 add_timer(&tp->timer);
8474 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8475 tg3_enable_ints(tp);
8477 tg3_full_unlock(tp);
8479 netif_tx_start_all_queues(dev);
8481 return 0;
8483 err_out3:
8484 for (i = tp->irq_cnt - 1; i >= 0; i--) {
8485 struct tg3_napi *tnapi = &tp->napi[i];
8486 free_irq(tnapi->irq_vec, tnapi);
8487 }
8489 err_out2:
8490 tg3_napi_disable(tp);
8491 tg3_free_consistent(tp);
8493 err_out1:
8494 tg3_ints_fini(tp);
8495 return err;
8496 }
8499 /*static*/ void tg3_dump_state(struct tg3 *tp)
8500 {
8501 u32 val32, val32_2, val32_3, val32_4, val32_5;
8502 u16 val16;
8503 int i;
8504 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
8506 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8507 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8508 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8512 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8513 tr32(MAC_MODE), tr32(MAC_STATUS));
8514 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8515 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8516 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8517 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8518 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8519 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8521 /* Send data initiator control block */
8522 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8523 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8524 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8525 tr32(SNDDATAI_STATSCTRL));
8527 /* Send data completion control block */
8528 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8530 /* Send BD ring selector block */
8531 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8532 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8534 /* Send BD initiator control block */
8535 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8536 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8538 /* Send BD completion control block */
8539 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8541 /* Receive list placement control block */
8542 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8543 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8544 printk(" RCVLPC_STATSCTRL[%08x]\n",
8545 tr32(RCVLPC_STATSCTRL));
8547 /* Receive data and receive BD initiator control block */
8548 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8549 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8551 /* Receive data completion control block */
8552 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8555 /* Receive BD initiator control block */
8556 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8557 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8559 /* Receive BD completion control block */
8560 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8561 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8563 /* Receive list selector control block */
8564 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8565 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8567 /* Mbuf cluster free block */
8568 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8569 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8571 /* Host coalescing control block */
8572 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8573 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8574 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8575 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8576 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8577 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8578 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8579 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8580 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8581 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8582 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8583 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8585 /* Memory arbiter control block */
8586 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8587 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8589 /* Buffer manager control block */
8590 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8591 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8592 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8593 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8594 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8595 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8596 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8597 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8599 /* Read DMA control block */
8600 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8601 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8603 /* Write DMA control block */
8604 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8605 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8607 /* DMA completion block */
8608 printk("DEBUG: DMAC_MODE[%08x]\n",
8612 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8613 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8614 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8615 tr32(GRC_LOCAL_CTRL));
8618 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8619 tr32(RCVDBDI_JUMBO_BD + 0x0),
8620 tr32(RCVDBDI_JUMBO_BD + 0x4),
8621 tr32(RCVDBDI_JUMBO_BD + 0x8),
8622 tr32(RCVDBDI_JUMBO_BD + 0xc));
8623 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8624 tr32(RCVDBDI_STD_BD + 0x0),
8625 tr32(RCVDBDI_STD_BD + 0x4),
8626 tr32(RCVDBDI_STD_BD + 0x8),
8627 tr32(RCVDBDI_STD_BD + 0xc));
8628 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8629 tr32(RCVDBDI_MINI_BD + 0x0),
8630 tr32(RCVDBDI_MINI_BD + 0x4),
8631 tr32(RCVDBDI_MINI_BD + 0x8),
8632 tr32(RCVDBDI_MINI_BD + 0xc));
8634 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8635 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8636 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8637 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8638 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8639 val32, val32_2, val32_3, val32_4);
8641 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8642 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8643 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8644 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8645 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8646 val32, val32_2, val32_3, val32_4);
8648 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8649 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8650 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8651 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8652 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8653 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8654 val32, val32_2, val32_3, val32_4, val32_5);
8656 /* SW status block */
8658 "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8661 sblk->rx_jumbo_consumer,
8663 sblk->rx_mini_consumer,
8664 sblk->idx[0].rx_producer,
8665 sblk->idx[0].tx_consumer);
8667 /* SW statistics block */
8668 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8669 ((u32 *)tp->hw_stats)[0],
8670 ((u32 *)tp->hw_stats)[1],
8671 ((u32 *)tp->hw_stats)[2],
8672 ((u32 *)tp->hw_stats)[3]);
8675 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
8676 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8677 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8678 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8679 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
8681 /* NIC side send descriptors. */
8682 for (i = 0; i < 6; i++) {
8685 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8686 + (i * sizeof(struct tg3_tx_buffer_desc));
8687 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8689 readl(txd + 0x0), readl(txd + 0x4),
8690 readl(txd + 0x8), readl(txd + 0xc));
8693 /* NIC side RX descriptors. */
8694 for (i = 0; i < 6; i++) {
8697 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8698 + (i * sizeof(struct tg3_rx_buffer_desc));
8699 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8701 readl(rxd + 0x0), readl(rxd + 0x4),
8702 readl(rxd + 0x8), readl(rxd + 0xc));
8703 rxd += (4 * sizeof(u32));
8704 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8706 readl(rxd + 0x0), readl(rxd + 0x4),
8707 readl(rxd + 0x8), readl(rxd + 0xc));
8708 }
8710 for (i = 0; i < 6; i++) {
8713 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8714 + (i * sizeof(struct tg3_rx_buffer_desc));
8715 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8717 readl(rxd + 0x0), readl(rxd + 0x4),
8718 readl(rxd + 0x8), readl(rxd + 0xc));
8719 rxd += (4 * sizeof(u32));
8720 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8722 readl(rxd + 0x0), readl(rxd + 0x4),
8723 readl(rxd + 0x8), readl(rxd + 0xc));
8724 }
8725 }
8728 static struct net_device_stats *tg3_get_stats(struct net_device *);
8729 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8731 static int tg3_close(struct net_device *dev)
8732 {
8733 int i;
8734 struct tg3 *tp = netdev_priv(dev);
8736 tg3_napi_disable(tp);
8737 cancel_work_sync(&tp->reset_task);
8739 netif_tx_stop_all_queues(dev);
8741 del_timer_sync(&tp->timer);
8745 tg3_full_lock(tp, 1);
8750 tg3_disable_ints(tp);
8752 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8754 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8756 tg3_full_unlock(tp);
8758 for (i = tp->irq_cnt - 1; i >= 0; i--) {
8759 struct tg3_napi *tnapi = &tp->napi[i];
8760 free_irq(tnapi->irq_vec, tnapi);
8761 }
8765 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8766 sizeof(tp->net_stats_prev));
8767 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8768 sizeof(tp->estats_prev));
8770 tg3_free_consistent(tp);
8772 tg3_set_power_state(tp, PCI_D3hot);
8774 netif_carrier_off(tp->dev);
8776 return 0;
8777 }
8779 static inline unsigned long get_stat64(tg3_stat64_t *val)
8780 {
8781 unsigned long ret;
8783 #if (BITS_PER_LONG == 32)
8784 ret = val->low;
8785 #else
8786 ret = ((u64)val->high << 32) | ((u64)val->low);
8787 #endif
8788 return ret;
8789 }
8791 static inline u64 get_estat64(tg3_stat64_t *val)
8792 {
8793 return ((u64)val->high << 32) | ((u64)val->low);
8794 }
8796 static unsigned long calc_crc_errors(struct tg3 *tp)
8797 {
8798 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8800 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8801 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8802 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
8803 u32 val;
8805 spin_lock_bh(&tp->lock);
8806 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8807 tg3_writephy(tp, MII_TG3_TEST1,
8808 val | MII_TG3_TEST1_CRC_EN);
8809 tg3_readphy(tp, 0x14, &val);
8810 } else
8811 val = 0;
8812 spin_unlock_bh(&tp->lock);
8814 tp->phy_crc_errors += val;
8816 return tp->phy_crc_errors;
8817 }
8819 return get_stat64(&hw_stats->rx_fcs_errors);
8820 }
8822 #define ESTAT_ADD(member) \
8823 estats->member = old_estats->member + \
8824 get_estat64(&hw_stats->member)
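/* For instance, ESTAT_ADD(rx_octets) expands to:
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_estat64(&hw_stats->rx_octets);
 *
 * i.e. each ethtool counter is the snapshot saved at the last close plus
 * the 64-bit value accumulated in the live hardware statistics block.
 */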
8826 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8827 {
8828 struct tg3_ethtool_stats *estats = &tp->estats;
8829 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8830 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8832 if (!hw_stats)
8833 return old_estats;
8835 ESTAT_ADD(rx_octets);
8836 ESTAT_ADD(rx_fragments);
8837 ESTAT_ADD(rx_ucast_packets);
8838 ESTAT_ADD(rx_mcast_packets);
8839 ESTAT_ADD(rx_bcast_packets);
8840 ESTAT_ADD(rx_fcs_errors);
8841 ESTAT_ADD(rx_align_errors);
8842 ESTAT_ADD(rx_xon_pause_rcvd);
8843 ESTAT_ADD(rx_xoff_pause_rcvd);
8844 ESTAT_ADD(rx_mac_ctrl_rcvd);
8845 ESTAT_ADD(rx_xoff_entered);
8846 ESTAT_ADD(rx_frame_too_long_errors);
8847 ESTAT_ADD(rx_jabbers);
8848 ESTAT_ADD(rx_undersize_packets);
8849 ESTAT_ADD(rx_in_length_errors);
8850 ESTAT_ADD(rx_out_length_errors);
8851 ESTAT_ADD(rx_64_or_less_octet_packets);
8852 ESTAT_ADD(rx_65_to_127_octet_packets);
8853 ESTAT_ADD(rx_128_to_255_octet_packets);
8854 ESTAT_ADD(rx_256_to_511_octet_packets);
8855 ESTAT_ADD(rx_512_to_1023_octet_packets);
8856 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8857 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8858 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8859 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8860 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8862 ESTAT_ADD(tx_octets);
8863 ESTAT_ADD(tx_collisions);
8864 ESTAT_ADD(tx_xon_sent);
8865 ESTAT_ADD(tx_xoff_sent);
8866 ESTAT_ADD(tx_flow_control);
8867 ESTAT_ADD(tx_mac_errors);
8868 ESTAT_ADD(tx_single_collisions);
8869 ESTAT_ADD(tx_mult_collisions);
8870 ESTAT_ADD(tx_deferred);
8871 ESTAT_ADD(tx_excessive_collisions);
8872 ESTAT_ADD(tx_late_collisions);
8873 ESTAT_ADD(tx_collide_2times);
8874 ESTAT_ADD(tx_collide_3times);
8875 ESTAT_ADD(tx_collide_4times);
8876 ESTAT_ADD(tx_collide_5times);
8877 ESTAT_ADD(tx_collide_6times);
8878 ESTAT_ADD(tx_collide_7times);
8879 ESTAT_ADD(tx_collide_8times);
8880 ESTAT_ADD(tx_collide_9times);
8881 ESTAT_ADD(tx_collide_10times);
8882 ESTAT_ADD(tx_collide_11times);
8883 ESTAT_ADD(tx_collide_12times);
8884 ESTAT_ADD(tx_collide_13times);
8885 ESTAT_ADD(tx_collide_14times);
8886 ESTAT_ADD(tx_collide_15times);
8887 ESTAT_ADD(tx_ucast_packets);
8888 ESTAT_ADD(tx_mcast_packets);
8889 ESTAT_ADD(tx_bcast_packets);
8890 ESTAT_ADD(tx_carrier_sense_errors);
8891 ESTAT_ADD(tx_discards);
8892 ESTAT_ADD(tx_errors);
8894 ESTAT_ADD(dma_writeq_full);
8895 ESTAT_ADD(dma_write_prioq_full);
8896 ESTAT_ADD(rxbds_empty);
8897 ESTAT_ADD(rx_discards);
8898 ESTAT_ADD(rx_errors);
8899 ESTAT_ADD(rx_threshold_hit);
8901 ESTAT_ADD(dma_readq_full);
8902 ESTAT_ADD(dma_read_prioq_full);
8903 ESTAT_ADD(tx_comp_queue_full);
8905 ESTAT_ADD(ring_set_send_prod_index);
8906 ESTAT_ADD(ring_status_update);
8907 ESTAT_ADD(nic_irqs);
8908 ESTAT_ADD(nic_avoided_irqs);
8909 ESTAT_ADD(nic_tx_threshold_hit);
8911 return estats;
8912 }
8914 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8915 {
8916 struct tg3 *tp = netdev_priv(dev);
8917 struct net_device_stats *stats = &tp->net_stats;
8918 struct net_device_stats *old_stats = &tp->net_stats_prev;
8919 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8921 if (!hw_stats)
8922 return old_stats;
8924 stats->rx_packets = old_stats->rx_packets +
8925 get_stat64(&hw_stats->rx_ucast_packets) +
8926 get_stat64(&hw_stats->rx_mcast_packets) +
8927 get_stat64(&hw_stats->rx_bcast_packets);
8929 stats->tx_packets = old_stats->tx_packets +
8930 get_stat64(&hw_stats->tx_ucast_packets) +
8931 get_stat64(&hw_stats->tx_mcast_packets) +
8932 get_stat64(&hw_stats->tx_bcast_packets);
8934 stats->rx_bytes = old_stats->rx_bytes +
8935 get_stat64(&hw_stats->rx_octets);
8936 stats->tx_bytes = old_stats->tx_bytes +
8937 get_stat64(&hw_stats->tx_octets);
8939 stats->rx_errors = old_stats->rx_errors +
8940 get_stat64(&hw_stats->rx_errors);
8941 stats->tx_errors = old_stats->tx_errors +
8942 get_stat64(&hw_stats->tx_errors) +
8943 get_stat64(&hw_stats->tx_mac_errors) +
8944 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8945 get_stat64(&hw_stats->tx_discards);
8947 stats->multicast = old_stats->multicast +
8948 get_stat64(&hw_stats->rx_mcast_packets);
8949 stats->collisions = old_stats->collisions +
8950 get_stat64(&hw_stats->tx_collisions);
8952 stats->rx_length_errors = old_stats->rx_length_errors +
8953 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8954 get_stat64(&hw_stats->rx_undersize_packets);
8956 stats->rx_over_errors = old_stats->rx_over_errors +
8957 get_stat64(&hw_stats->rxbds_empty);
8958 stats->rx_frame_errors = old_stats->rx_frame_errors +
8959 get_stat64(&hw_stats->rx_align_errors);
8960 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8961 get_stat64(&hw_stats->tx_discards);
8962 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8963 get_stat64(&hw_stats->tx_carrier_sense_errors);
8965 stats->rx_crc_errors = old_stats->rx_crc_errors +
8966 calc_crc_errors(tp);
8968 stats->rx_missed_errors = old_stats->rx_missed_errors +
8969 get_stat64(&hw_stats->rx_discards);
8974 static inline u32 calc_crc(unsigned char *buf, int len)
8982 for (j = 0; j < len; j++) {
8985 for (k = 0; k < 8; k++) {
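/*
 * Editor's sketch: the loops above implement the standard reflected
 * CRC-32 (polynomial 0xedb88320) used for the Ethernet FCS.  A
 * self-contained version, assuming that algorithm, would look like:
 */
#if 0
static u32 example_ether_crc32(const unsigned char *buf, int len)
{
	u32 reg = 0xffffffff;	/* preset register to all ones */
	int j, k;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];
		for (k = 0; k < 8; k++) {
			u32 lsb = reg & 1;

			reg >>= 1;
			if (lsb)
				reg ^= 0xedb88320;	/* reflected polynomial */
		}
	}
	return ~reg;	/* final one's complement */
}
#endif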
8999 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9001 /* accept or reject all multicast frames */
9002 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9003 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9004 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9005 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9008 static void __tg3_set_rx_mode(struct net_device *dev)
9010 struct tg3 *tp = netdev_priv(dev);
9013 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9014 RX_MODE_KEEP_VLAN_TAG);
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif
9031 if (dev->flags & IFF_PROMISC) {
9032 /* Promiscuous mode. */
9033 rx_mode |= RX_MODE_PROMISC;
9034 } else if (dev->flags & IFF_ALLMULTI) {
9035 /* Accept all multicast. */
9036 tg3_set_multi (tp, 1);
9037 } else if (dev->mc_count < 1) {
9038 /* Reject all multicast. */
9039 tg3_set_multi (tp, 0);
9041 /* Accept one or more multicast(s). */
9042 struct dev_mc_list *mclist;
9044 u32 mc_filter[4] = { 0, };
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}
9059 tw32(MAC_HASH_REG_0, mc_filter[0]);
9060 tw32(MAC_HASH_REG_1, mc_filter[1]);
9061 tw32(MAC_HASH_REG_2, mc_filter[2]);
9062 tw32(MAC_HASH_REG_3, mc_filter[3]);
9065 if (rx_mode != tp->rx_mode) {
9066 tp->rx_mode = rx_mode;
9067 tw32_f(MAC_RX_MODE, rx_mode);
9072 static void tg3_set_rx_mode(struct net_device *dev)
9074 struct tg3 *tp = netdev_priv(dev);
9076 if (!netif_running(dev))
9079 tg3_full_lock(tp, 0);
9080 __tg3_set_rx_mode(dev);
9081 tg3_full_unlock(tp);
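/*
 * Editor's sketch: how __tg3_set_rx_mode() above maps a multicast
 * address into the 128-bit MAC_HASH_REG_0..3 filter.  The low seven
 * bits of the inverted CRC select one of 128 hash bits; bits 6:5 pick
 * the register and bits 4:0 the bit within it:
 */
#if 0
static void example_hash_mcast_addr(unsigned char *addr, u32 mc_filter[4])
{
	u32 crc = calc_crc(addr, ETH_ALEN);
	u32 bit = ~crc & 0x7f;			/* hash bit index, 0..127 */
	u32 regidx = (bit & 0x60) >> 5;		/* which 32-bit register */

	mc_filter[regidx] |= 1 << (bit & 0x1f);	/* bit within register */
}
#endif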
9084 #define TG3_REGDUMP_LEN (32 * 1024)
9086 static int tg3_get_regs_len(struct net_device *dev)
9088 return TG3_REGDUMP_LEN;
9091 static void tg3_get_regs(struct net_device *dev,
9092 struct ethtool_regs *regs, void *_p)
9095 struct tg3 *tp = netdev_priv(dev);
9101 memset(p, 0, TG3_REGDUMP_LEN);
9103 if (tp->link_config.phy_is_low_power)
9106 tg3_full_lock(tp, 0);
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base, len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)
9119 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
9120 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
9121 GET_REG32_LOOP(MAC_MODE, 0x4f0);
9122 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
9123 GET_REG32_1(SNDDATAC_MODE);
9124 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
9125 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
9126 GET_REG32_1(SNDBDC_MODE);
9127 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
9128 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
9129 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
9130 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
9131 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
9132 GET_REG32_1(RCVDCC_MODE);
9133 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
9134 GET_REG32_LOOP(RCVCC_MODE, 0x14);
9135 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
9136 GET_REG32_1(MBFREE_MODE);
9137 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
9138 GET_REG32_LOOP(MEMARB_MODE, 0x10);
9139 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
9140 GET_REG32_LOOP(RDMAC_MODE, 0x08);
9141 GET_REG32_LOOP(WDMAC_MODE, 0x08);
9142 GET_REG32_1(RX_CPU_MODE);
9143 GET_REG32_1(RX_CPU_STATE);
9144 GET_REG32_1(RX_CPU_PGMCTR);
9145 GET_REG32_1(RX_CPU_HWBKPT);
9146 GET_REG32_1(TX_CPU_MODE);
9147 GET_REG32_1(TX_CPU_STATE);
9148 GET_REG32_1(TX_CPU_PGMCTR);
9149 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
9150 GET_REG32_LOOP(FTQ_RESET, 0x120);
9151 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
9152 GET_REG32_1(DMAC_MODE);
9153 GET_REG32_LOOP(GRC_MODE, 0x4c);
9154 if (tp->tg3_flags & TG3_FLAG_NVRAM)
9155 GET_REG32_LOOP(NVRAM_CMD, 0x24);
#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1
9161 tg3_full_unlock(tp);
9164 static int tg3_get_eeprom_len(struct net_device *dev)
9166 struct tg3 *tp = netdev_priv(dev);
9168 return tp->nvram_size;
9171 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9173 struct tg3 *tp = netdev_priv(dev);
9176 u32 i, offset, len, b_offset, b_count;
9179 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9182 if (tp->link_config.phy_is_low_power)
9185 offset = eeprom->offset;
9189 eeprom->magic = TG3_EEPROM_MAGIC;
9192 /* adjustments to start on required 4 byte boundary */
9193 b_offset = offset & 3;
9194 b_count = 4 - b_offset;
9195 if (b_count > len) {
9196 /* i.e. offset=1 len=2 */
9199 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9202 memcpy(data, ((char*)&val) + b_offset, b_count);
9205 eeprom->len += b_count;
	/* read bytes up to the last 4 byte boundary */
9209 pd = &data[eeprom->len];
9210 for (i = 0; i < (len - (len & 3)); i += 4) {
9211 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9216 memcpy(pd + i, &val, 4);
9221 /* read last bytes not ending on 4 byte boundary */
9222 pd = &data[eeprom->len];
9224 b_offset = offset + len - b_count;
9225 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9228 memcpy(pd, &val, b_count);
9229 eeprom->len += b_count;
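/*
 * Editor's worked example of the alignment handling above: for a
 * request with offset = 5 and len = 10, b_offset = 5 & 3 = 1, so the
 * head reads the word at offset 4 and copies its last three bytes; the
 * aligned middle loop then reads the single whole word at offset 8;
 * and the tail reads the word at offset 12 to pick up the final three
 * bytes (3 + 4 + 3 = 10).
 */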
9234 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9236 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9238 struct tg3 *tp = netdev_priv(dev);
9240 u32 offset, len, b_offset, odd_len;
9244 if (tp->link_config.phy_is_low_power)
9247 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
9248 eeprom->magic != TG3_EEPROM_MAGIC)
9251 offset = eeprom->offset;
9254 if ((b_offset = (offset & 3))) {
9255 /* adjustments to start on required 4 byte boundary */
9256 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9267 /* adjustments to end on required 4 byte boundary */
9269 len = (len + 3) & ~3;
9270 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9276 if (b_offset || odd_len) {
9277 buf = kmalloc(len, GFP_KERNEL);
9281 memcpy(buf, &start, 4);
9283 memcpy(buf+len-4, &end, 4);
9284 memcpy(buf + b_offset, data, eeprom->len);
9287 ret = tg3_nvram_write_block(tp, offset, len, buf);
9295 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9297 struct tg3 *tp = netdev_priv(dev);
9299 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9300 struct phy_device *phydev;
9301 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9303 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9304 return phy_ethtool_gset(phydev, cmd);
9307 cmd->supported = (SUPPORTED_Autoneg);
9309 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9310 cmd->supported |= (SUPPORTED_1000baseT_Half |
9311 SUPPORTED_1000baseT_Full);
9313 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
9314 cmd->supported |= (SUPPORTED_100baseT_Half |
9315 SUPPORTED_100baseT_Full |
9316 SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
9321 cmd->supported |= SUPPORTED_FIBRE;
9322 cmd->port = PORT_FIBRE;
9325 cmd->advertising = tp->link_config.advertising;
9326 if (netif_running(dev)) {
9327 cmd->speed = tp->link_config.active_speed;
9328 cmd->duplex = tp->link_config.active_duplex;
9330 cmd->phy_address = tp->phy_addr;
9331 cmd->transceiver = XCVR_INTERNAL;
9332 cmd->autoneg = tp->link_config.autoneg;
9338 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9340 struct tg3 *tp = netdev_priv(dev);
9342 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9343 struct phy_device *phydev;
9344 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9346 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9347 return phy_ethtool_sset(phydev, cmd);
9350 if (cmd->autoneg != AUTONEG_ENABLE &&
9351 cmd->autoneg != AUTONEG_DISABLE)
9354 if (cmd->autoneg == AUTONEG_DISABLE &&
9355 cmd->duplex != DUPLEX_FULL &&
9356 cmd->duplex != DUPLEX_HALF)
9359 if (cmd->autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9365 mask |= ADVERTISED_1000baseT_Half |
9366 ADVERTISED_1000baseT_Full;
9368 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9369 mask |= ADVERTISED_100baseT_Half |
9370 ADVERTISED_100baseT_Full |
9371 ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;
9377 if (cmd->advertising & ~mask)
9380 mask &= (ADVERTISED_1000baseT_Half |
9381 ADVERTISED_1000baseT_Full |
9382 ADVERTISED_100baseT_Half |
9383 ADVERTISED_100baseT_Full |
9384 ADVERTISED_10baseT_Half |
9385 ADVERTISED_10baseT_Full);
9387 cmd->advertising &= mask;
9389 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
9390 if (cmd->speed != SPEED_1000)
9393 if (cmd->duplex != DUPLEX_FULL)
9396 if (cmd->speed != SPEED_100 &&
9397 cmd->speed != SPEED_10)
9402 tg3_full_lock(tp, 0);
9404 tp->link_config.autoneg = cmd->autoneg;
9405 if (cmd->autoneg == AUTONEG_ENABLE) {
9406 tp->link_config.advertising = (cmd->advertising |
9407 ADVERTISED_Autoneg);
9408 tp->link_config.speed = SPEED_INVALID;
9409 tp->link_config.duplex = DUPLEX_INVALID;
9411 tp->link_config.advertising = 0;
9412 tp->link_config.speed = cmd->speed;
9413 tp->link_config.duplex = cmd->duplex;
9416 tp->link_config.orig_speed = tp->link_config.speed;
9417 tp->link_config.orig_duplex = tp->link_config.duplex;
9418 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9420 if (netif_running(dev))
9421 tg3_setup_phy(tp, 1);
9423 tg3_full_unlock(tp);
9428 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9430 struct tg3 *tp = netdev_priv(dev);
9432 strcpy(info->driver, DRV_MODULE_NAME);
9433 strcpy(info->version, DRV_MODULE_VERSION);
9434 strcpy(info->fw_version, tp->fw_ver);
9435 strcpy(info->bus_info, pci_name(tp->pdev));
9438 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9440 struct tg3 *tp = netdev_priv(dev);
9442 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9443 device_can_wakeup(&tp->pdev->dev))
9444 wol->supported = WAKE_MAGIC;
9448 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9449 device_can_wakeup(&tp->pdev->dev))
9450 wol->wolopts = WAKE_MAGIC;
9451 memset(&wol->sopass, 0, sizeof(wol->sopass));
9454 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9456 struct tg3 *tp = netdev_priv(dev);
9457 struct device *dp = &tp->pdev->dev;
9459 if (wol->wolopts & ~WAKE_MAGIC)
9461 if ((wol->wolopts & WAKE_MAGIC) &&
9462 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9465 spin_lock_bh(&tp->lock);
9466 if (wol->wolopts & WAKE_MAGIC) {
9467 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9468 device_set_wakeup_enable(dp, true);
9470 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9471 device_set_wakeup_enable(dp, false);
9473 spin_unlock_bh(&tp->lock);
9478 static u32 tg3_get_msglevel(struct net_device *dev)
9480 struct tg3 *tp = netdev_priv(dev);
9481 return tp->msg_enable;
9484 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9486 struct tg3 *tp = netdev_priv(dev);
9487 tp->msg_enable = value;
9490 static int tg3_set_tso(struct net_device *dev, u32 value)
9492 struct tg3 *tp = netdev_priv(dev);
9494 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9499 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9500 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) {
9502 dev->features |= NETIF_F_TSO6;
9503 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9504 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9505 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9506 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9507 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9508 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9509 dev->features |= NETIF_F_TSO_ECN;
9511 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9513 return ethtool_op_set_tso(dev, value);
9516 static int tg3_nway_reset(struct net_device *dev)
9518 struct tg3 *tp = netdev_priv(dev);
9521 if (!netif_running(dev))
9524 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9527 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9528 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9530 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9534 spin_lock_bh(&tp->lock);
9536 tg3_readphy(tp, MII_BMCR, &bmcr);
9537 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9538 ((bmcr & BMCR_ANENABLE) ||
9539 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9540 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9544 spin_unlock_bh(&tp->lock);
9550 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9552 struct tg3 *tp = netdev_priv(dev);
9554 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9555 ering->rx_mini_max_pending = 0;
9556 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9557 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9559 ering->rx_jumbo_max_pending = 0;
9561 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9563 ering->rx_pending = tp->rx_pending;
9564 ering->rx_mini_pending = 0;
9565 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9566 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9568 ering->rx_jumbo_pending = 0;
9570 ering->tx_pending = tp->napi[0].tx_pending;
9573 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9575 struct tg3 *tp = netdev_priv(dev);
9576 int i, irq_sync = 0, err = 0;
9578 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9579 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9580 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9581 (ering->tx_pending <= MAX_SKB_FRAGS) ||
9582 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9583 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9586 if (netif_running(dev)) {
9592 tg3_full_lock(tp, irq_sync);
9594 tp->rx_pending = ering->rx_pending;
9596 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9597 tp->rx_pending > 63)
9598 tp->rx_pending = 63;
9599 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9601 for (i = 0; i < TG3_IRQ_MAX_VECS; i++)
9602 tp->napi[i].tx_pending = ering->tx_pending;
9604 if (netif_running(dev)) {
9605 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9606 err = tg3_restart_hw(tp, 1);
9608 tg3_netif_start(tp);
9611 tg3_full_unlock(tp);
9613 if (irq_sync && !err)
9619 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9621 struct tg3 *tp = netdev_priv(dev);
9623 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9625 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
9626 epause->rx_pause = 1;
9628 epause->rx_pause = 0;
9630 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
9631 epause->tx_pause = 1;
9633 epause->tx_pause = 0;
9636 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9638 struct tg3 *tp = netdev_priv(dev);
9641 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9642 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9645 if (epause->autoneg) {
9647 struct phy_device *phydev;
9649 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9651 if (epause->rx_pause) {
9652 if (epause->tx_pause)
9653 newadv = ADVERTISED_Pause;
9655 newadv = ADVERTISED_Pause |
9656 ADVERTISED_Asym_Pause;
9657 } else if (epause->tx_pause) {
9658 newadv = ADVERTISED_Asym_Pause;
9662 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9663 u32 oldadv = phydev->advertising &
9665 ADVERTISED_Asym_Pause);
9666 if (oldadv != newadv) {
9667 phydev->advertising &=
9668 ~(ADVERTISED_Pause |
9669 ADVERTISED_Asym_Pause);
9670 phydev->advertising |= newadv;
9671 err = phy_start_aneg(phydev);
9674 tp->link_config.advertising &=
9675 ~(ADVERTISED_Pause |
9676 ADVERTISED_Asym_Pause);
9677 tp->link_config.advertising |= newadv;
9680 if (epause->rx_pause)
9681 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9683 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
9685 if (epause->tx_pause)
9686 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9688 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9690 if (netif_running(dev))
9691 tg3_setup_flow_control(tp, 0, 0);
9696 if (netif_running(dev)) {
9701 tg3_full_lock(tp, irq_sync);
9703 if (epause->autoneg)
9704 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9706 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9707 if (epause->rx_pause)
9708 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9710 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
9711 if (epause->tx_pause)
9712 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9714 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9716 if (netif_running(dev)) {
9717 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9718 err = tg3_restart_hw(tp, 1);
9720 tg3_netif_start(tp);
9723 tg3_full_unlock(tp);
9729 static u32 tg3_get_rx_csum(struct net_device *dev)
9731 struct tg3 *tp = netdev_priv(dev);
9732 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9735 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9737 struct tg3 *tp = netdev_priv(dev);
9739 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9745 spin_lock_bh(&tp->lock);
9747 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9749 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9750 spin_unlock_bh(&tp->lock);
9755 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9757 struct tg3 *tp = netdev_priv(dev);
9759 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9765 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
9766 ethtool_op_set_tx_ipv6_csum(dev, data);
9768 ethtool_op_set_tx_csum(dev, data);
9773 static int tg3_get_sset_count (struct net_device *dev, int sset)
9777 return TG3_NUM_TEST;
9779 return TG3_NUM_STATS;
9785 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9787 switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
9800 static int tg3_phys_id(struct net_device *dev, u32 data)
9802 struct tg3 *tp = netdev_priv(dev);
9805 if (!netif_running(tp->dev))
9809 data = UINT_MAX / 2;
9811 for (i = 0; i < (data * 2); i++) {
9813 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9814 LED_CTRL_1000MBPS_ON |
9815 LED_CTRL_100MBPS_ON |
9816 LED_CTRL_10MBPS_ON |
9817 LED_CTRL_TRAFFIC_OVERRIDE |
9818 LED_CTRL_TRAFFIC_BLINK |
9819 LED_CTRL_TRAFFIC_LED);
9822 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9823 LED_CTRL_TRAFFIC_OVERRIDE);
9825 if (msleep_interruptible(500))
9828 tw32(MAC_LED_CTRL, tp->led_ctrl);
9832 static void tg3_get_ethtool_stats (struct net_device *dev,
9833 struct ethtool_stats *estats, u64 *tmp_stats)
9835 struct tg3 *tp = netdev_priv(dev);
9836 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9839 #define NVRAM_TEST_SIZE 0x100
9840 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9841 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9842 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
9843 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9844 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9846 static int tg3_test_nvram(struct tg3 *tp)
9850 int i, j, k, err = 0, size;
9852 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9855 if (tg3_nvram_read(tp, 0, &magic) != 0)
9858 if (magic == TG3_EEPROM_MAGIC)
9859 size = NVRAM_TEST_SIZE;
9860 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9861 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9862 TG3_EEPROM_SB_FORMAT_1) {
9863 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9864 case TG3_EEPROM_SB_REVISION_0:
9865 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9867 case TG3_EEPROM_SB_REVISION_2:
9868 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9870 case TG3_EEPROM_SB_REVISION_3:
9871 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9878 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9879 size = NVRAM_SELFBOOT_HW_SIZE;
9883 buf = kmalloc(size, GFP_KERNEL);
9888 for (i = 0, j = 0; i < size; i += 4, j++) {
9889 err = tg3_nvram_read_be32(tp, i, &buf[j]);
9896 /* Selfboot format */
9897 magic = be32_to_cpu(buf[0]);
9898 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9899 TG3_EEPROM_MAGIC_FW) {
9900 u8 *buf8 = (u8 *) buf, csum8 = 0;
9902 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9903 TG3_EEPROM_SB_REVISION_2) {
9904 /* For rev 2, the csum doesn't include the MBA. */
9905 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9907 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9910 for (i = 0; i < size; i++)
9923 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9924 TG3_EEPROM_MAGIC_HW) {
9925 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9926 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9927 u8 *buf8 = (u8 *) buf;
9929 /* Separate the parity bits and the data bytes. */
9930 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9931 if ((i == 0) || (i == 8)) {
9935 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9936 parity[k++] = buf8[i] & msk;
9943 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9944 parity[k++] = buf8[i] & msk;
9947 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9948 parity[k++] = buf8[i] & msk;
9951 data[j++] = buf8[i];
9955 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9956 u8 hw8 = hweight8(data[i]);
9958 if ((hw8 & 0x1) && parity[i])
9960 else if (!(hw8 & 0x1) && !parity[i])
	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != be32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != be32_to_cpu(buf[0xfc/4]))
		goto out;
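/*
 * Editor's sketch of the parity rule enforced above: a data byte plus
 * its stored parity bit must contain an odd number of set bits, which
 * is what the hweight8() comparisons check:
 */
#if 0
static int example_parity_ok(u8 data, int parity_bit_set)
{
	/* population count of the data byte plus the parity bit is odd */
	return ((hweight8(data) & 1) ^ (parity_bit_set ? 1 : 0)) == 1;
}
#endif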
9984 #define TG3_SERDES_TIMEOUT_SEC 2
9985 #define TG3_COPPER_TIMEOUT_SEC 6
9987 static int tg3_test_link(struct tg3 *tp)
9991 if (!netif_running(tp->dev))
9994 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9995 max = TG3_SERDES_TIMEOUT_SEC;
9997 max = TG3_COPPER_TIMEOUT_SEC;
9999 for (i = 0; i < max; i++) {
10000 if (netif_carrier_ok(tp->dev))
10003 if (msleep_interruptible(1000))
10010 /* Only test the commonly used registers */
10011 static int tg3_test_registers(struct tg3 *tp)
10013 int i, is_5705, is_5750;
10014 u32 offset, read_mask, write_mask, val, save_val, read_val;
10018 #define TG3_FL_5705 0x1
10019 #define TG3_FL_NOT_5705 0x2
10020 #define TG3_FL_NOT_5788 0x4
10021 #define TG3_FL_NOT_5750 0x8
10025 /* MAC Control Registers */
10026 { MAC_MODE, TG3_FL_NOT_5705,
10027 0x00000000, 0x00ef6f8c },
10028 { MAC_MODE, TG3_FL_5705,
10029 0x00000000, 0x01ef6b8c },
10030 { MAC_STATUS, TG3_FL_NOT_5705,
10031 0x03800107, 0x00000000 },
10032 { MAC_STATUS, TG3_FL_5705,
10033 0x03800100, 0x00000000 },
10034 { MAC_ADDR_0_HIGH, 0x0000,
10035 0x00000000, 0x0000ffff },
10036 { MAC_ADDR_0_LOW, 0x0000,
10037 0x00000000, 0xffffffff },
10038 { MAC_RX_MTU_SIZE, 0x0000,
10039 0x00000000, 0x0000ffff },
10040 { MAC_TX_MODE, 0x0000,
10041 0x00000000, 0x00000070 },
10042 { MAC_TX_LENGTHS, 0x0000,
10043 0x00000000, 0x00003fff },
10044 { MAC_RX_MODE, TG3_FL_NOT_5705,
10045 0x00000000, 0x000007fc },
10046 { MAC_RX_MODE, TG3_FL_5705,
10047 0x00000000, 0x000007dc },
10048 { MAC_HASH_REG_0, 0x0000,
10049 0x00000000, 0xffffffff },
10050 { MAC_HASH_REG_1, 0x0000,
10051 0x00000000, 0xffffffff },
10052 { MAC_HASH_REG_2, 0x0000,
10053 0x00000000, 0xffffffff },
10054 { MAC_HASH_REG_3, 0x0000,
10055 0x00000000, 0xffffffff },
10057 /* Receive Data and Receive BD Initiator Control Registers. */
10058 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10059 0x00000000, 0xffffffff },
10060 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10061 0x00000000, 0xffffffff },
10062 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10063 0x00000000, 0x00000003 },
10064 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10065 0x00000000, 0xffffffff },
10066 { RCVDBDI_STD_BD+0, 0x0000,
10067 0x00000000, 0xffffffff },
10068 { RCVDBDI_STD_BD+4, 0x0000,
10069 0x00000000, 0xffffffff },
10070 { RCVDBDI_STD_BD+8, 0x0000,
10071 0x00000000, 0xffff0002 },
10072 { RCVDBDI_STD_BD+0xc, 0x0000,
10073 0x00000000, 0xffffffff },
10075 /* Receive BD Initiator Control Registers. */
10076 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10077 0x00000000, 0xffffffff },
10078 { RCVBDI_STD_THRESH, TG3_FL_5705,
10079 0x00000000, 0x000003ff },
10080 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10081 0x00000000, 0xffffffff },
10083 /* Host Coalescing Control Registers. */
10084 { HOSTCC_MODE, TG3_FL_NOT_5705,
10085 0x00000000, 0x00000004 },
10086 { HOSTCC_MODE, TG3_FL_5705,
10087 0x00000000, 0x000000f6 },
10088 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10089 0x00000000, 0xffffffff },
10090 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10091 0x00000000, 0x000003ff },
10092 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10093 0x00000000, 0xffffffff },
10094 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10095 0x00000000, 0x000003ff },
10096 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10097 0x00000000, 0xffffffff },
10098 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10099 0x00000000, 0x000000ff },
10100 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10101 0x00000000, 0xffffffff },
10102 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10103 0x00000000, 0x000000ff },
10104 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10105 0x00000000, 0xffffffff },
10106 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10107 0x00000000, 0xffffffff },
10108 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10109 0x00000000, 0xffffffff },
10110 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10111 0x00000000, 0x000000ff },
10112 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10113 0x00000000, 0xffffffff },
10114 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10115 0x00000000, 0x000000ff },
10116 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10117 0x00000000, 0xffffffff },
10118 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10119 0x00000000, 0xffffffff },
10120 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10121 0x00000000, 0xffffffff },
10122 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10123 0x00000000, 0xffffffff },
10124 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10125 0x00000000, 0xffffffff },
10126 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10127 0xffffffff, 0x00000000 },
10128 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10129 0xffffffff, 0x00000000 },
10131 /* Buffer Manager Control Registers. */
10132 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10133 0x00000000, 0x007fff80 },
10134 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10135 0x00000000, 0x007fffff },
10136 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10137 0x00000000, 0x0000003f },
10138 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10139 0x00000000, 0x000001ff },
10140 { BUFMGR_MB_HIGH_WATER, 0x0000,
10141 0x00000000, 0x000001ff },
10142 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10143 0xffffffff, 0x00000000 },
10144 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10145 0xffffffff, 0x00000000 },
10147 /* Mailbox Registers */
10148 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10149 0x00000000, 0x000001ff },
10150 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10151 0x00000000, 0x000001ff },
10152 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10153 0x00000000, 0x000007ff },
10154 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10155 0x00000000, 0x000001ff },
10157 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10160 is_5705 = is_5750 = 0;
10161 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10163 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10167 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10168 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10171 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10174 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10175 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10178 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10181 offset = (u32) reg_tbl[i].offset;
10182 read_mask = reg_tbl[i].read_mask;
10183 write_mask = reg_tbl[i].write_mask;
10185 /* Save the original register content */
10186 save_val = tr32(offset);
10188 /* Determine the read-only value. */
10189 read_val = save_val & read_mask;
10191 /* Write zero to the register, then make sure the read-only bits
10192 * are not changed and the read/write bits are all zeros.
10196 val = tr32(offset);
10198 /* Test the read-only and read/write bits. */
10199 if (((val & read_mask) != read_val) || (val & write_mask))
10202 /* Write ones to all the bits defined by RdMask and WrMask, then
10203 * make sure the read-only bits are not changed and the
10204 * read/write bits are all ones.
10206 tw32(offset, read_mask | write_mask);
10208 val = tr32(offset);
10210 /* Test the read-only bits. */
10211 if ((val & read_mask) != read_val)
10214 /* Test the read/write bits. */
10215 if ((val & write_mask) != write_mask)
10218 tw32(offset, save_val);
10224 if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
10227 tw32(offset, save_val);
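/*
 * Editor's worked example of the mask convention above: for the entry
 * { MAC_RX_MTU_SIZE, 0x0000, 0x00000000, 0x0000ffff }, read_mask is 0
 * (no read-only bits) and write_mask is 0x0000ffff, so writing 0 must
 * read back as 0 and writing the combined masks (0x0000ffff) must read
 * back with all sixteen low bits set.
 */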
10231 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10233 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10237 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10238 for (j = 0; j < len; j += 4) {
10241 tg3_write_mem(tp, offset + j, test_pattern[i]);
10242 tg3_read_mem(tp, offset + j, &val);
10243 if (val != test_pattern[i])
10250 static int tg3_test_memory(struct tg3 *tp)
10252 static struct mem_entry {
10255 } mem_tbl_570x[] = {
10256 { 0x00000000, 0x00b50},
10257 { 0x00002000, 0x1c000},
10258 { 0xffffffff, 0x00000}
10259 }, mem_tbl_5705[] = {
10260 { 0x00000100, 0x0000c},
10261 { 0x00000200, 0x00008},
10262 { 0x00004000, 0x00800},
10263 { 0x00006000, 0x01000},
10264 { 0x00008000, 0x02000},
10265 { 0x00010000, 0x0e000},
10266 { 0xffffffff, 0x00000}
10267 }, mem_tbl_5755[] = {
10268 { 0x00000200, 0x00008},
10269 { 0x00004000, 0x00800},
10270 { 0x00006000, 0x00800},
10271 { 0x00008000, 0x02000},
10272 { 0x00010000, 0x0c000},
10273 { 0xffffffff, 0x00000}
10274 }, mem_tbl_5906[] = {
10275 { 0x00000200, 0x00008},
10276 { 0x00004000, 0x00400},
10277 { 0x00006000, 0x00400},
10278 { 0x00008000, 0x01000},
10279 { 0x00010000, 0x01000},
10280 { 0xffffffff, 0x00000}
10282 struct mem_entry *mem_tbl;
10286 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10287 mem_tbl = mem_tbl_5755;
10288 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10289 mem_tbl = mem_tbl_5906;
10290 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
10291 mem_tbl = mem_tbl_5705;
10293 mem_tbl = mem_tbl_570x;
10295 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10296 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10297 mem_tbl[i].len)) != 0)
10304 #define TG3_MAC_LOOPBACK 0
10305 #define TG3_PHY_LOOPBACK 1
10307 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10309 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10310 u32 desc_idx, coal_now;
10311 struct sk_buff *skb, *rx_skb;
10314 int num_pkts, tx_len, rx_len, i, err;
10315 struct tg3_rx_buffer_desc *desc;
10316 struct tg3_napi *tnapi, *rnapi;
10317 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
10319 if (tp->irq_cnt > 1) {
10320 tnapi = &tp->napi[1];
10321 rnapi = &tp->napi[1];
10323 tnapi = &tp->napi[0];
10324 rnapi = &tp->napi[0];
10326 coal_now = tnapi->coal_now | rnapi->coal_now;
10328 if (loopback_mode == TG3_MAC_LOOPBACK) {
10329 /* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
10333 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10336 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10337 MAC_MODE_PORT_INT_LPBACK;
10338 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10339 mac_mode |= MAC_MODE_LINK_POLARITY;
10340 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10341 mac_mode |= MAC_MODE_PORT_MODE_MII;
10343 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10344 tw32(MAC_MODE, mac_mode);
10345 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10348 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10349 tg3_phy_fet_toggle_apd(tp, false);
10350 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10352 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10354 tg3_phy_toggle_automdix(tp, 0);
10356 tg3_writephy(tp, MII_BMCR, val);
10359 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10360 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10361 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10362 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800);
10363 mac_mode |= MAC_MODE_PORT_MODE_MII;
10365 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10367 /* reset to prevent losing 1st rx packet intermittently */
10368 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10369 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10371 tw32_f(MAC_RX_MODE, tp->rx_mode);
10373 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10374 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10375 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10376 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10377 mac_mode |= MAC_MODE_LINK_POLARITY;
10378 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10379 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10381 tw32(MAC_MODE, mac_mode);
10389 skb = netdev_alloc_skb(tp->dev, tx_len);
10393 tx_data = skb_put(skb, tx_len);
10394 memcpy(tx_data, tp->dev->dev_addr, 6);
10395 memset(tx_data + 6, 0x0, 8);
10397 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10399 for (i = 14; i < tx_len; i++)
10400 tx_data[i] = (u8) (i & 0xff);
10402 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
10403 dev_kfree_skb(skb);
10407 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10412 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
10416 tg3_set_txd(tnapi, tnapi->tx_prod,
10417 skb_shinfo(skb)->dma_head, tx_len, 0, 1);
10422 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
10423 tr32_mailbox(tnapi->prodmbox);
10427 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
10428 for (i = 0; i < 35; i++) {
10429 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10434 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
10435 rx_idx = rnapi->hw_status->idx[0].rx_producer;
10436 if ((tx_idx == tnapi->tx_prod) &&
10437 (rx_idx == (rx_start_idx + num_pkts)))
10441 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
10442 dev_kfree_skb(skb);
10444 if (tx_idx != tnapi->tx_prod)
10447 if (rx_idx != rx_start_idx + num_pkts)
10450 desc = &rnapi->rx_rcb[rx_start_idx];
10451 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10452 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10453 if (opaque_key != RXD_OPAQUE_RING_STD)
10456 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10457 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10460 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10461 if (rx_len != tx_len)
10464 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
10466 map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
10467 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10469 for (i = 14; i < tx_len; i++) {
10470 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10475 /* tg3_free_rings will unmap and free the rx_skb */
10480 #define TG3_MAC_LOOPBACK_FAILED 1
10481 #define TG3_PHY_LOOPBACK_FAILED 2
10482 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10483 TG3_PHY_LOOPBACK_FAILED)
10485 static int tg3_test_loopback(struct tg3 *tp)
10490 if (!netif_running(tp->dev))
10491 return TG3_LOOPBACK_FAILED;
10493 err = tg3_reset_hw(tp, 1);
10495 return TG3_LOOPBACK_FAILED;
10497 /* Turn off gphy autopowerdown. */
10498 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10499 tg3_phy_toggle_apd(tp, false);
10501 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10505 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10507 /* Wait for up to 40 microseconds to acquire lock. */
10508 for (i = 0; i < 4; i++) {
10509 status = tr32(TG3_CPMU_MUTEX_GNT);
10510 if (status == CPMU_MUTEX_GNT_DRIVER)
10515 if (status != CPMU_MUTEX_GNT_DRIVER)
10516 return TG3_LOOPBACK_FAILED;
10518 /* Turn off link-based power management. */
10519 cpmuctrl = tr32(TG3_CPMU_CTRL);
10520 tw32(TG3_CPMU_CTRL,
10521 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10522 CPMU_CTRL_LINK_AWARE_MODE));
10525 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10526 err |= TG3_MAC_LOOPBACK_FAILED;
10528 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10529 tw32(TG3_CPMU_CTRL, cpmuctrl);
10531 /* Release the mutex */
10532 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10535 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10536 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10537 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10538 err |= TG3_PHY_LOOPBACK_FAILED;
10541 /* Re-enable gphy autopowerdown. */
10542 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10543 tg3_phy_toggle_apd(tp, true);
10548 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10551 struct tg3 *tp = netdev_priv(dev);
10553 if (tp->link_config.phy_is_low_power)
10554 tg3_set_power_state(tp, PCI_D0);
10556 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10558 if (tg3_test_nvram(tp) != 0) {
10559 etest->flags |= ETH_TEST_FL_FAILED;
10562 if (tg3_test_link(tp) != 0) {
10563 etest->flags |= ETH_TEST_FL_FAILED;
10566 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10567 int err, err2 = 0, irq_sync = 0;
10569 if (netif_running(dev)) {
10571 tg3_netif_stop(tp);
10575 tg3_full_lock(tp, irq_sync);
10577 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10578 err = tg3_nvram_lock(tp);
10579 tg3_halt_cpu(tp, RX_CPU_BASE);
10580 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10581 tg3_halt_cpu(tp, TX_CPU_BASE);
10583 tg3_nvram_unlock(tp);
10585 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10588 if (tg3_test_registers(tp) != 0) {
10589 etest->flags |= ETH_TEST_FL_FAILED;
10592 if (tg3_test_memory(tp) != 0) {
10593 etest->flags |= ETH_TEST_FL_FAILED;
10596 if ((data[4] = tg3_test_loopback(tp)) != 0)
10597 etest->flags |= ETH_TEST_FL_FAILED;
10599 tg3_full_unlock(tp);
10601 if (tg3_test_interrupt(tp) != 0) {
10602 etest->flags |= ETH_TEST_FL_FAILED;
10606 tg3_full_lock(tp, 0);
10608 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10609 if (netif_running(dev)) {
10610 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10611 err2 = tg3_restart_hw(tp, 1);
10613 tg3_netif_start(tp);
10616 tg3_full_unlock(tp);
10618 if (irq_sync && !err2)
10621 if (tp->link_config.phy_is_low_power)
10622 tg3_set_power_state(tp, PCI_D3hot);
10626 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10628 struct mii_ioctl_data *data = if_mii(ifr);
10629 struct tg3 *tp = netdev_priv(dev);
10632 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10633 struct phy_device *phydev;
10634 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10636 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10637 return phy_mii_ioctl(phydev, data, cmd);
10642 data->phy_id = tp->phy_addr;
10645 case SIOCGMIIREG: {
10648 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10649 break; /* We have no PHY */
10651 if (tp->link_config.phy_is_low_power)
10654 spin_lock_bh(&tp->lock);
10655 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10656 spin_unlock_bh(&tp->lock);
10658 data->val_out = mii_regval;
10664 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10665 break; /* We have no PHY */
10667 if (tp->link_config.phy_is_low_power)
10670 spin_lock_bh(&tp->lock);
10671 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10672 spin_unlock_bh(&tp->lock);
10680 return -EOPNOTSUPP;
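/*
 * Editor's sketch (hypothetical userspace caller, not driver code):
 * how the SIOCGMIIPHY/SIOCGMIIREG paths above are typically exercised.
 * The interface name "eth0" and the printf are illustrative only:
 */
#if 0
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {	/* fills mii->phy_id */
		mii->reg_num = MII_BMSR;		/* PHY register 1 */
		if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
			printf("BMSR = 0x%04x\n", mii->val_out);
	}
#endif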
10683 #if TG3_VLAN_TAG_USED
10684 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10686 struct tg3 *tp = netdev_priv(dev);
10688 if (!netif_running(dev)) {
10693 tg3_netif_stop(tp);
10695 tg3_full_lock(tp, 0);
10699 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10700 __tg3_set_rx_mode(dev);
10702 tg3_netif_start(tp);
10704 tg3_full_unlock(tp);
10708 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10710 struct tg3 *tp = netdev_priv(dev);
10712 memcpy(ec, &tp->coal, sizeof(*ec));
10716 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10718 struct tg3 *tp = netdev_priv(dev);
10719 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10720 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10722 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10723 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10724 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10725 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10726 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10729 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10730 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10731 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10732 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10733 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10734 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10735 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10736 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10737 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10738 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10741 /* No rx interrupts will be generated if both are zero */
10742 if ((ec->rx_coalesce_usecs == 0) &&
10743 (ec->rx_max_coalesced_frames == 0))
10746 /* No tx interrupts will be generated if both are zero */
10747 if ((ec->tx_coalesce_usecs == 0) &&
10748 (ec->tx_max_coalesced_frames == 0))
10751 /* Only copy relevant parameters, ignore all others. */
10752 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10753 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10754 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10755 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10756 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10757 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10758 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10759 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10760 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10762 if (netif_running(dev)) {
10763 tg3_full_lock(tp, 0);
10764 __tg3_set_coalesce(tp, &tp->coal);
10765 tg3_full_unlock(tp);
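/*
 * Editor's note with a worked example: the validation above rejects any
 * setting that could silence interrupts entirely, e.g.
 * { rx_coalesce_usecs = 0, rx_max_coalesced_frames = 0 } fails, while
 * { rx_coalesce_usecs = 20, rx_max_coalesced_frames = 5 } raises an
 * interrupt after 20 usecs or 5 received frames, whichever occurs
 * first.
 */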
10770 static const struct ethtool_ops tg3_ethtool_ops = {
10771 .get_settings = tg3_get_settings,
10772 .set_settings = tg3_set_settings,
10773 .get_drvinfo = tg3_get_drvinfo,
10774 .get_regs_len = tg3_get_regs_len,
10775 .get_regs = tg3_get_regs,
10776 .get_wol = tg3_get_wol,
10777 .set_wol = tg3_set_wol,
10778 .get_msglevel = tg3_get_msglevel,
10779 .set_msglevel = tg3_set_msglevel,
10780 .nway_reset = tg3_nway_reset,
10781 .get_link = ethtool_op_get_link,
10782 .get_eeprom_len = tg3_get_eeprom_len,
10783 .get_eeprom = tg3_get_eeprom,
10784 .set_eeprom = tg3_set_eeprom,
10785 .get_ringparam = tg3_get_ringparam,
10786 .set_ringparam = tg3_set_ringparam,
10787 .get_pauseparam = tg3_get_pauseparam,
10788 .set_pauseparam = tg3_set_pauseparam,
10789 .get_rx_csum = tg3_get_rx_csum,
10790 .set_rx_csum = tg3_set_rx_csum,
10791 .set_tx_csum = tg3_set_tx_csum,
10792 .set_sg = ethtool_op_set_sg,
10793 .set_tso = tg3_set_tso,
10794 .self_test = tg3_self_test,
10795 .get_strings = tg3_get_strings,
10796 .phys_id = tg3_phys_id,
10797 .get_ethtool_stats = tg3_get_ethtool_stats,
10798 .get_coalesce = tg3_get_coalesce,
10799 .set_coalesce = tg3_set_coalesce,
10800 .get_sset_count = tg3_get_sset_count,
10803 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10805 u32 cursize, val, magic;
10807 tp->nvram_size = EEPROM_CHIP_SIZE;
10809 if (tg3_nvram_read(tp, 0, &magic) != 0)
10812 if ((magic != TG3_EEPROM_MAGIC) &&
10813 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10814 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10818 * Size the chip by reading offsets at increasing powers of two.
10819 * When we encounter our validation signature, we know the addressing
10820 * has wrapped around, and thus have our chip size.
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
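/*
 * Editor's worked example of the sizing loop above: on a 512-byte part
 * the probes at 0x10, 0x20, 0x40, 0x80 and 0x100 read ordinary data,
 * but at 0x200 the address wraps to offset 0 and the magic signature
 * reappears, so the loop breaks with cursize = 0x200 and that becomes
 * nvram_size.
 */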
10837 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10841 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
10842 tg3_nvram_read(tp, 0, &val) != 0)
10845 /* Selfboot format */
10846 if (val != TG3_EEPROM_MAGIC) {
10847 tg3_get_eeprom_size(tp);
10851 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10853 /* This is confusing. We want to operate on the
10854 * 16-bit value at offset 0xf2. The tg3_nvram_read()
10855 * call will read from NVRAM and byteswap the data
10856 * according to the byteswapping settings for all
10857 * other register accesses. This ensures the data we
10858 * want will always reside in the lower 16-bits.
10859 * However, the data in NVRAM is in LE format, which
10860 * means the data from the NVRAM read will always be
10861 * opposite the endianness of the CPU. The 16-bit
10862 * byteswap then brings the data to CPU endianness.
10864 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
10868 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
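/*
 * Editor's worked example of the byte handling above: if the size field
 * at offset 0xf2 holds 512 (a 512KB part), the low 16 bits of val
 * arrive byte-swapped relative to the CPU as 0x0002, and swab16()
 * restores 0x0200 = 512, giving a 512 * 1024 byte nvram_size on both
 * big- and little-endian hosts.
 */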
10871 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10875 nvcfg1 = tr32(NVRAM_CFG1);
10876 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10877 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10879 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10880 tw32(NVRAM_CFG1, nvcfg1);
10883 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10884 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10885 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10886 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10887 tp->nvram_jedecnum = JEDEC_ATMEL;
10888 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10889 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10891 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10892 tp->nvram_jedecnum = JEDEC_ATMEL;
10893 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10895 case FLASH_VENDOR_ATMEL_EEPROM:
10896 tp->nvram_jedecnum = JEDEC_ATMEL;
10897 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10898 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10900 case FLASH_VENDOR_ST:
10901 tp->nvram_jedecnum = JEDEC_ST;
10902 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10903 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10905 case FLASH_VENDOR_SAIFUN:
10906 tp->nvram_jedecnum = JEDEC_SAIFUN;
10907 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10909 case FLASH_VENDOR_SST_SMALL:
10910 case FLASH_VENDOR_SST_LARGE:
10911 tp->nvram_jedecnum = JEDEC_SST;
10912 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10916 tp->nvram_jedecnum = JEDEC_ATMEL;
10917 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10918 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10922 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
10924 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10925 case FLASH_5752PAGE_SIZE_256:
10926 tp->nvram_pagesize = 256;
10928 case FLASH_5752PAGE_SIZE_512:
10929 tp->nvram_pagesize = 512;
10931 case FLASH_5752PAGE_SIZE_1K:
10932 tp->nvram_pagesize = 1024;
10934 case FLASH_5752PAGE_SIZE_2K:
10935 tp->nvram_pagesize = 2048;
10937 case FLASH_5752PAGE_SIZE_4K:
10938 tp->nvram_pagesize = 4096;
10940 case FLASH_5752PAGE_SIZE_264:
10941 tp->nvram_pagesize = 264;
10943 case FLASH_5752PAGE_SIZE_528:
10944 tp->nvram_pagesize = 528;
10949 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10953 nvcfg1 = tr32(NVRAM_CFG1);
10955 /* NVRAM protection for TPM */
10956 if (nvcfg1 & (1 << 27))
10957 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10959 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10960 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10961 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10962 tp->nvram_jedecnum = JEDEC_ATMEL;
10963 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10965 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10966 tp->nvram_jedecnum = JEDEC_ATMEL;
10967 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10968 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10970 case FLASH_5752VENDOR_ST_M45PE10:
10971 case FLASH_5752VENDOR_ST_M45PE20:
10972 case FLASH_5752VENDOR_ST_M45PE40:
10973 tp->nvram_jedecnum = JEDEC_ST;
10974 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10975 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10979 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10980 tg3_nvram_get_pagesize(tp, nvcfg1);
10982 /* For eeprom, set pagesize to maximum eeprom size */
10983 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10985 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10986 tw32(NVRAM_CFG1, nvcfg1);
10990 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10992 u32 nvcfg1, protect = 0;
10994 nvcfg1 = tr32(NVRAM_CFG1);
10996 /* NVRAM protection for TPM */
10997 if (nvcfg1 & (1 << 27)) {
10998 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
11002 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11004 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11005 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11006 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11007 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11008 tp->nvram_jedecnum = JEDEC_ATMEL;
11009 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11010 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11011 tp->nvram_pagesize = 264;
11012 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11013 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11014 tp->nvram_size = (protect ? 0x3e200 :
11015 TG3_NVRAM_SIZE_512KB);
11016 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11017 tp->nvram_size = (protect ? 0x1f200 :
11018 TG3_NVRAM_SIZE_256KB);
11020 tp->nvram_size = (protect ? 0x1f200 :
11021 TG3_NVRAM_SIZE_128KB);
11023 case FLASH_5752VENDOR_ST_M45PE10:
11024 case FLASH_5752VENDOR_ST_M45PE20:
11025 case FLASH_5752VENDOR_ST_M45PE40:
11026 tp->nvram_jedecnum = JEDEC_ST;
11027 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11028 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11029 tp->nvram_pagesize = 256;
11030 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11031 tp->nvram_size = (protect ?
11032 TG3_NVRAM_SIZE_64KB :
11033 TG3_NVRAM_SIZE_128KB);
11034 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11035 tp->nvram_size = (protect ?
11036 TG3_NVRAM_SIZE_64KB :
11037 TG3_NVRAM_SIZE_256KB);
11039 tp->nvram_size = (protect ?
11040 TG3_NVRAM_SIZE_128KB :
11041 TG3_NVRAM_SIZE_512KB);
11046 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11050 nvcfg1 = tr32(NVRAM_CFG1);
11052 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11053 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11054 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11055 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11056 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11057 tp->nvram_jedecnum = JEDEC_ATMEL;
11058 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11059 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11061 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11062 tw32(NVRAM_CFG1, nvcfg1);
11064 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11065 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11066 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11067 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11068 tp->nvram_jedecnum = JEDEC_ATMEL;
11069 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11070 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11071 tp->nvram_pagesize = 264;
11073 case FLASH_5752VENDOR_ST_M45PE10:
11074 case FLASH_5752VENDOR_ST_M45PE20:
11075 case FLASH_5752VENDOR_ST_M45PE40:
11076 tp->nvram_jedecnum = JEDEC_ST;
11077 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11078 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11079 tp->nvram_pagesize = 256;
11084 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11086 u32 nvcfg1, protect = 0;
11088 nvcfg1 = tr32(NVRAM_CFG1);
11090 /* NVRAM protection for TPM */
11091 if (nvcfg1 & (1 << 27)) {
11092 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
11096 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11098 case FLASH_5761VENDOR_ATMEL_ADB021D:
11099 case FLASH_5761VENDOR_ATMEL_ADB041D:
11100 case FLASH_5761VENDOR_ATMEL_ADB081D:
11101 case FLASH_5761VENDOR_ATMEL_ADB161D:
11102 case FLASH_5761VENDOR_ATMEL_MDB021D:
11103 case FLASH_5761VENDOR_ATMEL_MDB041D:
11104 case FLASH_5761VENDOR_ATMEL_MDB081D:
11105 case FLASH_5761VENDOR_ATMEL_MDB161D:
11106 tp->nvram_jedecnum = JEDEC_ATMEL;
11107 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11108 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11109 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11110 tp->nvram_pagesize = 256;
11112 case FLASH_5761VENDOR_ST_A_M45PE20:
11113 case FLASH_5761VENDOR_ST_A_M45PE40:
11114 case FLASH_5761VENDOR_ST_A_M45PE80:
11115 case FLASH_5761VENDOR_ST_A_M45PE16:
11116 case FLASH_5761VENDOR_ST_M_M45PE20:
11117 case FLASH_5761VENDOR_ST_M_M45PE40:
11118 case FLASH_5761VENDOR_ST_M_M45PE80:
11119 case FLASH_5761VENDOR_ST_M_M45PE16:
11120 tp->nvram_jedecnum = JEDEC_ST;
11121 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11122 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11123 tp->nvram_pagesize = 256;
11128 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11131 case FLASH_5761VENDOR_ATMEL_ADB161D:
11132 case FLASH_5761VENDOR_ATMEL_MDB161D:
11133 case FLASH_5761VENDOR_ST_A_M45PE16:
11134 case FLASH_5761VENDOR_ST_M_M45PE16:
11135 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11137 case FLASH_5761VENDOR_ATMEL_ADB081D:
11138 case FLASH_5761VENDOR_ATMEL_MDB081D:
11139 case FLASH_5761VENDOR_ST_A_M45PE80:
11140 case FLASH_5761VENDOR_ST_M_M45PE80:
11141 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11143 case FLASH_5761VENDOR_ATMEL_ADB041D:
11144 case FLASH_5761VENDOR_ATMEL_MDB041D:
11145 case FLASH_5761VENDOR_ST_A_M45PE40:
11146 case FLASH_5761VENDOR_ST_M_M45PE40:
11147 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11149 case FLASH_5761VENDOR_ATMEL_ADB021D:
11150 case FLASH_5761VENDOR_ATMEL_MDB021D:
11151 case FLASH_5761VENDOR_ST_A_M45PE20:
11152 case FLASH_5761VENDOR_ST_M_M45PE20:
11153 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11159 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11161 tp->nvram_jedecnum = JEDEC_ATMEL;
11162 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11163 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11166 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11170 nvcfg1 = tr32(NVRAM_CFG1);
11172 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11173 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11174 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11175 tp->nvram_jedecnum = JEDEC_ATMEL;
11176 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11177 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11179 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11180 tw32(NVRAM_CFG1, nvcfg1);
11182 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11183 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11184 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11185 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11186 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11187 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11188 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11189 tp->nvram_jedecnum = JEDEC_ATMEL;
11190 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11191 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11193 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11194 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11195 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11196 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11197 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11199 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11200 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11201 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11203 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11204 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11205 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11209 case FLASH_5752VENDOR_ST_M45PE10:
11210 case FLASH_5752VENDOR_ST_M45PE20:
11211 case FLASH_5752VENDOR_ST_M45PE40:
11212 tp->nvram_jedecnum = JEDEC_ST;
11213 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11214 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11216 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11217 case FLASH_5752VENDOR_ST_M45PE10:
11218 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11220 case FLASH_5752VENDOR_ST_M45PE20:
11221 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11223 case FLASH_5752VENDOR_ST_M45PE40:
11224 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11229 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11233 tg3_nvram_get_pagesize(tp, nvcfg1);
11234 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11235 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11239 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11243 nvcfg1 = tr32(NVRAM_CFG1);
11245 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11246 case FLASH_5717VENDOR_ATMEL_EEPROM:
11247 case FLASH_5717VENDOR_MICRO_EEPROM:
11248 tp->nvram_jedecnum = JEDEC_ATMEL;
11249 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11250 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11252 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11253 tw32(NVRAM_CFG1, nvcfg1);
11255 case FLASH_5717VENDOR_ATMEL_MDB011D:
11256 case FLASH_5717VENDOR_ATMEL_ADB011B:
11257 case FLASH_5717VENDOR_ATMEL_ADB011D:
11258 case FLASH_5717VENDOR_ATMEL_MDB021D:
11259 case FLASH_5717VENDOR_ATMEL_ADB021B:
11260 case FLASH_5717VENDOR_ATMEL_ADB021D:
11261 case FLASH_5717VENDOR_ATMEL_45USPT:
11262 tp->nvram_jedecnum = JEDEC_ATMEL;
11263 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11264 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11266 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11267 case FLASH_5717VENDOR_ATMEL_MDB021D:
11268 case FLASH_5717VENDOR_ATMEL_ADB021B:
11269 case FLASH_5717VENDOR_ATMEL_ADB021D:
11270 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11273 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11277 case FLASH_5717VENDOR_ST_M_M25PE10:
11278 case FLASH_5717VENDOR_ST_A_M25PE10:
11279 case FLASH_5717VENDOR_ST_M_M45PE10:
11280 case FLASH_5717VENDOR_ST_A_M45PE10:
11281 case FLASH_5717VENDOR_ST_M_M25PE20:
11282 case FLASH_5717VENDOR_ST_A_M25PE20:
11283 case FLASH_5717VENDOR_ST_M_M45PE20:
11284 case FLASH_5717VENDOR_ST_A_M45PE20:
11285 case FLASH_5717VENDOR_ST_25USPT:
11286 case FLASH_5717VENDOR_ST_45USPT:
11287 tp->nvram_jedecnum = JEDEC_ST;
11288 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11289 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11291 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11292 case FLASH_5717VENDOR_ST_M_M25PE20:
11293 case FLASH_5717VENDOR_ST_A_M25PE20:
11294 case FLASH_5717VENDOR_ST_M_M45PE20:
11295 case FLASH_5717VENDOR_ST_A_M45PE20:
11296 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11299 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11304 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11308 tg3_nvram_get_pagesize(tp, nvcfg1);
11309 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11310 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11313 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
11314 static void __devinit tg3_nvram_init(struct tg3 *tp)
11316 tw32_f(GRC_EEPROM_ADDR,
11317 (EEPROM_ADDR_FSM_RESET |
11318 (EEPROM_DEFAULT_CLOCK_PERIOD <<
11319 EEPROM_ADDR_CLKPERD_SHIFT)));
11323 /* Enable seeprom accesses. */
11324 tw32_f(GRC_LOCAL_CTRL,
11325 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
11328 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11329 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
11330 tp->tg3_flags |= TG3_FLAG_NVRAM;
11332 if (tg3_nvram_lock(tp)) {
11333 printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
11334 "tg3_nvram_init failed.\n", tp->dev->name);
11337 tg3_enable_nvram_access(tp);
11339 tp->nvram_size = 0;
11341 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11342 tg3_get_5752_nvram_info(tp);
11343 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11344 tg3_get_5755_nvram_info(tp);
11345 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11346 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11347 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11348 tg3_get_5787_nvram_info(tp);
11349 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11350 tg3_get_5761_nvram_info(tp);
11351 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11352 tg3_get_5906_nvram_info(tp);
11353 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
11354 tg3_get_57780_nvram_info(tp);
11355 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
11356 tg3_get_5717_nvram_info(tp);
11358 tg3_get_nvram_info(tp);
11360 if (tp->nvram_size == 0)
11361 tg3_get_nvram_size(tp);
11363 tg3_disable_nvram_access(tp);
11364 tg3_nvram_unlock(tp);
11367 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
11369 tg3_get_eeprom_size(tp);
11373 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11374 u32 offset, u32 len, u8 *buf)
11379 for (i = 0; i < len; i += 4) {
11385 memcpy(&data, buf + i, 4);
11388 * The SEEPROM interface expects the data to always be opposite
11389 * the native endian format. We accomplish this by reversing
11390 * all the operations that would have been performed on the
11391 * data from a call to tg3_nvram_read_be32().
11393 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
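/* Worked example (illustrative, not from the original source): if buf
 * carries the bytes {0x12, 0x34, 0x56, 0x78}, be32_to_cpu() yields
 * 0x12345678 on any host, and swab32() then hands 0x78563412 to the
 * SEEPROM data register -- the mirror image of what
 * tg3_nvram_read_be32() would have produced for the same bytes.
 */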
11395 val = tr32(GRC_EEPROM_ADDR);
11396 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11398 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11399 EEPROM_ADDR_READ);
11400 tw32(GRC_EEPROM_ADDR, val |
11401 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11402 (addr & EEPROM_ADDR_ADDR_MASK) |
11403 EEPROM_ADDR_START |
11404 EEPROM_ADDR_WRITE);
11406 for (j = 0; j < 1000; j++) {
11407 val = tr32(GRC_EEPROM_ADDR);
11409 if (val & EEPROM_ADDR_COMPLETE)
11413 if (!(val & EEPROM_ADDR_COMPLETE)) {
11422 /* offset and length are dword aligned */
11423 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11427 u32 pagesize = tp->nvram_pagesize;
11428 u32 pagemask = pagesize - 1;
11432 tmp = kmalloc(pagesize, GFP_KERNEL);
11438 u32 phy_addr, page_off, size;
11440 phy_addr = offset & ~pagemask;
11442 for (j = 0; j < pagesize; j += 4) {
11443 ret = tg3_nvram_read_be32(tp, phy_addr + j,
11444 (__be32 *) (tmp + j));
11451 page_off = offset & pagemask;
11458 memcpy(tmp + page_off, buf, size);
11460 offset = offset + (pagesize - page_off);
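/* Illustrative page math, assuming a 256-byte page: for offset 300,
 * phy_addr = 300 & ~255 = 256 (the page base), page_off = 44, and the
 * next pass resumes at offset 300 + (256 - 44) = 512, i.e. the start
 * of the following page.
 */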
11462 tg3_enable_nvram_access(tp);
11465 * Before we can erase the flash page, we need
11466 * to issue a special "write enable" command.
11468 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11470 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11473 /* Erase the target page */
11474 tw32(NVRAM_ADDR, phy_addr);
11476 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11477 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11479 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11482 /* Issue another write enable to start the write. */
11483 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11485 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11488 for (j = 0; j < pagesize; j += 4) {
11491 data = *((__be32 *) (tmp + j));
11493 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11495 tw32(NVRAM_ADDR, phy_addr + j);
11497 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11501 nvram_cmd |= NVRAM_CMD_FIRST;
11502 else if (j == (pagesize - 4))
11503 nvram_cmd |= NVRAM_CMD_LAST;
11505 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11512 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11513 tg3_nvram_exec_cmd(tp, nvram_cmd);
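/* Recap of the unbuffered programming sequence sketched above:
 *   1. read the whole target page into the bounce buffer,
 *   2. merge in the caller's data, issue WREN, then ERASE the page,
 *   3. WREN again and program the page word by word (FIRST on the
 *      first word, LAST on the final one),
 *   4. WRDI here to drop write access once the loop is done.
 */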
11520 /* offset and length are dword aligned */
11521 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11526 for (i = 0; i < len; i += 4, offset += 4) {
11527 u32 page_off, phy_addr, nvram_cmd;
11530 memcpy(&data, buf + i, 4);
11531 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11533 page_off = offset % tp->nvram_pagesize;
11535 phy_addr = tg3_nvram_phys_addr(tp, offset);
11537 tw32(NVRAM_ADDR, phy_addr);
11539 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11541 if ((page_off == 0) || (i == 0))
11542 nvram_cmd |= NVRAM_CMD_FIRST;
11543 if (page_off == (tp->nvram_pagesize - 4))
11544 nvram_cmd |= NVRAM_CMD_LAST;
11546 if (i == (len - 4))
11547 nvram_cmd |= NVRAM_CMD_LAST;
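/* Example (illustrative): with a 264-byte buffered page, the word
 * written at page_off 0 carries NVRAM_CMD_FIRST, the word at
 * page_off 260 carries NVRAM_CMD_LAST, and the final word of the
 * whole transfer is tagged LAST as well, so each page is opened and
 * closed exactly once.
 */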
11549 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11550 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
11551 (tp->nvram_jedecnum == JEDEC_ST) &&
11552 (nvram_cmd & NVRAM_CMD_FIRST)) {
11554 if ((ret = tg3_nvram_exec_cmd(tp,
11555 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11560 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11561 /* We always do complete word writes to eeprom. */
11562 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11565 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11571 /* offset and length are dword aligned */
11572 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11576 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11577 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11578 ~GRC_LCLCTRL_GPIO_OUTPUT1);
11582 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11583 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11588 ret = tg3_nvram_lock(tp);
11592 tg3_enable_nvram_access(tp);
11593 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11594 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
11595 tw32(NVRAM_WRITE1, 0x406);
11597 grc_mode = tr32(GRC_MODE);
11598 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11600 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11601 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11603 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11607 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11611 grc_mode = tr32(GRC_MODE);
11612 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11614 tg3_disable_nvram_access(tp);
11615 tg3_nvram_unlock(tp);
11618 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11619 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11626 struct subsys_tbl_ent {
11627 u16 subsys_vendor, subsys_devid;
11631 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11632 /* Broadcom boards. */
11633 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11634 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11635 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11636 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
11637 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11638 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11639 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
11640 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11641 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11642 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11643 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11646 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11647 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11648 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
11649 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11650 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11653 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11654 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11655 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11656 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11658 /* Compaq boards. */
11659 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11660 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11661 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
11662 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11663 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11666 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11669 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11673 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11674 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11675 tp->pdev->subsystem_vendor) &&
11676 (subsys_id_to_phy_id[i].subsys_devid ==
11677 tp->pdev->subsystem_device))
11678 return &subsys_id_to_phy_id[i];
11683 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11688 /* On some early chips the SRAM cannot be accessed in D3hot state,
11689 * so we need to make sure we're in D0.
11691 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11692 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11693 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11696 /* Make sure register accesses (indirect or otherwise)
11697 * will function correctly.
11699 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11700 tp->misc_host_ctrl);
11702 /* The memory arbiter has to be enabled in order for SRAM accesses
11703 * to succeed. Normally on powerup the tg3 chip firmware will make
11704 * sure it is enabled, but other entities such as system netboot
11705 * code might disable it.
11707 val = tr32(MEMARB_MODE);
11708 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11710 tp->phy_id = PHY_ID_INVALID;
11711 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11713 /* Assume an onboard device and WOL capable by default. */
11714 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
11716 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11717 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
11718 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11719 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11721 val = tr32(VCPU_CFGSHDW);
11722 if (val & VCPU_CFGSHDW_ASPM_DBNC)
11723 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11724 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
11725 (val & VCPU_CFGSHDW_WOL_MAGPKT))
11726 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11730 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11731 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11732 u32 nic_cfg, led_cfg;
11733 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
11734 int eeprom_phy_serdes = 0;
11736 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11737 tp->nic_sram_data_cfg = nic_cfg;
11739 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11740 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11741 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11742 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11743 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11744 (ver > 0) && (ver < 0x100))
11745 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11747 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11748 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11750 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11751 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11752 eeprom_phy_serdes = 1;
11754 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11755 if (nic_phy_id != 0) {
11756 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11757 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11759 eeprom_phy_id = (id1 >> 16) << 10;
11760 eeprom_phy_id |= (id2 & 0xfc00) << 16;
11761 eeprom_phy_id |= (id2 & 0x03ff) << 0;
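/* This mirrors the live MII_PHYSID1/2 packing used later in
 * tg3_phy_probe(): id1 supplies the upper OUI bits, id2 the remaining
 * OUI plus model and revision (illustrative note).
 */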
11765 tp->phy_id = eeprom_phy_id;
11766 if (eeprom_phy_serdes) {
11767 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
11768 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11770 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11773 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11774 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11775 SHASTA_EXT_LED_MODE_MASK);
11777 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11781 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11782 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11785 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11786 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11789 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11790 tp->led_ctrl = LED_CTRL_MODE_MAC;
11792 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11793 * read on some older 5700/5701 bootcode.
11795 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11796 ASIC_REV_5700 ||
11797 GET_ASIC_REV(tp->pci_chip_rev_id) ==
11798 ASIC_REV_5701)
11799 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11803 case SHASTA_EXT_LED_SHARED:
11804 tp->led_ctrl = LED_CTRL_MODE_SHARED;
11805 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11806 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11807 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11808 LED_CTRL_MODE_PHY_2);
11811 case SHASTA_EXT_LED_MAC:
11812 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11815 case SHASTA_EXT_LED_COMBO:
11816 tp->led_ctrl = LED_CTRL_MODE_COMBO;
11817 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11818 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11819 LED_CTRL_MODE_PHY_2);
11824 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11825 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11826 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11827 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11829 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11830 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11832 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
11833 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
11834 if ((tp->pdev->subsystem_vendor ==
11835 PCI_VENDOR_ID_ARIMA) &&
11836 (tp->pdev->subsystem_device == 0x205a ||
11837 tp->pdev->subsystem_device == 0x2063))
11838 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11840 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11841 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11844 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11845 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
11846 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11847 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11850 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11851 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11852 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
11854 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11855 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11856 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11858 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11859 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
11860 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11862 if (cfg2 & (1 << 17))
11863 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11865 /* Serdes signal pre-emphasis in register 0x590 is set by the
11866 * bootcode if bit 18 is set. */
11867 if (cfg2 & (1 << 18))
11868 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
11870 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11871 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
11872 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
11873 tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
11875 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11878 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11879 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11880 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11883 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11884 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11885 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11886 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11887 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11888 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
11891 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11892 device_set_wakeup_enable(&tp->pdev->dev,
11893 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
11896 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11901 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11902 tw32(OTP_CTRL, cmd);
11904 /* Wait for up to 1 ms for command to execute. */
11905 for (i = 0; i < 100; i++) {
11906 val = tr32(OTP_STATUS);
11907 if (val & OTP_STATUS_CMD_DONE)
11912 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
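/* Each poll above pairs the status read with a short delay
 * (udelay(10) in the full driver source), so 100 passes cover the
 * 1 ms budget promised in the comment.
 */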
11915 /* Read the gphy configuration from the OTP region of the chip. The gphy
11916 * configuration is a 32-bit value that straddles the alignment boundary.
11917 * We do two 32-bit reads and then shift and merge the results.
11919 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11921 u32 bhalf_otp, thalf_otp;
11923 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11925 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11928 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11930 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11933 thalf_otp = tr32(OTP_READ_DATA);
11935 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11937 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11940 bhalf_otp = tr32(OTP_READ_DATA);
11942 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
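/* Illustrative merge: if the MAGIC1 read returns 0xaaaabbbb and the
 * MAGIC2 read returns 0xccccdddd, the result is
 * (0xbbbb << 16) | 0xcccc = 0xbbbbcccc -- the 32 bits that straddle
 * the boundary between the two OTP words.
 */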
11945 static int __devinit tg3_phy_probe(struct tg3 *tp)
11947 u32 hw_phy_id_1, hw_phy_id_2;
11948 u32 hw_phy_id, hw_phy_id_masked;
11951 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11952 return tg3_phy_init(tp);
11954 /* Reading the PHY ID register can conflict with ASF
11955 * firmware access to the PHY hardware.
11958 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11959 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
11960 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11962 /* Now read the physical PHY_ID from the chip and verify
11963 * that it is sane. If it doesn't look good, we fall back
11964 * to either the hard-coded table based PHY_ID or, failing
11965 * that, the value found in the eeprom area.
11967 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11968 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11970 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11971 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11972 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
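/* Illustrative packing: MII_PHYSID1 lands at bits 25:10, the top six
 * bits of MII_PHYSID2 (the rest of the OUI) at bits 31:26, and its
 * low ten bits (model + revision) at bits 9:0. PHY_ID_MASK below
 * then strips the revision so the lookup matches any silicon rev.
 */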
11974 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11977 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11978 tp->phy_id = hw_phy_id;
11979 if (hw_phy_id_masked == PHY_ID_BCM8002)
11980 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11982 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
11984 if (tp->phy_id != PHY_ID_INVALID) {
11985 /* Do nothing, phy ID already set up in
11986 * tg3_get_eeprom_hw_cfg().
11989 struct subsys_tbl_ent *p;
11991 /* No eeprom signature? Try the hardcoded
11992 * subsys device table.
11994 p = lookup_by_subsys(tp);
11998 tp->phy_id = p->phy_id;
11999 if (!tp->phy_id ||
12000 tp->phy_id == PHY_ID_BCM8002)
12001 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12005 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
12006 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
12007 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
12008 u32 bmsr, adv_reg, tg3_ctrl, mask;
12010 tg3_readphy(tp, MII_BMSR, &bmsr);
12011 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12012 (bmsr & BMSR_LSTATUS))
12013 goto skip_phy_reset;
12015 err = tg3_phy_reset(tp);
12019 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
12020 ADVERTISE_100HALF | ADVERTISE_100FULL |
12021 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
12023 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
12024 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
12025 MII_TG3_CTRL_ADV_1000_FULL);
12026 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12027 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
12028 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
12029 MII_TG3_CTRL_ENABLE_AS_MASTER);
12032 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12033 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12034 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12035 if (!tg3_copper_is_advertising_all(tp, mask)) {
12036 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12038 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
12039 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12041 tg3_writephy(tp, MII_BMCR,
12042 BMCR_ANENABLE | BMCR_ANRESTART);
12044 tg3_phy_set_wirespeed(tp);
12046 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12047 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
12048 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12052 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
12053 err = tg3_init_5401phy_dsp(tp);
12058 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
12059 err = tg3_init_5401phy_dsp(tp);
12062 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
12063 tp->link_config.advertising =
12064 (ADVERTISED_1000baseT_Half |
12065 ADVERTISED_1000baseT_Full |
12066 ADVERTISED_Autoneg |
12068 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
12069 tp->link_config.advertising &=
12070 ~(ADVERTISED_1000baseT_Half |
12071 ADVERTISED_1000baseT_Full);
12076 static void __devinit tg3_read_partno(struct tg3 *tp)
12078 unsigned char vpd_data[256]; /* in little-endian format */
12082 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
12083 tg3_nvram_read(tp, 0x0, &magic))
12084 goto out_not_found;
12086 if (magic == TG3_EEPROM_MAGIC) {
12087 for (i = 0; i < 256; i += 4) {
12090 /* The data is in little-endian format in NVRAM.
12091 * Use the big-endian read routines to preserve
12092 * the byte order as it exists in NVRAM.
12094 if (tg3_nvram_read_be32(tp, 0x100 + i, &tmp))
12095 goto out_not_found;
12097 memcpy(&vpd_data[i], &tmp, sizeof(tmp));
12102 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
12103 for (i = 0; i < 256; i += 4) {
12108 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
12110 while (j++ < 100) {
12111 pci_read_config_word(tp->pdev, vpd_cap +
12112 PCI_VPD_ADDR, &tmp16);
12113 if (tmp16 & 0x8000)
12117 if (!(tmp16 & 0x8000))
12118 goto out_not_found;
12120 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
12122 v = cpu_to_le32(tmp);
12123 memcpy(&vpd_data[i], &v, sizeof(v));
12127 /* Now parse and find the part number. */
12128 for (i = 0; i < 254; ) {
12129 unsigned char val = vpd_data[i];
12130 unsigned int block_end;
12132 if (val == 0x82 || val == 0x91) {
12133 i = (i + 3 +
12134 (vpd_data[i + 1] +
12135 (vpd_data[i + 2] << 8)));
12140 goto out_not_found;
12142 block_end = (i + 3 +
12143 (vpd_data[i + 1] +
12144 (vpd_data[i + 2] << 8)));
12147 if (block_end > 256)
12148 goto out_not_found;
12150 while (i < (block_end - 2)) {
12151 if (vpd_data[i + 0] == 'P' &&
12152 vpd_data[i + 1] == 'N') {
12153 int partno_len = vpd_data[i + 2];
12156 if (partno_len > 24 || (partno_len + i) > 256)
12157 goto out_not_found;
12159 memcpy(tp->board_part_number,
12160 &vpd_data[i], partno_len);
12165 i += 3 + vpd_data[i + 2];
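/* Each VPD keyword is laid out as <k1> <k2> <len> <payload...>, so
 * skipping 3 + vpd_data[i + 2] bytes lands on the next keyword
 * header (illustrative: 'S' 'N' 0x08 plus 8 payload bytes advances
 * i by 11).
 */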
12168 /* Part number not found. */
12169 goto out_not_found;
12173 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12174 strcpy(tp->board_part_number, "BCM95906");
12175 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12176 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
12177 strcpy(tp->board_part_number, "BCM57780");
12178 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12179 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
12180 strcpy(tp->board_part_number, "BCM57760");
12181 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12182 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
12183 strcpy(tp->board_part_number, "BCM57790");
12184 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12185 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
12186 strcpy(tp->board_part_number, "BCM57788");
12188 strcpy(tp->board_part_number, "none");
12191 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
12195 if (tg3_nvram_read(tp, offset, &val) ||
12196 (val & 0xfc000000) != 0x0c000000 ||
12197 tg3_nvram_read(tp, offset + 4, &val) ||
12204 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12206 u32 val, offset, start, ver_offset;
12208 bool newver = false;
12210 if (tg3_nvram_read(tp, 0xc, &offset) ||
12211 tg3_nvram_read(tp, 0x4, &start))
12214 offset = tg3_nvram_logical_addr(tp, offset);
12216 if (tg3_nvram_read(tp, offset, &val))
12219 if ((val & 0xfc000000) == 0x0c000000) {
12220 if (tg3_nvram_read(tp, offset + 4, &val))
12228 if (tg3_nvram_read(tp, offset + 8, &ver_offset))
12231 offset = offset + ver_offset - start;
12232 for (i = 0; i < 16; i += 4) {
12234 if (tg3_nvram_read_be32(tp, offset + i, &v))
12237 memcpy(tp->fw_ver + i, &v, sizeof(v));
12242 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
12245 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
12246 TG3_NVM_BCVER_MAJSFT;
12247 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
12248 snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor);
12252 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
12254 u32 val, major, minor;
12256 /* Use native endian representation */
12257 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
12260 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
12261 TG3_NVM_HWSB_CFG1_MAJSFT;
12262 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
12263 TG3_NVM_HWSB_CFG1_MINSFT;
12265 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
12268 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12270 u32 offset, major, minor, build;
12272 tp->fw_ver[0] = 's';
12273 tp->fw_ver[1] = 'b';
12274 tp->fw_ver[2] = '\0';
12276 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
12279 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
12280 case TG3_EEPROM_SB_REVISION_0:
12281 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
12283 case TG3_EEPROM_SB_REVISION_2:
12284 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
12286 case TG3_EEPROM_SB_REVISION_3:
12287 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
12293 if (tg3_nvram_read(tp, offset, &val))
12296 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
12297 TG3_EEPROM_SB_EDH_BLD_SHFT;
12298 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
12299 TG3_EEPROM_SB_EDH_MAJ_SHFT;
12300 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
12302 if (minor > 99 || build > 26)
12305 snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
12308 tp->fw_ver[8] = 'a' + build - 1;
12309 tp->fw_ver[9] = '\0';
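/* Illustrative result: major 1, minor 4, build 2 renders as
 * "sb v1.04b" -- the build number becomes a trailing letter, which
 * is why builds above 26 bail out earlier in this function.
 */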
12313 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
12315 u32 val, offset, start;
12318 for (offset = TG3_NVM_DIR_START;
12319 offset < TG3_NVM_DIR_END;
12320 offset += TG3_NVM_DIRENT_SIZE) {
12321 if (tg3_nvram_read(tp, offset, &val))
12324 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
12328 if (offset == TG3_NVM_DIR_END)
12331 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12332 start = 0x08000000;
12333 else if (tg3_nvram_read(tp, offset - 4, &start))
12336 if (tg3_nvram_read(tp, offset + 4, &offset) ||
12337 !tg3_fw_img_is_valid(tp, offset) ||
12338 tg3_nvram_read(tp, offset + 8, &val))
12341 offset += val - start;
12343 vlen = strlen(tp->fw_ver);
12345 tp->fw_ver[vlen++] = ',';
12346 tp->fw_ver[vlen++] = ' ';
12348 for (i = 0; i < 4; i++) {
12350 if (tg3_nvram_read_be32(tp, offset, &v))
12353 offset += sizeof(v);
12355 if (vlen > TG3_VER_SIZE - sizeof(v)) {
12356 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
12360 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
12365 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12370 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
12371 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
12374 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
12375 if (apedata != APE_SEG_SIG_MAGIC)
12378 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
12379 if (!(apedata & APE_FW_STATUS_READY))
12382 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
12384 vlen = strlen(tp->fw_ver);
12386 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d",
12387 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
12388 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
12389 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
12390 (apedata & APE_FW_VERSION_BLDMSK));
12393 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
12397 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
12398 tp->fw_ver[0] = 's';
12399 tp->fw_ver[1] = 'b';
12400 tp->fw_ver[2] = '\0';
12405 if (tg3_nvram_read(tp, 0, &val))
12408 if (val == TG3_EEPROM_MAGIC)
12409 tg3_read_bc_ver(tp);
12410 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
12411 tg3_read_sb_ver(tp, val);
12412 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12413 tg3_read_hwsb_ver(tp);
12417 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12418 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
12421 tg3_read_mgmtfw_ver(tp);
12423 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
12426 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
12428 static int __devinit tg3_get_invariants(struct tg3 *tp)
12430 static struct pci_device_id write_reorder_chipsets[] = {
12431 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12432 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12433 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12434 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
12435 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
12436 PCI_DEVICE_ID_VIA_8385_0) },
12440 u32 pci_state_reg, grc_misc_cfg;
12445 /* Force memory write invalidate off. If we leave it on,
12446 * then on 5700_BX chips we have to enable a workaround.
12447 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
12448 * to match the cacheline size. The Broadcom driver has this
12449 * workaround but turns MWI off at all times so it never uses
12450 * it. This seems to suggest that the workaround is insufficient.
12452 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12453 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
12454 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12456 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
12457 * has the register indirect write enable bit set before
12458 * we try to access any of the MMIO registers. It is also
12459 * critical that the PCI-X hw workaround situation is decided
12460 * before that as well.
12462 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12465 tp->pci_chip_rev_id = (misc_ctrl_reg >>
12466 MISC_HOST_CTRL_CHIPREV_SHIFT);
12467 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12468 u32 prod_id_asic_rev;
12470 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717C ||
12471 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717S ||
12472 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718C ||
12473 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718S)
12474 pci_read_config_dword(tp->pdev,
12475 TG3PCI_GEN2_PRODID_ASICREV,
12476 &prod_id_asic_rev);
12478 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
12479 &prod_id_asic_rev);
12481 tp->pci_chip_rev_id = prod_id_asic_rev;
12484 /* Wrong chip ID in 5752 A0. This code can be removed later
12485 * as A0 is not in production.
12487 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12488 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12490 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12491 * we need to disable memory and use config. cycles
12492 * only to access all registers. The 5702/03 chips
12493 * can mistakenly decode the special cycles from the
12494 * ICH chipsets as memory write cycles, causing corruption
12495 * of register and memory space. Only certain ICH bridges
12496 * will drive special cycles with non-zero data during the
12497 * address phase which can fall within the 5703's address
12498 * range. This is not an ICH bug as the PCI spec allows
12499 * non-zero address during special cycles. However, only
12500 * these ICH bridges are known to drive non-zero addresses
12501 * during special cycles.
12503 * Since special cycles do not cross PCI bridges, we only
12504 * enable this workaround if the 5703 is on the secondary
12505 * bus of these ICH bridges.
12507 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12508 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12509 static struct tg3_dev_id {
12513 } ich_chipsets[] = {
12514 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12516 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12518 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12520 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12524 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12525 struct pci_dev *bridge = NULL;
12527 while (pci_id->vendor != 0) {
12528 bridge = pci_get_device(pci_id->vendor, pci_id->device,
12534 if (pci_id->rev != PCI_ANY_ID) {
12535 if (bridge->revision > pci_id->rev)
12538 if (bridge->subordinate &&
12539 (bridge->subordinate->number ==
12540 tp->pdev->bus->number)) {
12542 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12543 pci_dev_put(bridge);
12549 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12550 static struct tg3_dev_id {
12553 } bridge_chipsets[] = {
12554 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12555 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12558 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12559 struct pci_dev *bridge = NULL;
12561 while (pci_id->vendor != 0) {
12562 bridge = pci_get_device(pci_id->vendor,
12569 if (bridge->subordinate &&
12570 (bridge->subordinate->number <=
12571 tp->pdev->bus->number) &&
12572 (bridge->subordinate->subordinate >=
12573 tp->pdev->bus->number)) {
12574 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12575 pci_dev_put(bridge);
12581 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12582 * DMA addresses > 40-bit. This bridge may have additional
12583 * 57xx devices behind it in some 4-port NIC designs, for example.
12584 * Any tg3 device found behind the bridge will also need the 40-bit
12585 * DMA workaround.
12587 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12588 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12589 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12590 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12591 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12594 struct pci_dev *bridge = NULL;
12597 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12598 PCI_DEVICE_ID_SERVERWORKS_EPB,
12600 if (bridge && bridge->subordinate &&
12601 (bridge->subordinate->number <=
12602 tp->pdev->bus->number) &&
12603 (bridge->subordinate->subordinate >=
12604 tp->pdev->bus->number)) {
12605 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12606 pci_dev_put(bridge);
12612 /* Initialize misc host control in PCI block. */
12613 tp->misc_host_ctrl |= (misc_ctrl_reg &
12614 MISC_HOST_CTRL_CHIPREV);
12615 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12616 tp->misc_host_ctrl);
12618 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
12619 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
12620 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12621 tp->pdev_peer = tg3_find_peer(tp);
12623 /* Intentionally exclude ASIC_REV_5906 */
12624 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12625 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12626 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12627 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12628 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12629 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12630 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12631 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
12633 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12634 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12635 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12636 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12637 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12638 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12640 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12641 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12642 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12644 /* 5700 B0 chips do not support checksumming correctly due
12645 * to hardware bugs.
12647 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12648 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12650 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12651 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12652 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
12653 tp->dev->features |= NETIF_F_IPV6_CSUM;
12656 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12657 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12658 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12659 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12660 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12661 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12662 tp->pdev_peer == tp->pdev))
12663 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12665 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12666 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12667 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12668 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12670 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12671 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12673 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12674 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12680 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
12681 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
12682 tp->irq_max = TG3_IRQ_MAX_VECS;
12685 if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
12686 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12687 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
12689 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
12690 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
12694 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12695 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
12696 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12697 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
12699 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12702 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12703 if (tp->pcie_cap != 0) {
12706 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12708 pcie_set_readrq(tp->pdev, 4096);
12710 pci_read_config_word(tp->pdev,
12711 tp->pcie_cap + PCI_EXP_LNKCTL,
12713 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
12714 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12715 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12716 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12717 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12718 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
12719 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
12720 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
12722 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12723 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12724 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12725 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12726 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12727 if (!tp->pcix_cap) {
12728 printk(KERN_ERR PFX "Cannot find PCI-X "
12729 "capability, aborting.\n");
12733 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
12734 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12737 /* If we have an AMD 762 or VIA K8T800 chipset, write
12738 * reordering to the mailbox registers done by the host
12739 * controller can cause major troubles. We read back from
12740 * every mailbox register write to force the writes to be
12741 * posted to the chip in order.
12743 if (pci_dev_present(write_reorder_chipsets) &&
12744 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12745 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12747 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
12748 &tp->pci_cacheline_sz);
12749 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
12750 &tp->pci_lat_timer);
12751 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12752 tp->pci_lat_timer < 64) {
12753 tp->pci_lat_timer = 64;
12754 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
12755 tp->pci_lat_timer);
12758 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12759 /* 5700 BX chips need to have their TX producer index
12760 * mailboxes written twice to work around a bug.
12762 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12764 /* If we are in PCI-X mode, enable register write workaround.
12766 * The workaround is to use indirect register accesses
12767 * for all chip writes not to mailbox registers.
12769 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12772 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12774 /* The chip can have its power management PCI config
12775 * space registers clobbered due to this bug.
12776 * So explicitly force the chip into D0 here.
12778 pci_read_config_dword(tp->pdev,
12779 tp->pm_cap + PCI_PM_CTRL,
12781 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12782 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12783 pci_write_config_dword(tp->pdev,
12784 tp->pm_cap + PCI_PM_CTRL,
12787 /* Also, force SERR#/PERR# in PCI command. */
12788 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12789 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12790 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12794 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12795 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12796 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12797 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12799 /* Chip-specific fixup from Broadcom driver */
12800 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12801 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12802 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12803 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12806 /* Default fast path register access methods */
12807 tp->read32 = tg3_read32;
12808 tp->write32 = tg3_write32;
12809 tp->read32_mbox = tg3_read32;
12810 tp->write32_mbox = tg3_write32;
12811 tp->write32_tx_mbox = tg3_write32;
12812 tp->write32_rx_mbox = tg3_write32;
12814 /* Various workaround register access methods */
12815 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12816 tp->write32 = tg3_write_indirect_reg32;
12817 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12818 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12819 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12821 * Back to back register writes can cause problems on these
12822 * chips; the workaround is to read back all reg writes
12823 * except those to mailbox regs.
12825 * See tg3_write_indirect_reg32().
12827 tp->write32 = tg3_write_flush_reg32;
12830 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12831 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12832 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12833 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12834 tp->write32_rx_mbox = tg3_write_flush_reg32;
12837 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12838 tp->read32 = tg3_read_indirect_reg32;
12839 tp->write32 = tg3_write_indirect_reg32;
12840 tp->read32_mbox = tg3_read_indirect_mbox;
12841 tp->write32_mbox = tg3_write_indirect_mbox;
12842 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12843 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12848 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12849 pci_cmd &= ~PCI_COMMAND_MEMORY;
12850 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12852 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12853 tp->read32_mbox = tg3_read32_mbox_5906;
12854 tp->write32_mbox = tg3_write32_mbox_5906;
12855 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12856 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12859 if (tp->write32 == tg3_write_indirect_reg32 ||
12860 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12861 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12862 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12863 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12865 /* Get eeprom hw config before calling tg3_set_power_state().
12866 * In particular, the TG3_FLG2_IS_NIC flag must be
12867 * determined before calling tg3_set_power_state() so that
12868 * we know whether or not to switch out of Vaux power.
12869 * When the flag is set, it means that GPIO1 is used for eeprom
12870 * write protect and also implies that it is a LOM where GPIOs
12871 * are not used to switch power.
12873 tg3_get_eeprom_hw_cfg(tp);
12875 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12876 /* Allow reads and writes to the
12877 * APE register and memory space.
12879 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12880 PCISTATE_ALLOW_APE_SHMEM_WR;
12881 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12885 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12886 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12887 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12888 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12889 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12890 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12892 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12893 * GPIO1 driven high will bring 5700's external PHY out of reset.
12894 * It is also used as eeprom write protect on LOMs.
12896 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12897 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12898 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12899 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12900 GRC_LCLCTRL_GPIO_OUTPUT1);
12901 /* Unused GPIO3 must be driven as output on 5752 because there
12902 * are no pull-up resistors on unused GPIO pins.
12904 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12905 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12907 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12908 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12909 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12911 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
12912 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
12913 /* Turn off the debug UART. */
12914 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12915 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12916 /* Keep VMain power. */
12917 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12918 GRC_LCLCTRL_GPIO_OUTPUT0;
12921 /* Force the chip into D0. */
12922 err = tg3_set_power_state(tp, PCI_D0);
12924 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12925 pci_name(tp->pdev));
12929 /* Derive initial jumbo mode from MTU assigned in
12930 * ether_setup() via the alloc_etherdev() call
12932 if (tp->dev->mtu > ETH_DATA_LEN &&
12933 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12934 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12936 /* Determine WakeOnLan speed to use. */
12937 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12938 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12939 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12940 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12941 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12943 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12946 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12947 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
12949 /* A few boards don't want Ethernet@WireSpeed phy feature */
12950 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12951 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12952 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12953 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12954 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) ||
12955 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12956 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12958 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12959 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12960 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12961 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12962 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12964 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
12965 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
12966 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12967 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
12968 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
12969 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12970 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12971 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12972 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12973 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12974 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12975 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12976 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12977 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12979 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12982 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12983 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12984 tp->phy_otp = tg3_read_otp_phycfg(tp);
12985 if (tp->phy_otp == 0)
12986 tp->phy_otp = TG3_OTP_DEFAULT;
12989 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12990 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12992 tp->mi_mode = MAC_MI_MODE_BASE;
12994 tp->coalesce_mode = 0;
12995 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12996 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12997 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12999 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13000 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13001 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
13003 err = tg3_mdio_init(tp);
13007 /* Initialize data/descriptor byte/word swapping. */
13008 val = tr32(GRC_MODE);
13009 val &= GRC_MODE_HOST_STACKUP;
13010 tw32(GRC_MODE, val | tp->grc_mode);
13012 tg3_switch_clocks(tp);
13014 /* Clear this out for sanity. */
13015 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
13017 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13019 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
13020 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
13021 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
13023 if (chiprevid == CHIPREV_ID_5701_A0 ||
13024 chiprevid == CHIPREV_ID_5701_B0 ||
13025 chiprevid == CHIPREV_ID_5701_B2 ||
13026 chiprevid == CHIPREV_ID_5701_B5) {
13027 void __iomem *sram_base;
13029 /* Write some dummy words into the SRAM status block
13030 * area and see if it reads back correctly. If the return
13031 * value is bad, force enable the PCIX workaround.
13033 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
13035 writel(0x00000000, sram_base);
13036 writel(0x00000000, sram_base + 4);
13037 writel(0xffffffff, sram_base + 4);
13038 if (readl(sram_base) != 0x00000000)
13039 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
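/* Rationale (as we read it): if the target hardware bug is present,
 * the back-to-back writes above bleed into the wrong word, so word 0
 * no longer reads back as zero and the indirect-access workaround is
 * forced on.
 */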
13044 tg3_nvram_init(tp);
13046 grc_misc_cfg = tr32(GRC_MISC_CFG);
13047 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
13049 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13050 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
13051 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
13052 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
13054 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
13055 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
13056 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
13057 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
13058 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
13059 HOSTCC_MODE_CLRTICK_TXBD);
13061 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
13062 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13063 tp->misc_host_ctrl);
13066 /* Preserve the APE MAC_MODE bits */
13067 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
13068 tp->mac_mode = tr32(MAC_MODE) |
13069 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13071 tp->mac_mode = TG3_DEF_MAC_MODE;
13073 /* these are limited to 10/100 only */
13074 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13075 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
13076 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13077 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13078 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
13079 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
13080 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
13081 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13082 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
13083 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
13084 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
13085 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
13086 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
13087 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
13089 err = tg3_phy_probe(tp);
13091 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
13092 pci_name(tp->pdev), err);
13093 /* ... but do not return immediately ... */
13097 tg3_read_partno(tp);
13098 tg3_read_fw_ver(tp);
13100 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
13101 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
13103 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13104 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
13106 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
13109 /* 5700 {AX,BX} chips have a broken status block link
13110 * change bit implementation, so we must use the
13111 * status register in those cases.
13113 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13114 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
13116 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
13118 /* The led_ctrl is set during tg3_phy_probe; here we might
13119 * have to force the link status polling mechanism based
13120 * upon subsystem IDs.
13122 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
13123 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13124 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
13125 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
13126 TG3_FLAG_USE_LINKCHG_REG);
13129 /* For all SERDES we poll the MAC status register. */
13130 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
13131 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
13133 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
13135 tp->rx_offset = NET_IP_ALIGN;
13136 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13137 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
13140 tp->rx_std_max_post = TG3_RX_RING_SIZE;
13142 /* Increment the rx prod index on the rx std ring by at most
13143 * 8 for these chips to work around hw errata.
13145 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13146 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13147 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13148 tp->rx_std_max_post = 8;
13150 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
13151 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
13152 PCIE_PWR_MGMT_L1_THRESH_MSK;
13157 #ifdef CONFIG_SPARC
13158 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
13160 struct net_device *dev = tp->dev;
13161 struct pci_dev *pdev = tp->pdev;
13162 struct device_node *dp = pci_device_to_OF_node(pdev);
13163 const unsigned char *addr;
13166 addr = of_get_property(dp, "local-mac-address", &len);
13167 if (addr && len == 6) {
13168 memcpy(dev->dev_addr, addr, 6);
13169 memcpy(dev->perm_addr, dev->dev_addr, 6);
13175 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
13177 struct net_device *dev = tp->dev;
13179 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
13180 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC)
			mac_offset = 0xcc;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {	/* "HK" signature from bootcode */
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
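/*
 * Illustrative sketch (hypothetical helper, unused by the driver): how
 * the SRAM mailbox words above map onto the six MAC address bytes.  The
 * high word carries the 0x484b ("HK") signature in its upper 16 bits
 * and the first two address bytes below it; the low word carries the
 * remaining four bytes, most significant first.
 */
static inline void tg3_example_mac_from_mbox(u32 hi, u32 lo, u8 *mac)
{
	mac[0] = (hi >>  8) & 0xff;	/* hi = 0x484bAABB -> AA BB */
	mac[1] = (hi >>  0) & 0xff;
	mac[2] = (lo >> 24) & 0xff;	/* lo = 0xCCDDEEFF -> CC DD EE FF */
	mac[3] = (lo >> 16) & 0xff;
	mac[4] = (lo >>  8) & 0xff;
	mac[5] = (lo >>  0) & 0xff;
}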
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
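/*
 * Illustrative note: PCI_CACHE_LINE_SIZE is programmed in units of
 * 32-bit dwords, hence the "* 4" above; a value of zero means the BIOS
 * left it unset and is pessimistically treated as 1024 bytes.  A
 * hypothetical helper making the unit conversion explicit:
 */
static inline int tg3_example_cacheline_bytes(u8 cls)
{
	return cls ? (int) cls * 4 : 1024;
}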
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
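/*
 * Illustrative sketch of the indirect SRAM access pattern used in the
 * descriptor copy loop above: the memory window base register selects
 * an offset in NIC SRAM, the data register moves one 32-bit word, and
 * the base is parked at zero afterwards.  Hypothetical helper only;
 * the regular driver paths do this via tg3_write_mem() under
 * tp->indirect_lock.
 */
static inline void tg3_example_sram_write_word(struct tg3 *tp,
					       u32 off, u32 val)
{
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
}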
#define TEST_BUFFER_SIZE	0x2000

static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* Validate that the data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* Retry with a 16-byte write boundary. */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
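/*
 * Illustrative sketch: the DMA test above fills the buffer with its own
 * word index, bounces it through the chip, and checks that every word
 * survived the round trip.  A hypothetical stand-alone checker:
 */
static inline int tg3_example_check_pattern(const u32 *p, u32 n)
{
	u32 i;

	for (i = 0; i < n; i++)
		if (p[i] != i)
			return -EIO;	/* first corrupted word found */
	return 0;
}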
static void __devinit tg3_init_link_config(struct tg3 *tp)
{
	tp->link_config.advertising =
		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
		 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
		 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
		 ADVERTISED_Autoneg | ADVERTISED_MII);
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.phy_is_low_power = 0;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & PHY_ID_MASK) {
	case PHY_ID_BCM5400:	return "5400";
	case PHY_ID_BCM5401:	return "5401";
	case PHY_ID_BCM5411:	return "5411";
	case PHY_ID_BCM5701:	return "5701";
	case PHY_ID_BCM5703:	return "5703";
	case PHY_ID_BCM5704:	return "5704";
	case PHY_ID_BCM5705:	return "5705";
	case PHY_ID_BCM5750:	return "5750";
	case PHY_ID_BCM5752:	return "5752";
	case PHY_ID_BCM5714:	return "5714";
	case PHY_ID_BCM5780:	return "5780";
	case PHY_ID_BCM5755:	return "5755";
	case PHY_ID_BCM5787:	return "5787";
	case PHY_ID_BCM5784:	return "5784";
	case PHY_ID_BCM5756:	return "5722/5756";
	case PHY_ID_BCM5906:	return "5906";
	case PHY_ID_BCM5761:	return "5761";
	case PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
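/*
 * Illustrative note: devfn packs the PCI slot number in bits 7:3 and
 * the function number in bits 2:0, so "devfn & ~7" above denotes
 * function 0 of the same slot.  The standard macros spell this out
 * (hypothetical helper, equivalent to the mask):
 */
static inline unsigned int tg3_example_function0(unsigned int devfn)
{
	return PCI_DEVFN(PCI_SLOT(devfn), 0);	/* == devfn & ~7 */
}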
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
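/*
 * The defaults set above are visible through the standard ethtool
 * coalescing interface, e.g. (illustrative only, device name assumed):
 *
 *	ethtool -c eth0				# query current values
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5	# adjust them
 *
 * tg3_set_coalesce() elsewhere in this file validates and applies any
 * changes.
 */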
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats		= tg3_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};

static const struct net_device_ops tg3_netdev_ops_dma_bug = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit_dma_bug,
	.ndo_get_stats		= tg3_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i < 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
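	/*
	 * Illustrative note on the loop above: vector 0 keeps the default
	 * mailbox addresses so the same values work in single-vector
	 * (INTx/MSI) mode, and vector 1 reuses them because under RSS the
	 * first vector only handles link changes.  Posting an index later
	 * is just a mailbox write, e.g. (sketch):
	 *
	 *	tw32_mailbox(tnapi->prodmbox, tnapi->tx_prod);
	 */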
	netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
		dev->netdev_ops = &tg3_netdev_ops;
	else
		dev->netdev_ops = &tg3_netdev_ops_dma_bug;
	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}
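	/* Illustrative note: for n < 64, DMA_BIT_MASK(n) is
	 * ((1ULL << n) - 1), so the 40-bit mask above is 0xffffffffff.
	 * The persistent (coherent) mask stays at 40 bits even when the
	 * streaming mask is widened to 64 bits under CONFIG_HIGHMEM;
	 * out-of-range streaming buffers are caught per-packet in
	 * tg3_start_xmit(), as the comment above notes.
	 */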
	tg3_init_bufmgr_config(tp);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
		   (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		if (dev->features & NETIF_F_IP_CSUM)
			dev->features |= NETIF_F_TSO;
		if ((dev->features & NETIF_F_IPV6_CSUM) &&
		    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
			dev->features |= NETIF_F_TSO_ECN;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}
	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);

		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
			tg3_read_dash_ver(tp);
	}

	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down cleanly: the DMA self test will enable the WDMAC, and we
	 * would otherwise see (spurious) pending DMA on the PCI bus at
	 * that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	tg3_init_coal(tp);
	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_bus_string(tp, str),
	       dev->dev_addr);

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		printk(KERN_INFO
		       "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
		       tp->dev->name, phydev->drv->name,
		       dev_name(&phydev->dev));
	} else
		printk(KERN_INFO
		       "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
		       tp->dev->name, tg3_phy_string(tp),
		       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
			((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
			 "10/100/1000Base-T")),
		       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_BIT_MASK(32)) ? 32 :
	       (((u64) pdev->dma_mask == DMA_BIT_MASK(40)) ? 40 : 64));

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_fw:
	if (tp->fw)
		release_firmware(tp->fw);

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		flush_scheduled_work();

		if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	pci_power_t target_state;
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;

	err = tg3_set_power_state(tp, target_state);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);