/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2009 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif
#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.101"
#define DRV_MODULE_RELDATE	"August 28, 2009"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_ext_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
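/* Illustrative note (not in the original source): because TG3_TX_RING_SIZE
 * is a power of two, an expression like "idx % TG3_TX_RING_SIZE" compiles
 * down to the same AND-mask that NEXT_TX() applies by hand, e.g.
 * NEXT_TX(511) == 0 and NEXT_TX(0) == 1.
 */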
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
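/* Worked sizes (illustrative): TG3_RX_STD_MAP_SZ = 1536 + 64 = 1600 bytes
 * and TG3_RX_JMB_MAP_SZ = 9046 + 64 = 9110 bytes; the extra
 * TG3_DMA_BYTE_ENAB bytes pad the DMA mapping beyond the usable payload.
 */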
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
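/* Sketch of the intended use (hypothetical call site, not part of this
 * excerpt): with tx_pending == 512 the threshold is 128 descriptors, i.e.
 *
 *	if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 *		netif_wake_queue(tp->dev);
 *
 * so the queue is only restarted once a quarter of the ring is free again.
 */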
#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
#define TG3_RSS_MIN_NUM_MSIX_VECS	2

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off));
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return (readl(tp->aperegs + off));
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return (readl(tp->regs + off + GRCMBOX_BASE));
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
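/* Usage sketch (illustrative): tw32_f() posts the write and flushes it with
 * a read-back, while tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40) additionally
 * waits 40 usec, matching the rules described above _tw32_flush().
 */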
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}
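/* Typical locking pattern (sketch, assuming the APE is enabled):
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM) == 0) {
 *		... touch state shared with the APE firmware ...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 *
 * The lock request is revoked on timeout, so a failed acquire needs no
 * matching unlock.
 */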
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;
	u32 coal_now = 0;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
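/* Illustrative MI_COM framing: a read of PHY register 0x01 programs
 * (PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) | (0x01 << MI_COM_REG_ADDR_SHIFT) |
 * MI_COM_CMD_READ | MI_COM_START into MAC_MI_COM, then polls until
 * MI_COM_BUSY clears; the result is the low 16 bits (MI_COM_DATA_MASK).
 */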
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
		return -EAGAIN;

	if (tg3_readphy(tp, reg, &val))
		return -EIO;

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;

	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
		return -EAGAIN;

	if (tg3_writephy(tp, reg, val))
		return -EIO;

	return 0;
}
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM50610:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case TG3_PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}

	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static void tg3_mdio_stop(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
			tp->dev->name, i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (!phydev || !phydev->drv) {
		printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		break;
	case TG3_PHY_ID_BCM50610:
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case TG3_PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case TG3_PHY_ID_RTL8201E:
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
		       "on" : "off");
		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
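/* Worked example (illustrative): if we advertise ADVERTISE_1000XPAUSE |
 * ADVERTISE_1000XPSE_ASYM and the link partner advertises only
 * LPA_1000XPAUSE_ASYM, the logic above resolves to FLOW_CTRL_RX: we honor
 * pause frames we receive but do not transmit any of our own.
 */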
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	spin_lock(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
		return -EINVAL;
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
		tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
	}
}
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB. */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	} else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		u32 val;

		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	tg3_phy_apply_otp(tp);

	if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1991 struct tg3 *tp_peer = tp;
1993 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1996 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1997 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1998 struct net_device *dev_peer;
2000 dev_peer = pci_get_drvdata(tp->pdev_peer);
2001 /* remove_one() may have been run on the peer. */
2005 tp_peer = netdev_priv(dev_peer);
2008 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2009 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
2010 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2011 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
2012 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2013 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2014 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2015 (GRC_LCLCTRL_GPIO_OE0 |
2016 GRC_LCLCTRL_GPIO_OE1 |
2017 GRC_LCLCTRL_GPIO_OE2 |
2018 GRC_LCLCTRL_GPIO_OUTPUT0 |
2019 GRC_LCLCTRL_GPIO_OUTPUT1),
2021 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2022 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2023 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2024 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2025 GRC_LCLCTRL_GPIO_OE1 |
2026 GRC_LCLCTRL_GPIO_OE2 |
2027 GRC_LCLCTRL_GPIO_OUTPUT0 |
2028 GRC_LCLCTRL_GPIO_OUTPUT1 |
2030 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2032 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2033 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2035 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2036 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2039 u32 grc_local_ctrl = 0;
2041 if (tp_peer != tp &&
2042 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2045 /* Workaround to prevent overdrawing Amps. */
2046 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2048 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2049 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2050 grc_local_ctrl, 100);
2053 /* On 5753 and variants, GPIO2 cannot be used. */
2054 no_gpio2 = tp->nic_sram_data_cfg &
2055 NIC_SRAM_DATA_CFG_NO_GPIO2;
2057 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2058 GRC_LCLCTRL_GPIO_OE1 |
2059 GRC_LCLCTRL_GPIO_OE2 |
2060 GRC_LCLCTRL_GPIO_OUTPUT1 |
2061 GRC_LCLCTRL_GPIO_OUTPUT2;
2063 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2064 GRC_LCLCTRL_GPIO_OUTPUT2);
2066 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2067 grc_local_ctrl, 100);
2069 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2071 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2072 grc_local_ctrl, 100);
2075 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2076 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2077 grc_local_ctrl, 100);
2081 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2082 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2083 if (tp_peer != tp &&
2084 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2087 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2088 (GRC_LCLCTRL_GPIO_OE1 |
2089 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2091 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2092 GRC_LCLCTRL_GPIO_OE1, 100);
2094 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2095 (GRC_LCLCTRL_GPIO_OE1 |
2096 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}
static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
			     MII_TG3_AUXCTL_PCTL_100TX_LPWR |
			     MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
			     MII_TG3_AUXCTL_PCTL_VREG_11V);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}

#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
	    !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
	    !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
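
/* Worked example of the two translations above, assuming an Atmel
 * AT45DB011B-style part with 264-byte pages and a 9-bit page offset
 * (i.e. ATMEL_AT45DB0X1B_PAGE_POS == 9; both values are assumptions
 * for illustration): a linear address of 1000 is page 1000 / 264 = 3
 * at offset 1000 % 264 = 208, so tg3_nvram_phys_addr() returns
 * (3 << 9) + 208 = 1744.  Feeding 1744 back through
 * tg3_nvram_logical_addr() gives (1744 >> 9) * 264 + (1744 & 511)
 * = 3 * 264 + 208 = 1000 again, i.e. the two helpers are exact
 * inverses for in-range addresses.
 */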
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}
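
/* Usage sketch (hypothetical caller, not from this driver): because
 * the value comes back big-endian, the bytes land in memory in NVRAM
 * order on both LE and BE hosts, which is what callers want when the
 * word is really a byte stream (e.g. MAC address or VPD bytes):
 *
 *	__be32 buf;
 *
 *	if (!tg3_nvram_read_be32(tp, offset, &buf))
 *		memcpy(dest, &buf, sizeof(buf));
 */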
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
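
/* Worked example of the packing above, using a made-up MAC address
 * 00:10:18:aa:bb:cc:
 *
 *	addr_high = (0x00 << 8) | 0x10 = 0x00000010
 *	addr_low  = (0x18 << 24) | (0xaa << 16) |
 *		    (0xbb << 8) | 0xcc = 0x18aabbcc
 *
 * so the first two octets land in the HIGH register and the last four
 * in the LOW register, most significant octet first.  The backoff
 * seed is simply the sum of all six octets masked down to
 * TX_BACKOFF_SEED_MASK.
 */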
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	switch (state) {
	case PCI_D0:
		pci_enable_wake(tp->pdev, state, false);
		pci_set_power_state(tp->pdev, PCI_D0);

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
	case PCI_D2:
	case PCI_D3hot:
		break;

	default:
		printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
		       tp->dev->name, state);
		return -EINVAL;
	}

	/* Restore the CLKREQ setting. */
	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = pci_pme_capable(tp->pdev, state) &&
			     device_may_wakeup(&tp->pdev->dev) &&
			     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		do_low_power = false;
		if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
		    !tp->link_config.phy_is_low_power) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[PHY_ADDR];

			tp->link_config.phy_is_low_power = 1;

			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
			    device_should_wake) {
				if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != TG3_PHY_ID_BCMAC131) {
				phyid &= TG3_PHY_OUI_MASK;
				if (phyid == TG3_PHY_OUI_1 ||
				    phyid == TG3_PHY_OUI_2 ||
				    phyid == TG3_PHY_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (tp->link_config.phy_is_low_power == 0) {
			tp->link_config.phy_is_low_power = 1;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			if (do_low_power) {
				tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
				udelay(40);
			}

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
		     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
			mac_mode |= tp->mac_mode &
				    (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
			if (mac_mode & MAC_MODE_APE_TX_EN)
				mac_mode |= MAC_MODE_TDE_ENABLE;
		}

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	if (device_should_wake)
		pci_enable_wake(tp->pdev, state, true);

	/* Finally, set the new power state. */
	pci_set_power_state(tp->pdev, state);

	return 0;
}
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	}
}
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);

	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);

	udelay(40);

	return err;
}
static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
{
	u32 adv_reg, all_mask = 0;

	if (mask & ADVERTISED_10baseT_Half)
		all_mask |= ADVERTISE_10HALF;
	if (mask & ADVERTISED_10baseT_Full)
		all_mask |= ADVERTISE_10FULL;
	if (mask & ADVERTISED_100baseT_Half)
		all_mask |= ADVERTISE_100HALF;
	if (mask & ADVERTISED_100baseT_Full)
		all_mask |= ADVERTISE_100FULL;

	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
		return 0;

	if ((adv_reg & all_mask) != all_mask)
		return 0;
	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
		u32 tg3_ctrl;

		all_mask = 0;
		if (mask & ADVERTISED_1000baseT_Half)
			all_mask |= ADVERTISE_1000HALF;
		if (mask & ADVERTISED_1000baseT_Full)
			all_mask |= ADVERTISE_1000FULL;

		if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
			return 0;

		if ((tg3_ctrl & all_mask) != all_mask)
			return 0;
	}
	return 1;
}
static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
{
	u32 curadv, reqadv;

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return 1;

	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		if (curadv != reqadv)
			return 0;

		if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
			tg3_readphy(tp, MII_LPA, rmtadv);
	} else {
		/* Reprogram the advertisement register, even if it
		 * does not affect the current link.  If the link
		 * gets renegotiated in the future, we can save an
		 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
		if (curadv != reqadv) {
			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
				     ADVERTISE_PAUSE_ASYM);
			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
		}
	}

	return 1;
}
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
							      &rmt_adv))
					current_link_up = 1;
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
		u16 oldlnkctl, newlnkctl;

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      tp->pcie_cap + PCI_EXP_LNKCTL,
					      newlnkctl);
	} else if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
		u32 newreg, oldreg = tr32(TG3_PCIE_LNKCTL);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newreg = oldreg & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		else
			newreg = oldreg | TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		if (newreg != oldreg)
			tw32(TG3_PCIE_LNKCTL, newreg);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06
};
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
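
/* The ANEG_CFG_* values above essentially mirror the 1000BASE-X
 * base-page bits of IEEE 802.3 clause 37 (full/half-duplex ability,
 * symmetric/asymmetric pause, remote fault, acknowledge, next page)
 * as carried in /C/ ordered sets, with ANEG_CFG_INVAL covering bits
 * that a valid base page must leave clear.  For example, a link
 * partner advertising full duplex with symmetric pause shows up in
 * rxconfig as (ANEG_CFG_FD | ANEG_CFG_PS1), which the COMPLETE_ACK
 * handling in the state machine below converts to
 * MR_LP_ADV_FULL_DUPLEX | MR_LP_ADV_SYM_PAUSE.
 */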
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			}
			else
				current_link_up = 0;
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}

	return err;
}
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}
}
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 val, scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
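
	/* Note on the two slot-time values above (our reading of the
	 * field, not spelled out in the driver): half-duplex gigabit
	 * uses carrier extension, which stretches the collision slot
	 * to 512 bytes (4096 bit times), hence the much larger 0xff
	 * setting; 32 corresponds to the classic 512-bit-time slot
	 * used at 10/100.  The IPG fields are identical in both cases;
	 * only the slot time differs.
	 */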
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * (see tg3_restart_hw).
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	smp_mb();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
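
/* Sanity check of the arithmetic above with illustrative numbers: on
 * a 512-entry ring with tx_pending == 511, tx_prod == 5 and
 * tx_cons == 510 (the producer has wrapped), (5 - 510) & 511 == 7
 * descriptors are in flight, leaving 511 - 7 == 504 available.  The
 * mask makes the subtraction wrap correctly and relies on the ring
 * size being a power of two, just like NEXT_TX().
 */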
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;
			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
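
/* The smp_mb() above appears to pair with the ordering done on the
 * tg3_start_xmit() side (and with the barrier in tg3_tx_avail()): the
 * consumer publishes tx_cons before testing queue-stopped, while the
 * producer stops the queue before re-reading tx_cons.  This is the
 * classic lockless producer/consumer handshake; with either barrier
 * missing, both sides can act on stale values and the queue can stall
 * even though descriptors are free.
 */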
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;
	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];

	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tpr->rx_std_buffers[src_idx];
		skb_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tpr->rx_jmb_buffers[src_idx];
		skb_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
				 PCI_DMA_FROMDEVICE);

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
			   int src_idx, u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	int dest_idx;
	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		dest_desc = &tpr->rx_std[dest_idx];
		dest_map = &tpr->rx_std_buffers[dest_idx];
		src_desc = &tpr->rx_std[src_idx];
		src_map = &tpr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		dest_desc = &tpr->rx_jmb[dest_idx].std;
		dest_map = &tpr->rx_jmb_buffers[dest_idx];
		src_desc = &tpr->rx_jmb[src_idx].std;
		src_map = &tpr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->skb = src_map->skb;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	src_map->skb = NULL;
}
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
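
/* Illustrative walk-through of the MAXLEN selection just described
 * (buffer sizes hypothetical): with a standard ring posting ~1.5 KB
 * buffers and a jumbo ring posting ~9 KB buffers, a 1200-byte packet
 * fits the first TG3_BDINFO and is charged to the standard ring,
 * while a 5000-byte packet exceeds that MAXLEN and lands in the jumbo
 * ring.  The ring choice comes back to the host in the opaque cookie
 * (RXD_OPAQUE_RING_STD vs. RXD_OPAQUE_RING_JUMBO), which is what
 * tg3_rx() below switches on.
 */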
4512 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4514 struct tg3 *tp = tnapi->tp;
4515 u32 work_mask, rx_std_posted = 0;
4516 u32 sw_idx = tnapi->rx_rcb_ptr;
4519 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4521 hw_idx = *(tnapi->rx_rcb_prod_idx);
4523 * We need to order the read of hw_idx and the read of
4524 * the opaque cookie.
4529 while (sw_idx != hw_idx && budget > 0) {
4530 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4532 struct sk_buff *skb;
4533 dma_addr_t dma_addr;
4534 u32 opaque_key, desc_idx, *post_ptr;
4536 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4537 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4538 if (opaque_key == RXD_OPAQUE_RING_STD) {
4539 struct ring_info *ri = &tpr->rx_std_buffers[desc_idx];
4540 dma_addr = pci_unmap_addr(ri, mapping);
4542 post_ptr = &tpr->rx_std_ptr;
4544 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4545 struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx];
4546 dma_addr = pci_unmap_addr(ri, mapping);
4548 post_ptr = &tpr->rx_jmb_ptr;
4550 goto next_pkt_nopost;
4552 work_mask |= opaque_key;
4554 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4555 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4557 tg3_recycle_rx(tnapi, opaque_key,
4558 desc_idx, *post_ptr);
4560 /* Other statistics are tracked by the card. */
4561 tp->net_stats.rx_dropped++;
4565 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4568 if (len > RX_COPY_THRESHOLD
4569 && tp->rx_offset == NET_IP_ALIGN
4570 /* rx_offset will likely not equal NET_IP_ALIGN
4571 * if this is a 5701 card running in PCI-X mode
4572 * [see tg3_get_invariants()]
4577 skb_size = tg3_alloc_rx_skb(tnapi, opaque_key,
4578 desc_idx, *post_ptr);
4582 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4583 PCI_DMA_FROMDEVICE);
4587 struct sk_buff *copy_skb;
4589 tg3_recycle_rx(tnapi, opaque_key,
4590 desc_idx, *post_ptr);
4592 copy_skb = netdev_alloc_skb(tp->dev,
4593 len + TG3_RAW_IP_ALIGN);
4594 if (copy_skb == NULL)
4595 goto drop_it_no_recycle;
4597 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4598 skb_put(copy_skb, len);
4599 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4600 skb_copy_from_linear_data(skb, copy_skb->data, len);
4601 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4603 /* We'll reuse the original ring buffer. */
4607 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4608 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4609 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4610 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4611 skb->ip_summed = CHECKSUM_UNNECESSARY;
4613 skb->ip_summed = CHECKSUM_NONE;
4615 skb->protocol = eth_type_trans(skb, tp->dev);
4617 if (len > (tp->dev->mtu + ETH_HLEN) &&
4618 skb->protocol != htons(ETH_P_8021Q)) {
4623 #if TG3_VLAN_TAG_USED
4624 if (tp->vlgrp != NULL &&
4625 desc->type_flags & RXD_FLAG_VLAN) {
4626 vlan_gro_receive(&tnapi->napi, tp->vlgrp,
4627 desc->err_vlan & RXD_VLAN_MASK, skb);
4630 napi_gro_receive(&tnapi->napi, skb);
4638 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4639 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4641 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4642 TG3_64BIT_REG_LOW, idx);
4643 work_mask &= ~RXD_OPAQUE_RING_STD;
4648 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4650 /* Refresh hw_idx to see if there is new work */
4651 if (sw_idx == hw_idx) {
4652 hw_idx = *(tnapi->rx_rcb_prod_idx);
4657 /* ACK the status ring. */
4658 tnapi->rx_rcb_ptr = sw_idx;
4659 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4661 /* Refill RX ring(s). */
4662 if (work_mask & RXD_OPAQUE_RING_STD) {
4663 sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE;
4664 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4667 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4668 sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE;
4669 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4677 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4679 struct tg3 *tp = tnapi->tp;
4680 struct tg3_hw_status *sblk = tnapi->hw_status;
4682 /* handle link change and other phy events */
4683 if (!(tp->tg3_flags &
4684 (TG3_FLAG_USE_LINKCHG_REG |
4685 TG3_FLAG_POLL_SERDES))) {
4686 if (sblk->status & SD_STATUS_LINK_CHG) {
4687 sblk->status = SD_STATUS_UPDATED |
4688 (sblk->status & ~SD_STATUS_LINK_CHG);
4689 spin_lock(&tp->lock);
4690 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4692 (MAC_STATUS_SYNC_CHANGED |
4693 MAC_STATUS_CFG_CHANGED |
4694 MAC_STATUS_MI_COMPLETION |
4695 MAC_STATUS_LNKSTATE_CHANGED));
4698 tg3_setup_phy(tp, 0);
4699 spin_unlock(&tp->lock);
4703 /* run TX completion thread */
4704 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
4706 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4710 /* run RX thread, within the bounds set by NAPI.
4711 * All RX "locking" is done by ensuring outside
4712 * code synchronizes with tg3->napi.poll()
4714 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4715 work_done += tg3_rx(tnapi, budget - work_done);
4720 static int tg3_poll(struct napi_struct *napi, int budget)
4722 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4723 struct tg3 *tp = tnapi->tp;
4725 struct tg3_hw_status *sblk = tnapi->hw_status;
4728 work_done = tg3_poll_work(tnapi, work_done, budget);
4730 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4733 if (unlikely(work_done >= budget))
4736 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4737 /* tp->last_tag is used in tg3_int_reenable() below
4738 * to tell the hw how much work has been processed,
4739 * so we must read it before checking for more work.
4741 tnapi->last_tag = sblk->status_tag;
4742 tnapi->last_irq_tag = tnapi->last_tag;
4745 sblk->status &= ~SD_STATUS_UPDATED;
4747 if (likely(!tg3_has_work(tnapi))) {
4748 napi_complete(napi);
4749 tg3_int_reenable(tnapi);
4757 /* work_done is guaranteed to be less than budget. */
4758 napi_complete(napi);
4759 schedule_work(&tp->reset_task);
4763 static void tg3_irq_quiesce(struct tg3 *tp)
4767 BUG_ON(tp->irq_sync);
4772 for (i = 0; i < tp->irq_cnt; i++)
4773 synchronize_irq(tp->napi[i].irq_vec);
4776 static inline int tg3_irq_sync(struct tg3 *tp)
4778 return tp->irq_sync;
4781 /* Fully shutdown all tg3 driver activity elsewhere in the system.
4782 * If irq_sync is non-zero, then the IRQ handler must be synchronized
4783 * with as well. Most of the time, this is not necessary except when
4784 * shutting down the device.
4786 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4788 spin_lock_bh(&tp->lock);
4790 tg3_irq_quiesce(tp);
4793 static inline void tg3_full_unlock(struct tg3 *tp)
4795 spin_unlock_bh(&tp->lock);
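
/* Illustrative usage of the locking helpers above (a sketch based on
 * the reconfiguration paths later in this file): callers pass
 * irq_sync != 0 when the IRQ handlers must also be fenced, e.g.
 *
 *	tg3_full_lock(tp, 1);
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *	...
 *	err = tg3_restart_hw(tp, 0);
 *	tg3_full_unlock(tp);
 */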
4798 /* One-shot MSI handler - Chip automatically disables interrupt
4799 * after sending MSI so the driver doesn't have to do it.
4801 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4803 struct tg3_napi *tnapi = dev_id;
4804 struct tg3 *tp = tnapi->tp;
4806 prefetch(tnapi->hw_status);
4808 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4810 if (likely(!tg3_irq_sync(tp)))
4811 napi_schedule(&tnapi->napi);
4816 /* MSI ISR - No need to check for interrupt sharing and no need to
4817 * flush status block and interrupt mailbox. PCI ordering rules
4818 * guarantee that MSI will arrive after the status block.
4820 static irqreturn_t tg3_msi(int irq, void *dev_id)
4822 struct tg3_napi *tnapi = dev_id;
4823 struct tg3 *tp = tnapi->tp;
4825 prefetch(tnapi->hw_status);
4827 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4829 * Writing any value to intr-mbox-0 clears PCI INTA# and
4830 * chip-internal interrupt pending events.
4831 * Writing non-zero to intr-mbox-0 additionally tells the
4832 * NIC to stop sending us irqs, engaging "in-intr-handler" event coalescing.
4835 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4836 if (likely(!tg3_irq_sync(tp)))
4837 napi_schedule(&tnapi->napi);
4839 return IRQ_RETVAL(1);
4842 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4844 struct tg3_napi *tnapi = dev_id;
4845 struct tg3 *tp = tnapi->tp;
4846 struct tg3_hw_status *sblk = tnapi->hw_status;
4847 unsigned int handled = 1;
4849 /* In INTx mode, it is possible for the interrupt to arrive at
4850 * the CPU before the status block that was posted prior to the interrupt.
4851 * Reading the PCI State register will confirm whether the
4852 * interrupt is ours and will flush the status block.
4854 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4855 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4856 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4863 * Writing any value to intr-mbox-0 clears PCI INTA# and
4864 * chip-internal interrupt pending events.
4865 * Writing non-zero to intr-mbox-0 additionally tells the
4866 * NIC to stop sending us irqs, engaging "in-intr-handler" event coalescing.
4869 * Flush the mailbox to de-assert the IRQ immediately to prevent
4870 * spurious interrupts. The flush impacts performance but
4871 * excessive spurious interrupts can be worse in some cases.
4873 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4874 if (tg3_irq_sync(tp))
4876 sblk->status &= ~SD_STATUS_UPDATED;
4877 if (likely(tg3_has_work(tnapi))) {
4878 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4879 napi_schedule(&tnapi->napi);
4881 /* No work, shared interrupt perhaps? Re-enable
4882 * interrupts, and flush that PCI write
4884 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4888 return IRQ_RETVAL(handled);
4891 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4893 struct tg3_napi *tnapi = dev_id;
4894 struct tg3 *tp = tnapi->tp;
4895 struct tg3_hw_status *sblk = tnapi->hw_status;
4896 unsigned int handled = 1;
4898 /* In INTx mode, it is possible for the interrupt to arrive at
4899 * the CPU before the status block that was posted prior to the interrupt.
4900 * Reading the PCI State register will confirm whether the
4901 * interrupt is ours and will flush the status block.
4903 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
4904 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4905 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4912 * Writing any value to intr-mbox-0 clears PCI INTA# and
4913 * chip-internal interrupt pending events.
4914 * Writing non-zero to intr-mbox-0 additionally tells the
4915 * NIC to stop sending us irqs, engaging "in-intr-handler" event coalescing.
4918 * Flush the mailbox to de-assert the IRQ immediately to prevent
4919 * spurious interrupts. The flush impacts performance but
4920 * excessive spurious interrupts can be worse in some cases.
4922 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4925 * In a shared interrupt configuration, sometimes other devices'
4926 * interrupts will scream. We record the current status tag here
4927 * so that the above check can report that the screaming interrupts
4928 * are unhandled. Eventually they will be silenced.
4930 tnapi->last_irq_tag = sblk->status_tag;
4932 if (tg3_irq_sync(tp))
4935 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4937 napi_schedule(&tnapi->napi);
4940 return IRQ_RETVAL(handled);
4943 /* ISR for interrupt test */
4944 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4946 struct tg3_napi *tnapi = dev_id;
4947 struct tg3 *tp = tnapi->tp;
4948 struct tg3_hw_status *sblk = tnapi->hw_status;
4950 if ((sblk->status & SD_STATUS_UPDATED) ||
4951 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4952 tg3_disable_ints(tp);
4953 return IRQ_RETVAL(1);
4955 return IRQ_RETVAL(0);
4958 static int tg3_init_hw(struct tg3 *, int);
4959 static int tg3_halt(struct tg3 *, int, int);
4961 /* Restart hardware after configuration changes, self-test, etc.
4962 * Invoked with tp->lock held.
4964 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4965 __releases(tp->lock)
4966 __acquires(tp->lock)
4970 err = tg3_init_hw(tp, reset_phy);
4972 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4973 "aborting.\n", tp->dev->name);
4974 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4975 tg3_full_unlock(tp);
4976 del_timer_sync(&tp->timer);
4978 tg3_napi_enable(tp);
4980 tg3_full_lock(tp, 0);
4985 #ifdef CONFIG_NET_POLL_CONTROLLER
4986 static void tg3_poll_controller(struct net_device *dev)
4989 struct tg3 *tp = netdev_priv(dev);
4991 for (i = 0; i < tp->irq_cnt; i++)
4992 tg3_interrupt(tp->napi[i].irq_vec, dev);
4996 static void tg3_reset_task(struct work_struct *work)
4998 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5000 unsigned int restart_timer;
5002 tg3_full_lock(tp, 0);
5004 if (!netif_running(tp->dev)) {
5005 tg3_full_unlock(tp);
5009 tg3_full_unlock(tp);
5015 tg3_full_lock(tp, 1);
5017 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5018 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5020 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5021 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5022 tp->write32_rx_mbox = tg3_write_flush_reg32;
5023 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5024 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5027 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5028 err = tg3_init_hw(tp, 1);
5032 tg3_netif_start(tp);
5035 mod_timer(&tp->timer, jiffies + 1);
5038 tg3_full_unlock(tp);
5044 static void tg3_dump_short_state(struct tg3 *tp)
5046 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5047 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5048 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5049 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5052 static void tg3_tx_timeout(struct net_device *dev)
5054 struct tg3 *tp = netdev_priv(dev);
5056 if (netif_msg_tx_err(tp)) {
5057 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
5059 tg3_dump_short_state(tp);
5062 schedule_work(&tp->reset_task);
5065 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
5066 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5068 u32 base = (u32) mapping & 0xffffffff;
5070 return ((base > 0xffffdcc0) &&
5071 (base + len + 8 < base));
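
/* Worked example (a sketch, not driver code) of the boundary test
 * above: a mapping at 0xfffffff0 with len 0x20 wraps the 32-bit base
 * (0xfffffff0 + 0x20 + 8 overflows), so that buffer would straddle a
 * 4GB boundary and must take the workaround path, while a mapping at
 * 0x10000000 cannot wrap.
 */
#if 0
static void tg3_4g_overflow_example(void)
{
	BUG_ON(!tg3_4g_overflow_test(0xfffffff0ULL, 0x20));	/* crosses */
	BUG_ON(tg3_4g_overflow_test(0x10000000ULL, 0x1000));	/* fits */
}
#endif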
5074 /* Test for DMA addresses > 40-bit */
5075 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5078 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5079 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5080 return (((u64) mapping + len) > DMA_BIT_MASK(40));
5087 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5089 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5090 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
5091 u32 last_plus_one, u32 *start,
5092 u32 base_flags, u32 mss)
5094 struct tg3_napi *tnapi = &tp->napi[0];
5095 struct sk_buff *new_skb;
5096 dma_addr_t new_addr = 0;
5100 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5101 new_skb = skb_copy(skb, GFP_ATOMIC);
5103 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5105 new_skb = skb_copy_expand(skb,
5106 skb_headroom(skb) + more_headroom,
5107 skb_tailroom(skb), GFP_ATOMIC);
5113 /* New SKB is guaranteed to be linear. */
5115 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
5116 new_addr = skb_shinfo(new_skb)->dma_head;
5118 /* Make sure new skb does not cross any 4G boundaries.
5119 * Drop the packet if it does.
5121 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
5123 skb_dma_unmap(&tp->pdev->dev, new_skb,
5126 dev_kfree_skb(new_skb);
5129 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5130 base_flags, 1 | (mss << 1));
5131 *start = NEXT_TX(entry);
5135 /* Now clean up the sw ring entries. */
5137 while (entry != last_plus_one) {
5139 tnapi->tx_buffers[entry].skb = new_skb;
5141 tnapi->tx_buffers[entry].skb = NULL;
5142 entry = NEXT_TX(entry);
5146 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5152 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5153 dma_addr_t mapping, int len, u32 flags,
5156 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5157 int is_end = (mss_and_is_end & 0x1);
5158 u32 mss = (mss_and_is_end >> 1);
5162 flags |= TXD_FLAG_END;
5163 if (flags & TXD_FLAG_VLAN) {
5164 vlan_tag = flags >> 16;
5167 vlan_tag |= (mss << TXD_MSS_SHIFT);
5169 txd->addr_hi = ((u64) mapping >> 32);
5170 txd->addr_lo = ((u64) mapping & 0xffffffff);
5171 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5172 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
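
/* Sketch (not driver code) of the word packing tg3_set_txd() performs
 * above, shown for a final, 1514-byte, non-VLAN, non-TSO descriptor;
 * the length value is illustrative.
 */
#if 0
static void tg3_txd_pack_example(struct tg3_tx_buffer_desc *txd,
				 dma_addr_t mapping)
{
	txd->addr_hi   = (u64)mapping >> 32;
	txd->addr_lo   = (u64)mapping & 0xffffffff;
	txd->len_flags = (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END;
	txd->vlan_tag  = 0;		/* no VLAN tag, no mss */
}
#endif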
5175 /* hard_start_xmit for devices that don't have any bugs and
5176 * support TG3_FLG2_HW_TSO_2 only.
5178 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5179 struct net_device *dev)
5181 struct tg3 *tp = netdev_priv(dev);
5182 u32 len, entry, base_flags, mss;
5183 struct skb_shared_info *sp;
5185 struct tg3_napi *tnapi;
5186 struct netdev_queue *txq;
5188 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5189 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5190 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
5193 /* We are running in BH disabled context with netif_tx_lock
5194 * and TX reclaim runs via tp->napi.poll inside of a software
5195 * interrupt. Furthermore, IRQ processing runs lockless so we have
5196 * no IRQ context deadlocks to worry about either. Rejoice!
5198 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5199 if (!netif_tx_queue_stopped(txq)) {
5200 netif_tx_stop_queue(txq);
5202 /* This is a hard error, log it. */
5203 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5204 "queue awake!\n", dev->name);
5206 return NETDEV_TX_BUSY;
5209 entry = tnapi->tx_prod;
5212 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5213 int tcp_opt_len, ip_tcp_len;
5215 if (skb_header_cloned(skb) &&
5216 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5221 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
5222 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
5224 struct iphdr *iph = ip_hdr(skb);
5226 tcp_opt_len = tcp_optlen(skb);
5227 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5230 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5231 mss |= (ip_tcp_len + tcp_opt_len) << 9;
5234 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5235 TXD_FLAG_CPU_POST_DMA);
5237 tcp_hdr(skb)->check = 0;
5240 else if (skb->ip_summed == CHECKSUM_PARTIAL)
5241 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5242 #if TG3_VLAN_TAG_USED
5243 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5244 base_flags |= (TXD_FLAG_VLAN |
5245 (vlan_tx_tag_get(skb) << 16));
5248 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5253 sp = skb_shinfo(skb);
5255 mapping = sp->dma_head;
5257 tnapi->tx_buffers[entry].skb = skb;
5259 len = skb_headlen(skb);
5261 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5262 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5264 entry = NEXT_TX(entry);
5266 /* Now loop through additional data fragments, and queue them. */
5267 if (skb_shinfo(skb)->nr_frags > 0) {
5268 unsigned int i, last;
5270 last = skb_shinfo(skb)->nr_frags - 1;
5271 for (i = 0; i <= last; i++) {
5272 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5275 mapping = sp->dma_maps[i];
5276 tnapi->tx_buffers[entry].skb = NULL;
5278 tg3_set_txd(tnapi, entry, mapping, len,
5279 base_flags, (i == last) | (mss << 1));
5281 entry = NEXT_TX(entry);
5285 /* Packets are ready, update Tx producer idx locally and on card. */
5286 tw32_tx_mbox(tnapi->prodmbox, entry);
5288 tnapi->tx_prod = entry;
5289 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5290 netif_tx_stop_queue(txq);
5291 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5292 netif_tx_wake_queue(txq);
5298 return NETDEV_TX_OK;
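
/* Sketch (not driver code) of the HW TSO mss encoding used above: the
 * total header length rides in the upper bits of the mss word so the
 * chip can replicate headers for each segment.  Values are
 * illustrative.
 */
#if 0
static u32 tg3_tso_mss_example(void)
{
	u32 mss = 1448;					/* gso_size */
	int ip_tcp_len = 20 + sizeof(struct tcphdr);	/* IPv4 + TCP */
	int tcp_opt_len = 12;				/* timestamps */

	return mss | ((ip_tcp_len + tcp_opt_len) << 9);
}
#endif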
5301 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5302 struct net_device *);
5304 /* Use GSO to work around a rare TSO bug that may be triggered when the
5305 * TSO header is greater than 80 bytes.
5307 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5309 struct sk_buff *segs, *nskb;
5310 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5312 /* Estimate the number of fragments in the worst case */
5313 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5314 netif_stop_queue(tp->dev);
5315 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5316 return NETDEV_TX_BUSY;
5318 netif_wake_queue(tp->dev);
5321 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5323 goto tg3_tso_bug_end;
5329 tg3_start_xmit_dma_bug(nskb, tp->dev);
5335 return NETDEV_TX_OK;
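
/* Sketch (not driver code) of the software-GSO fallback pattern used by
 * tg3_tso_bug() above: segment the oversized skb in software, then feed
 * each resulting skb back through the slow-path transmit routine.
 */
#if 0
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	while (segs) {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	}
	dev_kfree_skb(skb);
#endif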
5338 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5339 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5341 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5342 struct net_device *dev)
5344 struct tg3 *tp = netdev_priv(dev);
5345 u32 len, entry, base_flags, mss;
5346 struct skb_shared_info *sp;
5347 int would_hit_hwbug;
5349 struct tg3_napi *tnapi = &tp->napi[0];
5351 len = skb_headlen(skb);
5353 /* We are running in BH disabled context with netif_tx_lock
5354 * and TX reclaim runs via tp->napi.poll inside of a software
5355 * interrupt. Furthermore, IRQ processing runs lockless so we have
5356 * no IRQ context deadlocks to worry about either. Rejoice!
5358 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5359 if (!netif_queue_stopped(dev)) {
5360 netif_stop_queue(dev);
5362 /* This is a hard error, log it. */
5363 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5364 "queue awake!\n", dev->name);
5366 return NETDEV_TX_BUSY;
5369 entry = tnapi->tx_prod;
5371 if (skb->ip_summed == CHECKSUM_PARTIAL)
5372 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5374 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5376 int tcp_opt_len, ip_tcp_len, hdr_len;
5378 if (skb_header_cloned(skb) &&
5379 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5384 tcp_opt_len = tcp_optlen(skb);
5385 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5387 hdr_len = ip_tcp_len + tcp_opt_len;
5388 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5389 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5390 return (tg3_tso_bug(tp, skb));
5392 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5393 TXD_FLAG_CPU_POST_DMA);
5397 iph->tot_len = htons(mss + hdr_len);
5398 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5399 tcp_hdr(skb)->check = 0;
5400 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5402 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5407 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
5408 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
5409 if (tcp_opt_len || iph->ihl > 5) {
5412 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5413 mss |= (tsflags << 11);
5416 if (tcp_opt_len || iph->ihl > 5) {
5419 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5420 base_flags |= tsflags << 12;
5424 #if TG3_VLAN_TAG_USED
5425 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5426 base_flags |= (TXD_FLAG_VLAN |
5427 (vlan_tx_tag_get(skb) << 16));
5430 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5435 sp = skb_shinfo(skb);
5437 mapping = sp->dma_head;
5439 tnapi->tx_buffers[entry].skb = skb;
5441 would_hit_hwbug = 0;
5443 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5444 would_hit_hwbug = 1;
5445 else if (tg3_4g_overflow_test(mapping, len))
5446 would_hit_hwbug = 1;
5448 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5449 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5451 entry = NEXT_TX(entry);
5453 /* Now loop through additional data fragments, and queue them. */
5454 if (skb_shinfo(skb)->nr_frags > 0) {
5455 unsigned int i, last;
5457 last = skb_shinfo(skb)->nr_frags - 1;
5458 for (i = 0; i <= last; i++) {
5459 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5462 mapping = sp->dma_maps[i];
5464 tnapi->tx_buffers[entry].skb = NULL;
5466 if (tg3_4g_overflow_test(mapping, len))
5467 would_hit_hwbug = 1;
5469 if (tg3_40bit_overflow_test(tp, mapping, len))
5470 would_hit_hwbug = 1;
5472 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5473 tg3_set_txd(tnapi, entry, mapping, len,
5474 base_flags, (i == last)|(mss << 1));
5476 tg3_set_txd(tnapi, entry, mapping, len,
5477 base_flags, (i == last));
5479 entry = NEXT_TX(entry);
5483 if (would_hit_hwbug) {
5484 u32 last_plus_one = entry;
5487 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5488 start &= (TG3_TX_RING_SIZE - 1);
5490 /* If the workaround fails due to memory/mapping
5491 * failure, silently drop this packet.
5493 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
5494 &start, base_flags, mss))
5500 /* Packets are ready, update Tx producer idx locally and on card. */
5501 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, entry);
5503 tnapi->tx_prod = entry;
5504 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5505 netif_stop_queue(dev);
5506 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5507 netif_wake_queue(tp->dev);
5513 return NETDEV_TX_OK;
5516 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5521 if (new_mtu > ETH_DATA_LEN) {
5522 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5523 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5524 ethtool_op_set_tso(dev, 0);
5527 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5529 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5530 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5531 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5535 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5537 struct tg3 *tp = netdev_priv(dev);
5540 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5543 if (!netif_running(dev)) {
5544 /* We'll just catch it later when the device is brought up. */
5547 tg3_set_mtu(dev, tp, new_mtu);
5555 tg3_full_lock(tp, 1);
5557 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5559 tg3_set_mtu(dev, tp, new_mtu);
5561 err = tg3_restart_hw(tp, 0);
5564 tg3_netif_start(tp);
5566 tg3_full_unlock(tp);
5574 static void tg3_rx_prodring_free(struct tg3 *tp,
5575 struct tg3_rx_prodring_set *tpr)
5578 struct ring_info *rxp;
5580 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5581 rxp = &tpr->rx_std_buffers[i];
5583 if (rxp->skb == NULL)
5586 pci_unmap_single(tp->pdev,
5587 pci_unmap_addr(rxp, mapping),
5589 PCI_DMA_FROMDEVICE);
5590 dev_kfree_skb_any(rxp->skb);
5594 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5595 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5596 rxp = &tpr->rx_jmb_buffers[i];
5598 if (rxp->skb == NULL)
5601 pci_unmap_single(tp->pdev,
5602 pci_unmap_addr(rxp, mapping),
5604 PCI_DMA_FROMDEVICE);
5605 dev_kfree_skb_any(rxp->skb);
5611 /* Initialize tx/rx rings for packet processing.
5613 * The chip has been shut down and the driver detached from
5614 * the networking, so no interrupts or new tx packets will
5615 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep.
5618 static int tg3_rx_prodring_alloc(struct tg3 *tp,
5619 struct tg3_rx_prodring_set *tpr)
5621 u32 i, rx_pkt_dma_sz;
5622 struct tg3_napi *tnapi = &tp->napi[0];
5624 /* Zero out all descriptors. */
5625 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
5627 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
5628 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5629 tp->dev->mtu > ETH_DATA_LEN)
5630 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
5631 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
5633 /* Initialize invariants of the rings; we only set this
5634 * stuff once. This works because the card does not
5635 * write into the rx buffer posting rings.
5637 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5638 struct tg3_rx_buffer_desc *rxd;
5640 rxd = &tpr->rx_std[i];
5641 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
5642 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5643 rxd->opaque = (RXD_OPAQUE_RING_STD |
5644 (i << RXD_OPAQUE_INDEX_SHIFT));
5647 /* Now allocate fresh SKBs for each rx ring. */
5648 for (i = 0; i < tp->rx_pending; i++) {
5649 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5650 printk(KERN_WARNING PFX
5651 "%s: Using a smaller RX standard ring, "
5652 "only %d out of %d buffers were allocated "
5654 tp->dev->name, i, tp->rx_pending);
5662 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
5665 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
5667 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5668 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5669 struct tg3_rx_buffer_desc *rxd;
5671 rxd = &tpr->rx_jmb[i].std;
5672 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
5673 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5675 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5676 (i << RXD_OPAQUE_INDEX_SHIFT));
5679 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5680 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO,
5682 printk(KERN_WARNING PFX
5683 "%s: Using a smaller RX jumbo ring, "
5684 "only %d out of %d buffers were "
5685 "allocated successfully.\n",
5686 tp->dev->name, i, tp->rx_jumbo_pending);
5689 tp->rx_jumbo_pending = i;
5699 tg3_rx_prodring_free(tp, tpr);
5703 static void tg3_rx_prodring_fini(struct tg3 *tp,
5704 struct tg3_rx_prodring_set *tpr)
5706 kfree(tpr->rx_std_buffers);
5707 tpr->rx_std_buffers = NULL;
5708 kfree(tpr->rx_jmb_buffers);
5709 tpr->rx_jmb_buffers = NULL;
5711 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5712 tpr->rx_std, tpr->rx_std_mapping);
5716 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5717 tpr->rx_jmb, tpr->rx_jmb_mapping);
5722 static int tg3_rx_prodring_init(struct tg3 *tp,
5723 struct tg3_rx_prodring_set *tpr)
5725 tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) *
5726 TG3_RX_RING_SIZE, GFP_KERNEL);
5727 if (!tpr->rx_std_buffers)
5730 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5731 &tpr->rx_std_mapping);
5735 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5736 tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) *
5737 TG3_RX_JUMBO_RING_SIZE,
5739 if (!tpr->rx_jmb_buffers)
5742 tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
5743 TG3_RX_JUMBO_RING_BYTES,
5744 &tpr->rx_jmb_mapping);
5752 tg3_rx_prodring_fini(tp, tpr);
5756 /* Free up pending packets in all rx/tx rings.
5758 * The chip has been shut down and the driver detached from
5759 * the networking, so no interrupts or new tx packets will
5760 * end up in the driver. tp->{tx,}lock is not held and we are not
5761 * in an interrupt context and thus may sleep.
5763 static void tg3_free_rings(struct tg3 *tp)
5767 for (j = 0; j < tp->irq_cnt; j++) {
5768 struct tg3_napi *tnapi = &tp->napi[j];
5770 if (!tnapi->tx_buffers)
5773 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5774 struct tx_ring_info *txp;
5775 struct sk_buff *skb;
5777 txp = &tnapi->tx_buffers[i];
5785 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5789 i += skb_shinfo(skb)->nr_frags + 1;
5791 dev_kfree_skb_any(skb);
5795 tg3_rx_prodring_free(tp, &tp->prodring[0]);
5798 /* Initialize tx/rx rings for packet processing.
5800 * The chip has been shut down and the driver detached from
5801 * the networking, so no interrupts or new tx packets will
5802 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep.
5805 static int tg3_init_rings(struct tg3 *tp)
5809 /* Free up all the SKBs. */
5812 for (i = 0; i < tp->irq_cnt; i++) {
5813 struct tg3_napi *tnapi = &tp->napi[i];
5815 tnapi->last_tag = 0;
5816 tnapi->last_irq_tag = 0;
5817 tnapi->hw_status->status = 0;
5818 tnapi->hw_status->status_tag = 0;
5819 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
5824 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
5826 tnapi->rx_rcb_ptr = 0;
5828 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5831 return tg3_rx_prodring_alloc(tp, &tp->prodring[0]);
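
/* Sketch (not driver code): the 'opaque' cookie planted in each RX BD
 * above encodes the ring type plus the buffer index, which is how
 * tg3_rx() recovers the matching ring_info when the chip echoes the
 * descriptor back through the return ring.
 */
#if 0
	u32 opaque   = RXD_OPAQUE_RING_STD | (i << RXD_OPAQUE_INDEX_SHIFT);
	u32 desc_idx = opaque & RXD_OPAQUE_INDEX_MASK;	/* recovers i */
	u32 ring     = opaque & RXD_OPAQUE_RING_MASK;	/* std vs. jumbo */
#endif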
5835 * Must not be invoked with interrupt sources disabled and
5836 * the hardware shut down.
5838 static void tg3_free_consistent(struct tg3 *tp)
5842 for (i = 0; i < tp->irq_cnt; i++) {
5843 struct tg3_napi *tnapi = &tp->napi[i];
5845 if (tnapi->tx_ring) {
5846 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5847 tnapi->tx_ring, tnapi->tx_desc_mapping);
5848 tnapi->tx_ring = NULL;
5851 kfree(tnapi->tx_buffers);
5852 tnapi->tx_buffers = NULL;
5854 if (tnapi->rx_rcb) {
5855 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5857 tnapi->rx_rcb_mapping);
5858 tnapi->rx_rcb = NULL;
5861 if (tnapi->hw_status) {
5862 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5864 tnapi->status_mapping);
5865 tnapi->hw_status = NULL;
5870 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5871 tp->hw_stats, tp->stats_mapping);
5872 tp->hw_stats = NULL;
5875 tg3_rx_prodring_fini(tp, &tp->prodring[0]);
5879 * Must not be invoked with interrupt sources disabled and
5880 * the hardware shut down. Can sleep.
5882 static int tg3_alloc_consistent(struct tg3 *tp)
5886 if (tg3_rx_prodring_init(tp, &tp->prodring[0]))
5889 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5890 sizeof(struct tg3_hw_stats),
5891 &tp->stats_mapping);
5895 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5897 for (i = 0; i < tp->irq_cnt; i++) {
5898 struct tg3_napi *tnapi = &tp->napi[i];
5899 struct tg3_hw_status *sblk;
5901 tnapi->hw_status = pci_alloc_consistent(tp->pdev,
5903 &tnapi->status_mapping);
5904 if (!tnapi->hw_status)
5907 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
5908 sblk = tnapi->hw_status;
5911 * When RSS is enabled, the status block format changes
5912 * slightly. The "rx_jumbo_consumer", "reserved",
5913 * and "rx_mini_consumer" members get mapped to the
5914 * other three rx return ring producer indexes.
5918 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
5921 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
5924 tnapi->rx_rcb_prod_idx = &sblk->reserved;
5927 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
5932 * If multivector RSS is enabled, vector 0 does not handle
5933 * rx or tx interrupts. Don't allocate any resources for it.
5935 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
5938 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
5939 TG3_RX_RCB_RING_BYTES(tp),
5940 &tnapi->rx_rcb_mapping);
5944 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5946 tnapi->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
5947 TG3_TX_RING_SIZE, GFP_KERNEL);
5948 if (!tnapi->tx_buffers)
5951 tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
5953 &tnapi->tx_desc_mapping);
5954 if (!tnapi->tx_ring)
5961 tg3_free_consistent(tp);
5965 #define MAX_WAIT_CNT 1000
5967 /* To stop a block, clear the enable bit and poll till it
5968 * clears. tp->lock is held.
5970 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5975 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5982 /* We can't enable/disable these bits of the
5983 * 5705/5750; just say success.
5996 for (i = 0; i < MAX_WAIT_CNT; i++) {
5999 if ((val & enable_bit) == 0)
6003 if (i == MAX_WAIT_CNT && !silent) {
6004 printk(KERN_ERR PFX "tg3_stop_block timed out, "
6005 "ofs=%lx enable_bit=%x\n",
6013 /* tp->lock is held. */
6014 static int tg3_abort_hw(struct tg3 *tp, int silent)
6018 tg3_disable_ints(tp);
6020 tp->rx_mode &= ~RX_MODE_ENABLE;
6021 tw32_f(MAC_RX_MODE, tp->rx_mode);
6024 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6025 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6026 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6027 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6028 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6029 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6031 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6032 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6033 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6034 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6035 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6036 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6037 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6039 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6040 tw32_f(MAC_MODE, tp->mac_mode);
6043 tp->tx_mode &= ~TX_MODE_ENABLE;
6044 tw32_f(MAC_TX_MODE, tp->tx_mode);
6046 for (i = 0; i < MAX_WAIT_CNT; i++) {
6048 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6051 if (i >= MAX_WAIT_CNT) {
6052 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
6053 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
6054 tp->dev->name, tr32(MAC_TX_MODE));
6058 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6059 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6060 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6062 tw32(FTQ_RESET, 0xffffffff);
6063 tw32(FTQ_RESET, 0x00000000);
6065 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6066 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6068 for (i = 0; i < tp->irq_cnt; i++) {
6069 struct tg3_napi *tnapi = &tp->napi[i];
6070 if (tnapi->hw_status)
6071 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6074 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
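
/* Sketch (not driver code) of the clear-and-poll idiom tg3_stop_block()
 * applies to each engine above: clear the enable bit, then spin
 * (bounded by MAX_WAIT_CNT) until the hardware reads back zero.
 */
#if 0
	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(ofs) & enable_bit))
			break;
	}
#endif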
6079 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6084 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6085 if (apedata != APE_SEG_SIG_MAGIC)
6088 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6089 if (!(apedata & APE_FW_STATUS_READY))
6092 /* Wait for up to 1 millisecond for the APE to service the previous event. */
6093 for (i = 0; i < 10; i++) {
6094 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6097 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6099 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6100 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6101 event | APE_EVENT_STATUS_EVENT_PENDING);
6103 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6105 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6111 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6112 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6115 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6120 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6124 case RESET_KIND_INIT:
6125 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6126 APE_HOST_SEG_SIG_MAGIC);
6127 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6128 APE_HOST_SEG_LEN_MAGIC);
6129 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6130 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6131 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6132 APE_HOST_DRIVER_ID_MAGIC);
6133 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6134 APE_HOST_BEHAV_NO_PHYLOCK);
6136 event = APE_EVENT_STATUS_STATE_START;
6138 case RESET_KIND_SHUTDOWN:
6139 /* With the interface we are currently using,
6140 * APE does not track driver state. Wiping
6141 * out the HOST SEGMENT SIGNATURE forces
6142 * the APE to assume OS absent status.
6144 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6146 event = APE_EVENT_STATUS_STATE_UNLOAD;
6148 case RESET_KIND_SUSPEND:
6149 event = APE_EVENT_STATUS_STATE_SUSPEND;
6155 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6157 tg3_ape_send_event(tp, event);
6160 /* tp->lock is held. */
6161 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6163 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6164 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6166 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6168 case RESET_KIND_INIT:
6169 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6173 case RESET_KIND_SHUTDOWN:
6174 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6178 case RESET_KIND_SUSPEND:
6179 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6188 if (kind == RESET_KIND_INIT ||
6189 kind == RESET_KIND_SUSPEND)
6190 tg3_ape_driver_state_change(tp, kind);
6193 /* tp->lock is held. */
6194 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6196 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6198 case RESET_KIND_INIT:
6199 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6200 DRV_STATE_START_DONE);
6203 case RESET_KIND_SHUTDOWN:
6204 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6205 DRV_STATE_UNLOAD_DONE);
6213 if (kind == RESET_KIND_SHUTDOWN)
6214 tg3_ape_driver_state_change(tp, kind);
6217 /* tp->lock is held. */
6218 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6220 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6222 case RESET_KIND_INIT:
6223 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6227 case RESET_KIND_SHUTDOWN:
6228 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6232 case RESET_KIND_SUSPEND:
6233 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6243 static int tg3_poll_fw(struct tg3 *tp)
6248 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6249 /* Wait up to 20ms for init done. */
6250 for (i = 0; i < 200; i++) {
6251 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6258 /* Wait for firmware initialization to complete. */
6259 for (i = 0; i < 100000; i++) {
6260 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6261 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6266 /* Chip might not be fitted with firmware. Some Sun onboard
6267 * parts are configured like that. So don't signal the timeout
6268 * of the above loop as an error, but do report the lack of
6269 * running firmware once.
6272 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6273 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6275 printk(KERN_INFO PFX "%s: No firmware running.\n",
6282 /* Save PCI command register before chip reset */
6283 static void tg3_save_pci_state(struct tg3 *tp)
6285 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6288 /* Restore PCI state after chip reset */
6289 static void tg3_restore_pci_state(struct tg3 *tp)
6293 /* Re-enable indirect register accesses. */
6294 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6295 tp->misc_host_ctrl);
6297 /* Set MAX PCI retry to zero. */
6298 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6299 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6300 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6301 val |= PCISTATE_RETRY_SAME_DMA;
6302 /* Allow reads and writes to the APE register and memory space. */
6303 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6304 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6305 PCISTATE_ALLOW_APE_SHMEM_WR;
6306 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6308 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6310 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6311 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6312 pcie_set_readrq(tp->pdev, 4096);
6314 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6315 tp->pci_cacheline_sz);
6316 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6321 /* Make sure PCI-X relaxed ordering bit is clear. */
6322 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6325 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6327 pcix_cmd &= ~PCI_X_CMD_ERO;
6328 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6332 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6334 /* Chip reset on 5780 will reset MSI enable bit,
6335 * so we need to restore it.
6337 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6340 pci_read_config_word(tp->pdev,
6341 tp->msi_cap + PCI_MSI_FLAGS,
6343 pci_write_config_word(tp->pdev,
6344 tp->msi_cap + PCI_MSI_FLAGS,
6345 ctrl | PCI_MSI_FLAGS_ENABLE);
6346 val = tr32(MSGINT_MODE);
6347 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
6352 static void tg3_stop_fw(struct tg3 *);
6354 /* tp->lock is held. */
6355 static int tg3_chip_reset(struct tg3 *tp)
6358 void (*write_op)(struct tg3 *, u32, u32);
6365 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6367 /* No matching tg3_nvram_unlock() after this because
6368 * chip reset below will undo the nvram lock.
6370 tp->nvram_lock_cnt = 0;
6372 /* GRC_MISC_CFG core clock reset will clear the memory
6373 * enable bit in PCI register 4 and the MSI enable bit
6374 * on some chips, so we save relevant registers here.
6376 tg3_save_pci_state(tp);
6378 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
6379 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
6380 tw32(GRC_FASTBOOT_PC, 0);
6383 * We must avoid the readl() that normally takes place.
6384 * It locks machines, causes machine checks, and other
6385 * fun things. So, temporarily disable the 5701
6386 * hardware workaround, while we do the reset.
6388 write_op = tp->write32;
6389 if (write_op == tg3_write_flush_reg32)
6390 tp->write32 = tg3_write32;
6392 /* Prevent the irq handler from reading or writing PCI registers
6393 * during chip reset when the memory enable bit in the PCI command
6394 * register may be cleared. The chip does not generate interrupts
6395 * at this time, but the irq handler may still be called due to irq
6396 * sharing or irqpoll.
6398 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6399 for (i = 0; i < tp->irq_cnt; i++) {
6400 struct tg3_napi *tnapi = &tp->napi[i];
6401 if (tnapi->hw_status) {
6402 tnapi->hw_status->status = 0;
6403 tnapi->hw_status->status_tag = 0;
6405 tnapi->last_tag = 0;
6406 tnapi->last_irq_tag = 0;
6410 for (i = 0; i < tp->irq_cnt; i++)
6411 synchronize_irq(tp->napi[i].irq_vec);
6413 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6414 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
6415 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
6419 val = GRC_MISC_CFG_CORECLK_RESET;
6421 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6422 if (tr32(0x7e2c) == 0x60) {
6425 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6426 tw32(GRC_MISC_CFG, (1 << 29));
6431 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6432 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6433 tw32(GRC_VCPU_EXT_CTRL,
6434 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6437 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6438 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6439 tw32(GRC_MISC_CFG, val);
6441 /* restore 5701 hardware bug workaround write method */
6442 tp->write32 = write_op;
6444 /* Unfortunately, we have to delay before the PCI read back.
6445 * Some 575X chips will not even respond to a PCI cfg access
6446 * when the reset command is given to the chip.
6448 * How do these hardware designers expect things to work
6449 * properly if the PCI write is posted for a long period
6450 * of time? It is always necessary to have some method by
6451 * which a register read back can occur to push the write
6452 * out which does the reset.
6454 * For most tg3 variants the trick below has worked.
6459 /* Flush PCI posted writes. The normal MMIO registers
6460 * are inaccessible at this time so this is the only
6461 * way to do this reliably (actually, this is no longer
6462 * the case, see above). I tried to use indirect
6463 * register read/write but this upset some 5701 variants.
6465 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6469 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6472 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6476 /* Wait for link training to complete. */
6477 for (i = 0; i < 5000; i++)
6480 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6481 pci_write_config_dword(tp->pdev, 0xc4,
6482 cfg_val | (1 << 15));
6485 /* Clear the "no snoop" and "relaxed ordering" bits. */
6486 pci_read_config_word(tp->pdev,
6487 tp->pcie_cap + PCI_EXP_DEVCTL,
6489 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
6490 PCI_EXP_DEVCTL_NOSNOOP_EN);
6492 * Older PCIe devices only support the 128 byte
6493 * MPS setting. Enforce the restriction.
6495 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
6496 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
6497 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
6498 pci_write_config_word(tp->pdev,
6499 tp->pcie_cap + PCI_EXP_DEVCTL,
6502 pcie_set_readrq(tp->pdev, 4096);
6504 /* Clear error status */
6505 pci_write_config_word(tp->pdev,
6506 tp->pcie_cap + PCI_EXP_DEVSTA,
6507 PCI_EXP_DEVSTA_CED |
6508 PCI_EXP_DEVSTA_NFED |
6509 PCI_EXP_DEVSTA_FED |
6510 PCI_EXP_DEVSTA_URD);
6513 tg3_restore_pci_state(tp);
6515 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6518 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6519 val = tr32(MEMARB_MODE);
6520 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
6522 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6524 tw32(0x5000, 0x400);
6527 tw32(GRC_MODE, tp->grc_mode);
6529 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
6532 tw32(0xc4, val | (1 << 15));
6535 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6536 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6537 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6538 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6539 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6540 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6543 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6544 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6545 tw32_f(MAC_MODE, tp->mac_mode);
6546 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6547 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6548 tw32_f(MAC_MODE, tp->mac_mode);
6549 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6550 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6551 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6552 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6553 tw32_f(MAC_MODE, tp->mac_mode);
6555 tw32_f(MAC_MODE, 0);
6558 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6560 err = tg3_poll_fw(tp);
6566 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6567 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6570 tw32(0x7c00, val | (1 << 25));
6573 /* Reprobe ASF enable state. */
6574 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6575 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6576 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6577 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6580 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6581 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6582 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6583 tp->last_event_jiffies = jiffies;
6584 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
6585 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6592 /* tp->lock is held. */
6593 static void tg3_stop_fw(struct tg3 *tp)
6595 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6596 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
6597 /* Wait for RX cpu to ACK the previous event. */
6598 tg3_wait_for_event_ack(tp);
6600 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
6602 tg3_generate_fw_event(tp);
6604 /* Wait for RX cpu to ACK this event. */
6605 tg3_wait_for_event_ack(tp);
6609 /* tp->lock is held. */
6610 static int tg3_halt(struct tg3 *tp, int kind, int silent)
6616 tg3_write_sig_pre_reset(tp, kind);
6618 tg3_abort_hw(tp, silent);
6619 err = tg3_chip_reset(tp);
6621 __tg3_set_mac_addr(tp, 0);
6623 tg3_write_sig_legacy(tp, kind);
6624 tg3_write_sig_post_reset(tp, kind);
6632 #define RX_CPU_SCRATCH_BASE 0x30000
6633 #define RX_CPU_SCRATCH_SIZE 0x04000
6634 #define TX_CPU_SCRATCH_BASE 0x34000
6635 #define TX_CPU_SCRATCH_SIZE 0x04000
6637 /* tp->lock is held. */
6638 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6642 BUG_ON(offset == TX_CPU_BASE &&
6643 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6645 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6646 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6648 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6651 if (offset == RX_CPU_BASE) {
6652 for (i = 0; i < 10000; i++) {
6653 tw32(offset + CPU_STATE, 0xffffffff);
6654 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6655 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6659 tw32(offset + CPU_STATE, 0xffffffff);
6660 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
6663 for (i = 0; i < 10000; i++) {
6664 tw32(offset + CPU_STATE, 0xffffffff);
6665 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6666 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6672 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6675 (offset == RX_CPU_BASE ? "RX" : "TX"));
6679 /* Clear firmware's nvram arbitration. */
6680 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6681 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6686 unsigned int fw_base;
6687 unsigned int fw_len;
6688 const __be32 *fw_data;
6691 /* tp->lock is held. */
6692 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6693 int cpu_scratch_size, struct fw_info *info)
6695 int err, lock_err, i;
6696 void (*write_op)(struct tg3 *, u32, u32);
6698 if (cpu_base == TX_CPU_BASE &&
6699 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6700 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6701 "TX cpu firmware on %s which is 5705.\n",
6706 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6707 write_op = tg3_write_mem;
6709 write_op = tg3_write_indirect_reg32;
6711 /* It is possible that bootcode is still loading at this point.
6712 * Get the nvram lock before halting the cpu.
6714 lock_err = tg3_nvram_lock(tp);
6715 err = tg3_halt_cpu(tp, cpu_base);
6717 tg3_nvram_unlock(tp);
6721 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6722 write_op(tp, cpu_scratch_base + i, 0);
6723 tw32(cpu_base + CPU_STATE, 0xffffffff);
6724 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6725 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
6726 write_op(tp, (cpu_scratch_base +
6727 (info->fw_base & 0xffff) +
6729 be32_to_cpu(info->fw_data[i]));
6737 /* tp->lock is held. */
6738 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6740 struct fw_info info;
6741 const __be32 *fw_data;
6744 fw_data = (void *)tp->fw->data;
6746 /* The firmware blob starts with version numbers, followed by
6747 the start address and length. We set the complete length here:
6748 length = end_address_of_bss - start_address_of_text.
6749 The remainder is the image to be loaded contiguously
6750 from the start address. */
6752 info.fw_base = be32_to_cpu(fw_data[1]);
6753 info.fw_len = tp->fw->size - 12;
6754 info.fw_data = &fw_data[3];
6756 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6757 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6762 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6763 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6768 /* Now start up only the RX cpu. */
6769 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6770 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
6772 for (i = 0; i < 5; i++) {
6773 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
6775 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6776 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6777 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
6781 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6782 "to set RX CPU PC, is %08x should be %08x\n",
6783 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6787 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6788 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6793 /* 5705 needs a special version of the TSO firmware. */
6795 /* tp->lock is held. */
6796 static int tg3_load_tso_firmware(struct tg3 *tp)
6798 struct fw_info info;
6799 const __be32 *fw_data;
6800 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6803 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6806 fw_data = (void *)tp->fw->data;
6808 /* The firmware blob starts with version numbers, followed by
6809 the start address and length. We set the complete length here:
6810 length = end_address_of_bss - start_address_of_text.
6811 The remainder is the image to be loaded contiguously
6812 from the start address. */
6814 info.fw_base = be32_to_cpu(fw_data[1]);
6815 cpu_scratch_size = tp->fw_len;
6816 info.fw_len = tp->fw->size - 12;
6817 info.fw_data = &fw_data[3];
6819 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6820 cpu_base = RX_CPU_BASE;
6821 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6823 cpu_base = TX_CPU_BASE;
6824 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6825 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6828 err = tg3_load_firmware_cpu(tp, cpu_base,
6829 cpu_scratch_base, cpu_scratch_size,
6834 /* Now start up the cpu. */
6835 tw32(cpu_base + CPU_STATE, 0xffffffff);
6836 tw32_f(cpu_base + CPU_PC, info.fw_base);
6838 for (i = 0; i < 5; i++) {
6839 if (tr32(cpu_base + CPU_PC) == info.fw_base)
6841 tw32(cpu_base + CPU_STATE, 0xffffffff);
6842 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6843 tw32_f(cpu_base + CPU_PC, info.fw_base);
6847 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6848 "to set CPU PC, is %08x should be %08x\n",
6849 tp->dev->name, tr32(cpu_base + CPU_PC),
6853 tw32(cpu_base + CPU_STATE, 0xffffffff);
6854 tw32_f(cpu_base + CPU_MODE, 0x00000000);
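
/* Sketch (not driver code) of the firmware blob layout described in the
 * comments above: three big-endian words (version, start address,
 * length) followed by the contiguous image.
 */
#if 0
	const __be32 *fw = (const __be32 *)tp->fw->data;
	u32 fw_base = be32_to_cpu(fw[1]);	/* load address */
	u32 fw_len  = tp->fw->size - 12;	/* strip the 3-word header */
	const __be32 *image = &fw[3];		/* words to copy into SRAM */
#endif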
6859 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6861 struct tg3 *tp = netdev_priv(dev);
6862 struct sockaddr *addr = p;
6863 int err = 0, skip_mac_1 = 0;
6865 if (!is_valid_ether_addr(addr->sa_data))
6868 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6870 if (!netif_running(dev))
6873 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6874 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6876 addr0_high = tr32(MAC_ADDR_0_HIGH);
6877 addr0_low = tr32(MAC_ADDR_0_LOW);
6878 addr1_high = tr32(MAC_ADDR_1_HIGH);
6879 addr1_low = tr32(MAC_ADDR_1_LOW);
6881 /* Skip MAC addr 1 if ASF is using it. */
6882 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6883 !(addr1_high == 0 && addr1_low == 0))
6886 spin_lock_bh(&tp->lock);
6887 __tg3_set_mac_addr(tp, skip_mac_1);
6888 spin_unlock_bh(&tp->lock);
6893 /* tp->lock is held. */
6894 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6895 dma_addr_t mapping, u32 maxlen_flags,
6899 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6900 ((u64) mapping >> 32));
6902 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6903 ((u64) mapping & 0xffffffff));
6905 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6908 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6910 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6914 static void __tg3_set_rx_mode(struct net_device *);
6915 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6919 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
6920 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6921 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6922 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6924 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6925 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6926 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6928 tw32(HOSTCC_TXCOL_TICKS, 0);
6929 tw32(HOSTCC_TXMAX_FRAMES, 0);
6930 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
6932 tw32(HOSTCC_RXCOL_TICKS, 0);
6933 tw32(HOSTCC_RXMAX_FRAMES, 0);
6934 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
6937 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6938 u32 val = ec->stats_block_coalesce_usecs;
6940 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6941 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
	for (i = 0; i < tp->irq_cnt - 1; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
6953 tw32(reg, ec->rx_coalesce_usecs);
6954 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
6955 tw32(reg, ec->tx_coalesce_usecs);
6956 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
6957 tw32(reg, ec->rx_max_coalesced_frames);
6958 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
6959 tw32(reg, ec->tx_max_coalesced_frames);
6960 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
6961 tw32(reg, ec->rx_max_coalesced_frames_irq);
6962 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->tx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
6967 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
6968 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
6969 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
6970 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
6971 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
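/* Note on the 0x18 stride above: each additional MSI-X vector owns a
 * block of six 32-bit coalescing registers (rx/tx ticks, rx/tx max
 * frames, rx/tx max frames during irq), i.e. 6 * 4 = 0x18 bytes,
 * starting at the *_VEC1 addresses.
 */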
6976 /* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];
6983 /* Disable all transmit rings but the first. */
6984 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else
6987 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
6989 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
6990 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
6991 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
6992 BDINFO_FLAGS_DISABLED);
6995 /* Disable all receive return rings but the first. */
6996 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6997 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
6998 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
7001 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7003 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7004 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7005 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7006 BDINFO_FLAGS_DISABLED);
7008 /* Disable interrupts */
7009 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7011 /* Zero mailbox registers. */
7012 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7013 for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
7014 tp->napi[i].tx_prod = 0;
7015 tp->napi[i].tx_cons = 0;
7016 tw32_mailbox(tp->napi[i].prodmbox, 0);
7017 tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
		}
	} else {
7021 tp->napi[0].tx_prod = 0;
7022 tp->napi[0].tx_cons = 0;
7023 tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}
7027 /* Make sure the NIC-based send BD rings are disabled. */
7028 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7029 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7030 for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}
7034 txrcb = NIC_SRAM_SEND_RCB;
7035 rxrcb = NIC_SRAM_RCV_RET_RCB;
7037 /* Clear status block in ram. */
7038 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7040 /* Set status block DMA address */
7041 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7042 ((u64) tnapi->status_mapping >> 32));
7043 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7044 ((u64) tnapi->status_mapping & 0xffffffff));
7046 if (tnapi->tx_ring) {
7047 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7048 (TG3_TX_RING_SIZE <<
7049 BDINFO_FLAGS_MAXLEN_SHIFT),
7050 NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}
7054 if (tnapi->rx_rcb) {
7055 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7056 (TG3_RX_RCB_RING_SIZE(tp) <<
7057 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
		rxrcb += TG3_BDINFO_SIZE;
	}
7061 stblk = HOSTCC_STATBLCK_RING1;
7063 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7064 u64 mapping = (u64)tnapi->status_mapping;
7065 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7066 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7068 /* Clear status block in ram. */
7069 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7071 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7072 (TG3_TX_RING_SIZE <<
7073 BDINFO_FLAGS_MAXLEN_SHIFT),
7074 NIC_SRAM_TX_BUFFER_DESC);
7076 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7077 (TG3_RX_RCB_RING_SIZE(tp) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		txrcb += TG3_BDINFO_SIZE;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);
7097 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
	if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
		tg3_abort_hw(tp, 1);
	}

	if (reset_phy &&
	    !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7113 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7114 val = tr32(TG3_CPMU_CTRL);
7115 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7116 tw32(TG3_CPMU_CTRL, val);
7118 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7119 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7120 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7121 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7123 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7124 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7125 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7126 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7128 val = tr32(TG3_CPMU_HST_ACC);
7129 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7130 val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}
7134 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7135 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7136 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7137 PCIE_PWR_MGMT_L1_THRESH_4MS;
7138 tw32(PCIE_PWR_MGMT_THRESH, val);
7140 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7141 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
	}
7146 if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
7147 val = tr32(TG3_PCIE_LNKCTL);
7148 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG)
			val |= TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
		else
			val &= ~TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
		tw32(TG3_PCIE_LNKCTL, val);
	}
7155 /* This works around an issue with Athlon chipsets on
7156 * B3 tigon3 silicon. This bit has no effect on any
7157 * other revision. But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
7160 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7161 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
		udelay(40);
	}
7166 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7167 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7168 val = tr32(TG3PCI_PCISTATE);
7169 val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}
7173 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
7178 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7179 PCISTATE_ALLOW_APE_SHMEM_WR;
		tw32(TG3PCI_PCISTATE, val);
	}
7183 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7184 /* Enable some hw fixes. */
7185 val = tr32(TG3PCI_MSI_DATA);
7186 val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}
7190 /* Descriptor ring init may make accesses to the
7191 * NIC SRAM area to setup the TX descriptors, so we
7192 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;
7199 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7200 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7201 /* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}
7207 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7208 GRC_MODE_4X_NIC_SEND_RINGS |
7209 GRC_MODE_NO_TX_PHDR_CSUM |
7210 GRC_MODE_NO_RX_PHDR_CSUM);
7211 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	tw32(GRC_MODE,
	     tp->grc_mode |
	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
	/* Setup the timer prescaler register.  Clock is always 66 MHz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);
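	/* Worked example (inferred from the comment above, not from a
	 * datasheet): if the prescaler divides the fixed 66 MHz core clock
	 * by N + 1, then N = 65 gives 66 / 66 = 1 MHz, i.e. a 1 usec timer
	 * tick, which lets the coalescing *_TICKS values programmed below
	 * be treated as microseconds.
	 */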
	/* Initialize MBUF/DESC pool. */
	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
		/* Do nothing.  */
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
7255 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7256 tp->bufmgr_config.mbuf_read_dma_low_water);
7257 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7258 tp->bufmgr_config.mbuf_mac_rx_low_water);
7259 tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
7262 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7263 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7264 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7265 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7266 tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
7269 tw32(BUFMGR_DMA_LOW_WATER,
7270 tp->bufmgr_config.dma_low_water);
7271 tw32(BUFMGR_DMA_HIGH_WATER,
7272 tp->bufmgr_config.dma_high_water);
7274 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
		       tp->dev->name);
		return -ENODEV;
	}
	/* Setup replenish threshold. */
	val = tp->rx_pending / 8;
	if (val == 0)
		val = 1;
	else if (val > tp->rx_std_max_post)
		val = tp->rx_std_max_post;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
			tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

		if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
			val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
	}
7300 tw32(RCVBDI_STD_THRESH, val);
7302 /* Initialize TG3_BDINFO's at:
7303 * RCVDBDI_STD_BD: standard eth size rx ring
7304 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7305 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7308 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7309 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7310 * ring attribute flags
7311 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7313 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7314 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
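	/* Sketch of the MAXLEN_FLAGS encoding, per the layout above: the
	 * value shifted by BDINFO_FLAGS_MAXLEN_SHIFT lands in the upper 16
	 * bits (e.g. RX_JUMBO_MAX_SIZE below), while the low bits carry
	 * attribute flags such as BDINFO_FLAGS_USE_EXT_RECV or
	 * BDINFO_FLAGS_DISABLED.
	 */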
7319 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7320 ((u64) tpr->rx_std_mapping >> 32));
7321 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7322 ((u64) tpr->rx_std_mapping & 0xffffffff));
7323 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7324 NIC_SRAM_RX_BUFFER_DESC);
7326 /* Disable the mini ring */
7327 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7328 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7329 BDINFO_FLAGS_DISABLED);
7331 /* Program the jumbo buffer descriptor ring control
7332 * blocks on those devices that have them.
7334 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
7335 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
7336 /* Setup replenish threshold. */
7337 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7339 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7340 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7341 ((u64) tpr->rx_jmb_mapping >> 32));
7342 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7343 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
7344 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7345 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7346 BDINFO_FLAGS_USE_EXT_RECV);
7347 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
			     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}
	}

	val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
	tpr->rx_std_ptr = tp->rx_pending;
	tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
		     tpr->rx_std_ptr);

	tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
			  tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
		     tpr->rx_jmb_ptr);

	tg3_rings_reset(tp);
7371 /* Initialize MAC address and backoff seed. */
7372 __tg3_set_mac_addr(tp, 0);
7374 /* MTU + ethernet header + FCS + optional VLAN tag */
7375 tw32(MAC_RX_MTU_SIZE,
7376 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	tw32(MAC_TX_LENGTHS,
7382 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7383 (6 << TX_LENGTHS_IPG_SHIFT) |
7384 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7386 /* Receive rules. */
7387 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7388 tw32(RCVLPC_CONFIG, 0x0181);
7390 /* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7394 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7395 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7396 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7397 RDMAC_MODE_LNGREAD_ENAB);
7399 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7400 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7401 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7402 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7403 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7404 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7406 /* If statement applies to 5705 and 5750 PCI devices only */
7407 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7408 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7409 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7410 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7411 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7412 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7413 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7414 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7420 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7422 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7423 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
7425 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7426 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7427 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
7429 /* Receive/send statistics. */
7430 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7431 val = tr32(RCVLPC_STATS_ENABLE);
7432 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7433 tw32(RCVLPC_STATS_ENABLE, val);
7434 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7435 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7436 val = tr32(RCVLPC_STATS_ENABLE);
7437 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7438 tw32(RCVLPC_STATS_ENABLE, val);
7440 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7442 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7443 tw32(SNDDATAI_STATSENAB, 0xffffff);
7444 tw32(SNDDATAI_STATSCTRL,
7445 (SNDDATAI_SCTRL_ENABLE |
7446 SNDDATAI_SCTRL_FASTUPD));
7448 /* Setup host coalescing engine. */
7449 tw32(HOSTCC_MODE, 0);
7450 for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);
7458 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7459 /* Status/statistics block address. See tg3_timer,
7460 * the tg3_periodic_fetch_stats call there, and
	 * tg3_get_stats to see how this works for 5705/5750 chips.
	 */
7463 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7464 ((u64) tp->stats_mapping >> 32));
7465 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7466 ((u64) tp->stats_mapping & 0xffffffff));
7467 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7469 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7471 /* Clear statistics and status block memory areas */
	for (i = NIC_SRAM_STATS_BLK;
	     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
	     i += sizeof(u32)) {
		tg3_write_mem(tp, i, 0);
		udelay(40);
	}
7480 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7482 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7483 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7484 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7485 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7487 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7488 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7489 /* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
7494 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;
7498 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7499 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7500 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7501 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7502 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7503 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);
7507 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7508 * If TG3_FLG2_IS_NIC is zero, we should read the
7509 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7517 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7518 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7520 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7521 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7522 GRC_LCLCTRL_GPIO_OUTPUT3;
7524 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7525 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7527 tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
	}
7530 /* GPIO1 must be driven high for eeprom write protect */
7531 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7532 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7533 GRC_LCLCTRL_GPIO_OUTPUT1);
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);
7538 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
7539 val = tr32(MSGINT_MODE);
7540 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
		tw32(MSGINT_MODE, val);
	}
7544 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}
7549 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7550 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7551 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7552 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7553 WDMAC_MODE_LNGREAD_ENAB);
7555 /* If statement applies to 5705 and 5750 PCI devices only */
7556 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7557 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7558 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7559 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7560 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7564 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7565 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}
7570 /* Enable host coalescing bug fix */
7571 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7572 val |= WDMAC_MODE_STATUS_TAG_FIX;
	tw32_f(WDMAC_MODE, val);
	udelay(40);
	if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
7582 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7583 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7584 pcix_cmd |= PCI_X_CMD_READ_2K;
7585 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7586 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}
	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);
7596 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7597 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7598 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7606 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7607 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7608 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7609 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7610 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7611 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7612 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
7613 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
7614 val |= SNDBDI_MODE_MULTI_TXQ_EN;
7615 tw32(SNDBDI_MODE, val);
7616 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7618 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}
7630 tp->tx_mode = TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);
7634 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
7635 u32 reg = MAC_RSS_INDIR_TBL_0;
7636 u8 *ent = (u8 *)&val;
7638 /* Setup the indirection table */
7639 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
7640 int idx = i % sizeof(val);
7642 ent[idx] = i % (tp->irq_cnt - 1);
			if (idx == sizeof(val) - 1) {
				tw32(reg, val);
				reg += 4;
			}
		}
7649 /* Setup the "secret" hash key. */
7650 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
7651 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
7652 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
7653 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
7654 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
7655 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
7656 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
7657 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
7658 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
	}
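	/* Worked example of the indirection table fill above, assuming
	 * tp->irq_cnt == 5 (one link vector plus four rx vectors): the 128
	 * entries cycle 0,1,2,3,0,1,... and are packed four per 32-bit
	 * write, so 32 registers starting at MAC_RSS_INDIR_TBL_0 spread
	 * the hash space evenly across the four receive return rings.
	 */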
7662 tp->rx_mode = RX_MODE_ENABLE;
7663 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7664 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7666 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
7667 tp->rx_mode |= RX_MODE_RSS_ENABLE |
7668 RX_MODE_RSS_ITBL_HASH_BITS_7 |
7669 RX_MODE_RSS_IPV6_HASH_EN |
7670 RX_MODE_RSS_TCP_IPV6_HASH_EN |
7671 RX_MODE_RSS_IPV4_HASH_EN |
7672 RX_MODE_RSS_TCP_IPV4_HASH_EN;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);
7677 tw32(MAC_LED_CTRL, tp->led_ctrl);
7679 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7680 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);
7687 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7688 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7689 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7690 /* Set drive transmission level to 1.2V */
7691 /* only if the signal pre-emphasis bit is not set */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}
	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
7704 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7706 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7707 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7708 /* Use hardware link auto-negotiation */
		tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
	}
7712 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7713 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
7717 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7718 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7719 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}
7723 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7724 if (tp->link_config.phy_is_low_power) {
7725 tp->link_config.phy_is_low_power = 0;
7726 tp->link_config.speed = tp->link_config.orig_speed;
7727 tp->link_config.duplex = tp->link_config.orig_duplex;
			tp->link_config.autoneg = tp->link_config.orig_autoneg;
		}

		err = tg3_setup_phy(tp, 0);
		if (err)
			return err;
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7736 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) {
		u32 tmp;

		/* Clear CRC stats. */
		if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     tmp | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, 0x14, &tmp);
		}
	}
7748 __tg3_set_rx_mode(tp->dev);
7750 /* Initialize receive rules. */
7751 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7752 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7753 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7754 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
	case 15:
		tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
	case 14:
		tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
	case 13:
		tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
	case 12:
		tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
	case 11:
		tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
	case 10:
		tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
	case 9:
		tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
	case 8:
		tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
	case 7:
		tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
	case 6:
		tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
	case 5:
		tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
	case 4:
		/* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
	case 2:
	case 1:
	default:
		break;
	}
7799 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7800 /* Write our heartbeat update interval to APE. */
7801 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7802 APE_HOST_HEARTBEAT_INT_DISABLE);
	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
7821 #define TG3_STAT_ADD32(PSTAT, REG) \
7822 do { u32 __val = tr32(REG); \
7823 (PSTAT)->low += __val; \
7824 if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
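/* Example of the carry logic, with made-up numbers: if (PSTAT)->low is
 * 0xfffffff0 and the register reads 0x20, low wraps to 0x10, which is
 * smaller than the value just added, so high is bumped -- yielding a
 * 64-bit running count from a 32-bit hardware counter.
 */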
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;
7835 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7836 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7837 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7838 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7839 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7840 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7841 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7842 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7843 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7844 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7845 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7846 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7847 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7849 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7850 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7851 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7852 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7853 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7854 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7855 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7856 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7857 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7858 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7859 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7860 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7861 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7862 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7864 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7865 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}

static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);
7878 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7879 /* All of this garbage is because when using non-tagged
7880 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
7883 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
7884 tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}
7891 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7892 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7893 spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}
7899 /* This part only runs once per second. */
7900 if (!--tp->timer_counter) {
7901 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7902 tg3_periodic_fetch_stats(tp);
		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);
		tp->timer_counter = tp->timer_multiplier;
	}
7949 /* Heartbeat is only sent once every 2 seconds.
7951 * The heartbeat is to tell the ASF firmware that the host
7952 * driver is still alive. In the event that the OS crashes,
7953 * ASF needs to reset the hardware to free up the FIFO space
7954 * that may be filled with rx packets destined for the host.
7955 * If the FIFO is full, ASF will no longer function properly.
7957 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
7961 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7962 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
7966 if (!--tp->asf_counter) {
7967 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7968 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7969 tg3_wait_for_event_ack(tp);
7971 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7972 FWCMD_NICDRV_ALIVE3);
7973 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7974 /* 5 seconds timeout */
7975 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}
7982 spin_unlock(&tp->lock);
restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
		fn = tg3_msi;
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			fn = tg3_msi_1shot;
		flags = IRQF_SAMPLE_RANDOM;
	} else {
		fn = tg3_interrupt;
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
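/* Usage note: tg3_open() calls this once per vector.  With MSI/MSI-X the
 * per-vector names from the snprintf() above come out as "eth0-0",
 * "eth0-1", ..., while the legacy path keeps the bare device name and
 * IRQF_SHARED, since an INTx line may be shared with other devices.
 */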
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;

	if (!netif_running(dev))
		return -ENODEV;
8028 tg3_disable_ints(tp);
8030 free_irq(tnapi->irq_vec, tnapi);
8032 err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	if (err)
		return err;
8037 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8038 tg3_enable_ints(tp);
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}
8058 tg3_disable_ints(tp);
8060 free_irq(tnapi->irq_vec, tnapi);
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok)
		return 0;

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;
	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
8087 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8088 pci_write_config_word(tp->pdev, PCI_COMMAND,
8089 pci_cmd & ~PCI_COMMAND_SERR);
	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;
8102 /* MSI test failed, go back to INTx mode */
8103 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
8104 "switching to INTx mode. Please report this failure to "
8105 "the PCI maintainer and include system chipset information.\n",
8108 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8110 pci_disable_msi(tp->pdev);
8112 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	err = tg3_request_irq(tp, 0);
	if (err)
		return err;
8118 /* Need to reset the chip because the MSI cycle may have terminated
8119 * with Master Abort.
8121 tg3_full_lock(tp, 1);
8123 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8124 err = tg3_init_hw(tp, 1);
	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
		       tp->dev->name, tp->fw_needed);
		return -ENOENT;
	}
8144 fw_data = (void *)tp->fw->data;
8146 /* Firmware blob starts with version numbers, followed by
8147 * start address and _full_ length including BSS sections
	   (which must be longer than the actual data, of course). */
8151 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
8152 if (tp->fw_len < (tp->fw->size - 12)) {
8153 printk(KERN_ERR "%s: bogus length %d in \"%s\"\n",
8154 tp->dev->name, tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}
8160 /* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
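/* Sanity-check arithmetic, with an invented example: a 2048-byte blob
 * carries a 12-byte header (three be32 words), leaving 2036 payload
 * bytes; fw_data[2] must claim at least that much, because text + data
 * can only be padded out by BSS, never truncated.
 */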
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc, cpus = num_online_cpus();
	struct msix_entry msix_ent[tp->irq_max];

	if (cpus == 1)
		/* Just fallback to the simpler MSI mode. */
		return false;

	/*
	 * We want as many rx rings enabled as there are cpus.
	 * The first MSIX vector only deals with link interrupts, etc,
	 * so we add one to the number of vectors we are requesting.
	 */
	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
8181 for (i = 0; i < tp->irq_max; i++) {
8182 msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc != 0) {
		if (rc < TG3_RSS_MIN_NUM_MSIX_VECS)
			return false;
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		printk(KERN_NOTICE
		       "%s: Requested %d MSI-X vectors, received %d\n",
		       tp->dev->name, tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}
8198 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
8200 for (i = 0; i < tp->irq_max; i++)
8201 tp->napi[i].irq_vec = msix_ent[i].vector;
	tp->dev->real_num_tx_queues = tp->irq_cnt - 1;

	return true;
}
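/* Example (assuming tp->irq_max == 5): on a 4-CPU system this requests
 * min(4 + 1, 5) = 5 vectors -- one for link events plus one per rx
 * ring -- and real_num_tx_queues becomes 4, one tx queue per rx vector.
 */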
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
	    !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
		       "Not using MSI.\n", tp->dev->name);
		goto defcfg;
	}
8220 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
8221 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
8222 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
8223 pci_enable_msi(tp->pdev) == 0)
8224 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8226 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8227 u32 msi_mode = tr32(MSGINT_MODE);
8228 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8229 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		tp->dev->real_num_tx_queues = 1;
	}
}
static void tg3_ints_fini(struct tg3 *tp)
{
8242 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8243 pci_disable_msix(tp->pdev);
8244 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
8245 pci_disable_msi(tp->pdev);
8246 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
	tp->tg3_flags3 &= ~TG3_FLG3_ENABLE_RSS;
}

static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;
8255 if (tp->fw_needed) {
8256 err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			printk(KERN_WARNING "%s: TSO capability disabled.\n",
			       tp->dev->name);
			tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
		} else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
			printk(KERN_NOTICE "%s: TSO capability restored.\n",
			       tp->dev->name);
			tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
		}
	}
8271 netif_carrier_off(tp->dev);
	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;
8277 tg3_full_lock(tp, 0);
8279 tg3_disable_ints(tp);
8280 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8282 tg3_full_unlock(tp);
	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);
	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;
8297 tg3_napi_enable(tp);
8299 for (i = 0; i < tp->irq_cnt; i++) {
8300 struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--)
				free_irq(tnapi->irq_vec, tnapi);
			break;
		}
	}

	if (err)
		goto err_out2;
8312 tg3_full_lock(tp, 0);
	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;
8324 BUG_ON(tp->timer_offset > HZ);
8325 tp->timer_counter = tp->timer_multiplier =
8326 (HZ / tp->timer_offset);
8327 tp->asf_counter = tp->asf_multiplier =
8328 ((HZ / tp->timer_offset) * 2);
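		/* Example: with HZ == 1000 and tagged status, timer_offset is
		 * 1000 jiffies (1 s), timer_counter reloads to 1 so the
		 * once-per-second work in tg3_timer() runs on every tick, and
		 * asf_counter reloads to 2 so the ASF heartbeat goes out
		 * every other tick, i.e. every 2 seconds, as the comments
		 * there describe.
		 */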
8330 init_timer(&tp->timer);
8331 tp->timer.expires = jiffies + tp->timer_offset;
8332 tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}
	tg3_full_unlock(tp);

	if (err)
		goto err_out3;
8341 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}
8353 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8354 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
8355 u32 val = tr32(PCIE_TRANSACTION_CFG);
8357 tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}
8365 tg3_full_lock(tp, 0);
8367 add_timer(&tp->timer);
8368 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8369 tg3_enable_ints(tp);
8371 tg3_full_unlock(tp);
	netif_tx_start_all_queues(dev);

	return 0;
err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);
	return err;
}
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;
	struct tg3_hw_status *sblk = tp->napi[0].hw_status;
8400 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8401 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);
8406 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8407 tr32(MAC_MODE), tr32(MAC_STATUS));
8408 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8409 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8410 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8411 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8412 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8413 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8415 /* Send data initiator control block */
8416 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8417 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8418 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8419 tr32(SNDDATAI_STATSCTRL));
8421 /* Send data completion control block */
8422 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8424 /* Send BD ring selector block */
8425 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8426 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8428 /* Send BD initiator control block */
8429 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8430 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8432 /* Send BD completion control block */
8433 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8435 /* Receive list placement control block */
8436 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8437 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8438 printk(" RCVLPC_STATSCTRL[%08x]\n",
8439 tr32(RCVLPC_STATSCTRL));
8441 /* Receive data and receive BD initiator control block */
8442 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8443 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8445 /* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));
8449 /* Receive BD initiator control block */
8450 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8451 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8453 /* Receive BD completion control block */
8454 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8455 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8457 /* Receive list selector control block */
8458 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8459 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8461 /* Mbuf cluster free block */
8462 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8463 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8465 /* Host coalescing control block */
8466 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8467 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8468 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8469 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8470 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8471 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8472 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8473 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8474 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8475 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8476 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8477 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8479 /* Memory arbiter control block */
8480 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8481 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8483 /* Buffer manager control block */
8484 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8485 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8486 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8487 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8488 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8489 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8490 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8491 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8493 /* Read DMA control block */
8494 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8495 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8497 /* Write DMA control block */
8498 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8499 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8501 /* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));
8506 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8507 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8508 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8509 tr32(GRC_LOCAL_CTRL));
8512 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8513 tr32(RCVDBDI_JUMBO_BD + 0x0),
8514 tr32(RCVDBDI_JUMBO_BD + 0x4),
8515 tr32(RCVDBDI_JUMBO_BD + 0x8),
8516 tr32(RCVDBDI_JUMBO_BD + 0xc));
8517 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8518 tr32(RCVDBDI_STD_BD + 0x0),
8519 tr32(RCVDBDI_STD_BD + 0x4),
8520 tr32(RCVDBDI_STD_BD + 0x8),
8521 tr32(RCVDBDI_STD_BD + 0xc));
8522 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8523 tr32(RCVDBDI_MINI_BD + 0x0),
8524 tr32(RCVDBDI_MINI_BD + 0x4),
8525 tr32(RCVDBDI_MINI_BD + 0x8),
8526 tr32(RCVDBDI_MINI_BD + 0xc));
8528 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8529 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8530 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8531 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8532 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8533 val32, val32_2, val32_3, val32_4);
8535 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8536 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8537 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8538 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8539 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8540 val32, val32_2, val32_3, val32_4);
8542 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8543 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8544 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8545 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8546 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8547 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8548 val32, val32_2, val32_3, val32_4, val32_5);
	/* SW status block */
	printk(KERN_DEBUG
	       "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       sblk->status,
	       sblk->status_tag,
	       sblk->rx_jumbo_consumer,
	       sblk->rx_consumer,
	       sblk->rx_mini_consumer,
	       sblk->idx[0].rx_producer,
	       sblk->idx[0].tx_consumer);
8561 /* SW statistics block */
8562 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8563 ((u32 *)tp->hw_stats)[0],
8564 ((u32 *)tp->hw_stats)[1],
8565 ((u32 *)tp->hw_stats)[2],
8566 ((u32 *)tp->hw_stats)[3]);
8569 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
8570 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8571 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8572 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8573 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
8575 /* NIC side send descriptors. */
8576 for (i = 0; i < 6; i++) {
8579 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8580 + (i * sizeof(struct tg3_tx_buffer_desc));
8581 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8583 readl(txd + 0x0), readl(txd + 0x4),
8584 readl(txd + 0x8), readl(txd + 0xc));
8587 /* NIC side RX descriptors. */
8588 for (i = 0; i < 6; i++) {
8591 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8592 + (i * sizeof(struct tg3_rx_buffer_desc));
8593 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8595 readl(rxd + 0x0), readl(rxd + 0x4),
8596 readl(rxd + 0x8), readl(rxd + 0xc));
8597 rxd += (4 * sizeof(u32));
8598 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8600 readl(rxd + 0x0), readl(rxd + 0x4),
8601 readl(rxd + 0x8), readl(rxd + 0xc));
8604 for (i = 0; i < 6; i++) {
8607 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8608 + (i * sizeof(struct tg3_rx_buffer_desc));
8609 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8611 readl(rxd + 0x0), readl(rxd + 0x4),
8612 readl(rxd + 0x8), readl(rxd + 0xc));
8613 rxd += (4 * sizeof(u32));
8614 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8616 readl(rxd + 0x0), readl(rxd + 0x4),
8617 readl(rxd + 0x8), readl(rxd + 0xc));
8622 static struct net_device_stats *tg3_get_stats(struct net_device *);
8623 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);
8630 tg3_napi_disable(tp);
8631 cancel_work_sync(&tp->reset_task);
8633 netif_tx_stop_all_queues(dev);
8635 del_timer_sync(&tp->timer);
	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif
8642 tg3_disable_ints(tp);
8644 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8646 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8648 tg3_full_unlock(tp);
8650 for (i = tp->irq_cnt - 1; i >= 0; i--) {
8651 struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);
8657 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8658 sizeof(tp->net_stats_prev));
8659 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8660 sizeof(tp->estats_prev));
8662 tg3_free_consistent(tp);
8664 tg3_set_power_state(tp, PCI_D3hot);
	netif_carrier_off(tp->dev);

	return 0;
}
static inline unsigned long get_stat64(tg3_stat64_t *val)
{
	unsigned long ret;

#if (BITS_PER_LONG == 32)
	ret = val->low;
#else
	ret = ((u64)val->high << 32) | ((u64)val->low);
#endif
	return ret;
}

static inline u64 get_estat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
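/* Note: on 32-bit hosts get_stat64() returns only the low word, since
 * struct net_device_stats counters are unsigned long there, while the
 * ethtool path below always widens to the full 64-bit value through
 * get_estat64().
 */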
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;
8692 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8693 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8694 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
8698 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8699 tg3_writephy(tp, MII_TG3_TEST1,
8700 val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
8714 #define ESTAT_ADD(member) \
8715 estats->member = old_estats->member + \
8716 get_estat64(&hw_stats->member)
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;
8727 ESTAT_ADD(rx_octets);
8728 ESTAT_ADD(rx_fragments);
8729 ESTAT_ADD(rx_ucast_packets);
8730 ESTAT_ADD(rx_mcast_packets);
8731 ESTAT_ADD(rx_bcast_packets);
8732 ESTAT_ADD(rx_fcs_errors);
8733 ESTAT_ADD(rx_align_errors);
8734 ESTAT_ADD(rx_xon_pause_rcvd);
8735 ESTAT_ADD(rx_xoff_pause_rcvd);
8736 ESTAT_ADD(rx_mac_ctrl_rcvd);
8737 ESTAT_ADD(rx_xoff_entered);
8738 ESTAT_ADD(rx_frame_too_long_errors);
8739 ESTAT_ADD(rx_jabbers);
8740 ESTAT_ADD(rx_undersize_packets);
8741 ESTAT_ADD(rx_in_length_errors);
8742 ESTAT_ADD(rx_out_length_errors);
8743 ESTAT_ADD(rx_64_or_less_octet_packets);
8744 ESTAT_ADD(rx_65_to_127_octet_packets);
8745 ESTAT_ADD(rx_128_to_255_octet_packets);
8746 ESTAT_ADD(rx_256_to_511_octet_packets);
8747 ESTAT_ADD(rx_512_to_1023_octet_packets);
8748 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8749 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8750 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8751 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8752 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8754 ESTAT_ADD(tx_octets);
8755 ESTAT_ADD(tx_collisions);
8756 ESTAT_ADD(tx_xon_sent);
8757 ESTAT_ADD(tx_xoff_sent);
8758 ESTAT_ADD(tx_flow_control);
8759 ESTAT_ADD(tx_mac_errors);
8760 ESTAT_ADD(tx_single_collisions);
8761 ESTAT_ADD(tx_mult_collisions);
8762 ESTAT_ADD(tx_deferred);
8763 ESTAT_ADD(tx_excessive_collisions);
8764 ESTAT_ADD(tx_late_collisions);
8765 ESTAT_ADD(tx_collide_2times);
8766 ESTAT_ADD(tx_collide_3times);
8767 ESTAT_ADD(tx_collide_4times);
8768 ESTAT_ADD(tx_collide_5times);
8769 ESTAT_ADD(tx_collide_6times);
8770 ESTAT_ADD(tx_collide_7times);
8771 ESTAT_ADD(tx_collide_8times);
8772 ESTAT_ADD(tx_collide_9times);
8773 ESTAT_ADD(tx_collide_10times);
8774 ESTAT_ADD(tx_collide_11times);
8775 ESTAT_ADD(tx_collide_12times);
8776 ESTAT_ADD(tx_collide_13times);
8777 ESTAT_ADD(tx_collide_14times);
8778 ESTAT_ADD(tx_collide_15times);
8779 ESTAT_ADD(tx_ucast_packets);
8780 ESTAT_ADD(tx_mcast_packets);
8781 ESTAT_ADD(tx_bcast_packets);
8782 ESTAT_ADD(tx_carrier_sense_errors);
8783 ESTAT_ADD(tx_discards);
8784 ESTAT_ADD(tx_errors);
8786 ESTAT_ADD(dma_writeq_full);
8787 ESTAT_ADD(dma_write_prioq_full);
8788 ESTAT_ADD(rxbds_empty);
8789 ESTAT_ADD(rx_discards);
8790 ESTAT_ADD(rx_errors);
8791 ESTAT_ADD(rx_threshold_hit);
8793 ESTAT_ADD(dma_readq_full);
8794 ESTAT_ADD(dma_read_prioq_full);
8795 ESTAT_ADD(tx_comp_queue_full);
8797 ESTAT_ADD(ring_set_send_prod_index);
8798 ESTAT_ADD(ring_status_update);
8799 ESTAT_ADD(nic_irqs);
8800 ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}

static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
8809 struct net_device_stats *stats = &tp->net_stats;
8810 struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;
8816 stats->rx_packets = old_stats->rx_packets +
8817 get_stat64(&hw_stats->rx_ucast_packets) +
8818 get_stat64(&hw_stats->rx_mcast_packets) +
8819 get_stat64(&hw_stats->rx_bcast_packets);
8821 stats->tx_packets = old_stats->tx_packets +
8822 get_stat64(&hw_stats->tx_ucast_packets) +
8823 get_stat64(&hw_stats->tx_mcast_packets) +
8824 get_stat64(&hw_stats->tx_bcast_packets);
8826 stats->rx_bytes = old_stats->rx_bytes +
8827 get_stat64(&hw_stats->rx_octets);
8828 stats->tx_bytes = old_stats->tx_bytes +
8829 get_stat64(&hw_stats->tx_octets);
8831 stats->rx_errors = old_stats->rx_errors +
8832 get_stat64(&hw_stats->rx_errors);
8833 stats->tx_errors = old_stats->tx_errors +
8834 get_stat64(&hw_stats->tx_errors) +
8835 get_stat64(&hw_stats->tx_mac_errors) +
8836 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8837 get_stat64(&hw_stats->tx_discards);
8839 stats->multicast = old_stats->multicast +
8840 get_stat64(&hw_stats->rx_mcast_packets);
8841 stats->collisions = old_stats->collisions +
8842 get_stat64(&hw_stats->tx_collisions);
8844 stats->rx_length_errors = old_stats->rx_length_errors +
8845 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8846 get_stat64(&hw_stats->rx_undersize_packets);
8848 stats->rx_over_errors = old_stats->rx_over_errors +
8849 get_stat64(&hw_stats->rxbds_empty);
8850 stats->rx_frame_errors = old_stats->rx_frame_errors +
8851 get_stat64(&hw_stats->rx_align_errors);
8852 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8853 get_stat64(&hw_stats->tx_discards);
8854 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8855 get_stat64(&hw_stats->tx_carrier_sense_errors);
8857 stats->rx_crc_errors = old_stats->rx_crc_errors +
8858 calc_crc_errors(tp);
	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp) {
				reg ^= 0xedb88320;
			}
		}
	}

	return ~reg;
}
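/* This is the standard bit-reflected CRC-32 (polynomial 0xedb88320)
 * computed bit-serially, returned with the usual final inversion; the
 * multicast hash below undoes that inversion (~crc) and keeps only the
 * low 7 bits.
 */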
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8895 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8896 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif
8923 if (dev->flags & IFF_PROMISC) {
8924 /* Promiscuous mode. */
8925 rx_mode |= RX_MODE_PROMISC;
8926 } else if (dev->flags & IFF_ALLMULTI) {
8927 /* Accept all multicast. */
8928 tg3_set_multi (tp, 1);
8929 } else if (dev->mc_count < 1) {
8930 /* Reject all multicast. */
8931 tg3_set_multi (tp, 0);
8933 /* Accept one or more multicast(s). */
8934 struct dev_mc_list *mclist;
8936 u32 mc_filter[4] = { 0, };
8941 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8942 i++, mclist = mclist->next) {
8944 crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
8945 bit = ~crc & 0x7f;
8946 regidx = (bit & 0x60) >> 5;
8947 bit &= 0x1f;
8948 mc_filter[regidx] |= (1 << bit);
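8949 /* e.g. a CRC whose low byte is 0xb2 gives bit = ~crc & 0x7f = 0x4d,
8950 * so regidx = 2 and bit 13: set bit 13 of MAC_HASH_REG_2. */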
8951 tw32(MAC_HASH_REG_0, mc_filter[0]);
8952 tw32(MAC_HASH_REG_1, mc_filter[1]);
8953 tw32(MAC_HASH_REG_2, mc_filter[2]);
8954 tw32(MAC_HASH_REG_3, mc_filter[3]);
8957 if (rx_mode != tp->rx_mode) {
8958 tp->rx_mode = rx_mode;
8959 tw32_f(MAC_RX_MODE, rx_mode);
8964 static void tg3_set_rx_mode(struct net_device *dev)
8966 struct tg3 *tp = netdev_priv(dev);
8968 if (!netif_running(dev))
8971 tg3_full_lock(tp, 0);
8972 __tg3_set_rx_mode(dev);
8973 tg3_full_unlock(tp);
8976 #define TG3_REGDUMP_LEN (32 * 1024)
8978 static int tg3_get_regs_len(struct net_device *dev)
8980 return TG3_REGDUMP_LEN;
8983 static void tg3_get_regs(struct net_device *dev,
8984 struct ethtool_regs *regs, void *_p)
8987 struct tg3 *tp = netdev_priv(dev);
8993 memset(p, 0, TG3_REGDUMP_LEN);
8995 if (tp->link_config.phy_is_low_power)
8998 tg3_full_lock(tp, 0);
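8999 /* Each helper below copies a register block into the buffer at its native offset, so the dump mirrors the device register map. */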
9000 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
9001 #define GET_REG32_LOOP(base,len) \
9002 do { p = (u32 *)(orig_p + (base)); \
9003 for (i = 0; i < len; i += 4) \
9004 __GET_REG32((base) + i); \
9006 #define GET_REG32_1(reg) \
9007 do { p = (u32 *)(orig_p + (reg)); \
9008 __GET_REG32((reg)); \
9011 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
9012 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
9013 GET_REG32_LOOP(MAC_MODE, 0x4f0);
9014 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
9015 GET_REG32_1(SNDDATAC_MODE);
9016 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
9017 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
9018 GET_REG32_1(SNDBDC_MODE);
9019 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
9020 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
9021 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
9022 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
9023 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
9024 GET_REG32_1(RCVDCC_MODE);
9025 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
9026 GET_REG32_LOOP(RCVCC_MODE, 0x14);
9027 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
9028 GET_REG32_1(MBFREE_MODE);
9029 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
9030 GET_REG32_LOOP(MEMARB_MODE, 0x10);
9031 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
9032 GET_REG32_LOOP(RDMAC_MODE, 0x08);
9033 GET_REG32_LOOP(WDMAC_MODE, 0x08);
9034 GET_REG32_1(RX_CPU_MODE);
9035 GET_REG32_1(RX_CPU_STATE);
9036 GET_REG32_1(RX_CPU_PGMCTR);
9037 GET_REG32_1(RX_CPU_HWBKPT);
9038 GET_REG32_1(TX_CPU_MODE);
9039 GET_REG32_1(TX_CPU_STATE);
9040 GET_REG32_1(TX_CPU_PGMCTR);
9041 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
9042 GET_REG32_LOOP(FTQ_RESET, 0x120);
9043 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
9044 GET_REG32_1(DMAC_MODE);
9045 GET_REG32_LOOP(GRC_MODE, 0x4c);
9046 if (tp->tg3_flags & TG3_FLAG_NVRAM)
9047 GET_REG32_LOOP(NVRAM_CMD, 0x24);
9050 #undef GET_REG32_LOOP
9053 tg3_full_unlock(tp);
9056 static int tg3_get_eeprom_len(struct net_device *dev)
9058 struct tg3 *tp = netdev_priv(dev);
9060 return tp->nvram_size;
9063 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9065 struct tg3 *tp = netdev_priv(dev);
9068 u32 i, offset, len, b_offset, b_count;
9071 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9074 if (tp->link_config.phy_is_low_power)
9077 offset = eeprom->offset;
9081 eeprom->magic = TG3_EEPROM_MAGIC;
9084 /* adjustments to start on required 4 byte boundary */
9085 b_offset = offset & 3;
9086 b_count = 4 - b_offset;
9087 if (b_count > len) {
9088 /* i.e. offset=1 len=2 */
9091 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9094 memcpy(data, ((char*)&val) + b_offset, b_count);
9095 len -= b_count;
9096 offset += b_count;
9097 eeprom->len += b_count;
9100 /* read bytes up to the last 4-byte boundary */
9101 pd = &data[eeprom->len];
9102 for (i = 0; i < (len - (len & 3)); i += 4) {
9103 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9108 memcpy(pd + i, &val, 4);
9113 /* read last bytes not ending on 4 byte boundary */
9114 pd = &data[eeprom->len];
9116 b_offset = offset + len - b_count;
9117 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9120 memcpy(pd, &val, b_count);
9121 eeprom->len += b_count;
9126 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9128 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9130 struct tg3 *tp = netdev_priv(dev);
9132 u32 offset, len, b_offset, odd_len;
9136 if (tp->link_config.phy_is_low_power)
9139 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
9140 eeprom->magic != TG3_EEPROM_MAGIC)
9143 offset = eeprom->offset;
9146 if ((b_offset = (offset & 3))) {
9147 /* adjustments to start on required 4 byte boundary */
9148 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9159 /* adjustments to end on required 4 byte boundary */
9160 odd_len = 1;
9161 len = (len + 3) & ~3;	/* round up, e.g. len = 5 becomes 8 */
9162 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9168 if (b_offset || odd_len) {
9169 buf = kmalloc(len, GFP_KERNEL);
9172 if (b_offset)
9173 memcpy(buf, &start, 4);
9174 if (odd_len)
9175 memcpy(buf+len-4, &end, 4);
9176 memcpy(buf + b_offset, data, eeprom->len);
9179 ret = tg3_nvram_write_block(tp, offset, len, buf);
9187 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9189 struct tg3 *tp = netdev_priv(dev);
9191 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9192 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9194 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
9197 cmd->supported = (SUPPORTED_Autoneg);
9199 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9200 cmd->supported |= (SUPPORTED_1000baseT_Half |
9201 SUPPORTED_1000baseT_Full);
9203 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
9204 cmd->supported |= (SUPPORTED_100baseT_Half |
9205 SUPPORTED_100baseT_Full |
9206 SUPPORTED_10baseT_Half |
9207 SUPPORTED_10baseT_Full |
9208 SUPPORTED_TP);
9209 cmd->port = PORT_TP;
9210 } else {
9211 cmd->supported |= SUPPORTED_FIBRE;
9212 cmd->port = PORT_FIBRE;
9215 cmd->advertising = tp->link_config.advertising;
9216 if (netif_running(dev)) {
9217 cmd->speed = tp->link_config.active_speed;
9218 cmd->duplex = tp->link_config.active_duplex;
9220 cmd->phy_address = PHY_ADDR;
9221 cmd->transceiver = XCVR_INTERNAL;
9222 cmd->autoneg = tp->link_config.autoneg;
9228 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9230 struct tg3 *tp = netdev_priv(dev);
9232 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9233 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9235 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
9238 if (cmd->autoneg != AUTONEG_ENABLE &&
9239 cmd->autoneg != AUTONEG_DISABLE)
9242 if (cmd->autoneg == AUTONEG_DISABLE &&
9243 cmd->duplex != DUPLEX_FULL &&
9244 cmd->duplex != DUPLEX_HALF)
9247 if (cmd->autoneg == AUTONEG_ENABLE) {
9248 u32 mask = ADVERTISED_Autoneg |
9249 ADVERTISED_Pause |
9250 ADVERTISED_Asym_Pause;
9252 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9253 mask |= ADVERTISED_1000baseT_Half |
9254 ADVERTISED_1000baseT_Full;
9256 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9257 mask |= ADVERTISED_100baseT_Half |
9258 ADVERTISED_100baseT_Full |
9259 ADVERTISED_10baseT_Half |
9260 ADVERTISED_10baseT_Full |
9261 ADVERTISED_TP;
9262 else
9263 mask |= ADVERTISED_FIBRE;
9265 if (cmd->advertising & ~mask)
9268 mask &= (ADVERTISED_1000baseT_Half |
9269 ADVERTISED_1000baseT_Full |
9270 ADVERTISED_100baseT_Half |
9271 ADVERTISED_100baseT_Full |
9272 ADVERTISED_10baseT_Half |
9273 ADVERTISED_10baseT_Full);
9275 cmd->advertising &= mask;
9277 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
9278 if (cmd->speed != SPEED_1000)
9281 if (cmd->duplex != DUPLEX_FULL)
9284 if (cmd->speed != SPEED_100 &&
9285 cmd->speed != SPEED_10)
9290 tg3_full_lock(tp, 0);
9292 tp->link_config.autoneg = cmd->autoneg;
9293 if (cmd->autoneg == AUTONEG_ENABLE) {
9294 tp->link_config.advertising = (cmd->advertising |
9295 ADVERTISED_Autoneg);
9296 tp->link_config.speed = SPEED_INVALID;
9297 tp->link_config.duplex = DUPLEX_INVALID;
9299 tp->link_config.advertising = 0;
9300 tp->link_config.speed = cmd->speed;
9301 tp->link_config.duplex = cmd->duplex;
9304 tp->link_config.orig_speed = tp->link_config.speed;
9305 tp->link_config.orig_duplex = tp->link_config.duplex;
9306 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9308 if (netif_running(dev))
9309 tg3_setup_phy(tp, 1);
9311 tg3_full_unlock(tp);
9316 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9318 struct tg3 *tp = netdev_priv(dev);
9320 strcpy(info->driver, DRV_MODULE_NAME);
9321 strcpy(info->version, DRV_MODULE_VERSION);
9322 strcpy(info->fw_version, tp->fw_ver);
9323 strcpy(info->bus_info, pci_name(tp->pdev));
9326 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9328 struct tg3 *tp = netdev_priv(dev);
9330 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9331 device_can_wakeup(&tp->pdev->dev))
9332 wol->supported = WAKE_MAGIC;
9336 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9337 device_can_wakeup(&tp->pdev->dev))
9338 wol->wolopts = WAKE_MAGIC;
9339 memset(&wol->sopass, 0, sizeof(wol->sopass));
9342 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9344 struct tg3 *tp = netdev_priv(dev);
9345 struct device *dp = &tp->pdev->dev;
9347 if (wol->wolopts & ~WAKE_MAGIC)
9349 if ((wol->wolopts & WAKE_MAGIC) &&
9350 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9353 spin_lock_bh(&tp->lock);
9354 if (wol->wolopts & WAKE_MAGIC) {
9355 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9356 device_set_wakeup_enable(dp, true);
9358 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9359 device_set_wakeup_enable(dp, false);
9361 spin_unlock_bh(&tp->lock);
9366 static u32 tg3_get_msglevel(struct net_device *dev)
9368 struct tg3 *tp = netdev_priv(dev);
9369 return tp->msg_enable;
9372 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9374 struct tg3 *tp = netdev_priv(dev);
9375 tp->msg_enable = value;
9378 static int tg3_set_tso(struct net_device *dev, u32 value)
9380 struct tg3 *tp = netdev_priv(dev);
9382 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9387 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9388 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) {
9390 dev->features |= NETIF_F_TSO6;
9391 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9392 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9393 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9394 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9395 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9396 dev->features |= NETIF_F_TSO_ECN;
9398 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9400 return ethtool_op_set_tso(dev, value);
9403 static int tg3_nway_reset(struct net_device *dev)
9405 struct tg3 *tp = netdev_priv(dev);
9408 if (!netif_running(dev))
9411 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9414 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9415 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9417 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
9421 spin_lock_bh(&tp->lock);
9423 tg3_readphy(tp, MII_BMCR, &bmcr);
9424 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9425 ((bmcr & BMCR_ANENABLE) ||
9426 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9427 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9431 spin_unlock_bh(&tp->lock);
9437 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9439 struct tg3 *tp = netdev_priv(dev);
9441 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9442 ering->rx_mini_max_pending = 0;
9443 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9444 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9446 ering->rx_jumbo_max_pending = 0;
9448 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9450 ering->rx_pending = tp->rx_pending;
9451 ering->rx_mini_pending = 0;
9452 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9453 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9455 ering->rx_jumbo_pending = 0;
9457 ering->tx_pending = tp->napi[0].tx_pending;
9460 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9462 struct tg3 *tp = netdev_priv(dev);
9463 int i, irq_sync = 0, err = 0;
9465 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9466 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9467 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9468 (ering->tx_pending <= MAX_SKB_FRAGS) ||
9469 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9470 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
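9471 /* Chips with the TSO bug may have to segment a TSO packet in the
9472 * driver itself, so they are held to a larger tx ring minimum. */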
9473 if (netif_running(dev)) {
9479 tg3_full_lock(tp, irq_sync);
9481 tp->rx_pending = ering->rx_pending;
9483 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9484 tp->rx_pending > 63)
9485 tp->rx_pending = 63;
9486 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9488 for (i = 0; i < TG3_IRQ_MAX_VECS; i++)
9489 tp->napi[i].tx_pending = ering->tx_pending;
9491 if (netif_running(dev)) {
9492 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9493 err = tg3_restart_hw(tp, 1);
9495 tg3_netif_start(tp);
9498 tg3_full_unlock(tp);
9500 if (irq_sync && !err)
9506 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9508 struct tg3 *tp = netdev_priv(dev);
9510 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9512 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
9513 epause->rx_pause = 1;
9515 epause->rx_pause = 0;
9517 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
9518 epause->tx_pause = 1;
9520 epause->tx_pause = 0;
9523 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9525 struct tg3 *tp = netdev_priv(dev);
9528 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9529 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9532 if (epause->autoneg) {
9534 struct phy_device *phydev;
9536 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
9538 if (epause->rx_pause) {
9539 if (epause->tx_pause)
9540 newadv = ADVERTISED_Pause;
9542 newadv = ADVERTISED_Pause |
9543 ADVERTISED_Asym_Pause;
9544 } else if (epause->tx_pause) {
9545 newadv = ADVERTISED_Asym_Pause;
9549 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9550 u32 oldadv = phydev->advertising &
9551 (ADVERTISED_Pause |
9552 ADVERTISED_Asym_Pause);
9553 if (oldadv != newadv) {
9554 phydev->advertising &=
9555 ~(ADVERTISED_Pause |
9556 ADVERTISED_Asym_Pause);
9557 phydev->advertising |= newadv;
9558 err = phy_start_aneg(phydev);
9561 tp->link_config.advertising &=
9562 ~(ADVERTISED_Pause |
9563 ADVERTISED_Asym_Pause);
9564 tp->link_config.advertising |= newadv;
9567 if (epause->rx_pause)
9568 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9570 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
9572 if (epause->tx_pause)
9573 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9575 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9577 if (netif_running(dev))
9578 tg3_setup_flow_control(tp, 0, 0);
9583 if (netif_running(dev)) {
9588 tg3_full_lock(tp, irq_sync);
9590 if (epause->autoneg)
9591 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9593 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9594 if (epause->rx_pause)
9595 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9597 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
9598 if (epause->tx_pause)
9599 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9601 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9603 if (netif_running(dev)) {
9604 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9605 err = tg3_restart_hw(tp, 1);
9607 tg3_netif_start(tp);
9610 tg3_full_unlock(tp);
9616 static u32 tg3_get_rx_csum(struct net_device *dev)
9618 struct tg3 *tp = netdev_priv(dev);
9619 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9622 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9624 struct tg3 *tp = netdev_priv(dev);
9626 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9632 spin_lock_bh(&tp->lock);
9634 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9636 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9637 spin_unlock_bh(&tp->lock);
9642 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9644 struct tg3 *tp = netdev_priv(dev);
9646 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9652 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
9653 ethtool_op_set_tx_ipv6_csum(dev, data);
9655 ethtool_op_set_tx_csum(dev, data);
9660 static int tg3_get_sset_count(struct net_device *dev, int sset)
9662 switch (sset) {
9663 case ETH_SS_TEST:
9664 return TG3_NUM_TEST;
9665 case ETH_SS_STATS:
9666 return TG3_NUM_STATS;
9672 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9674 switch (stringset) {
9675 case ETH_SS_STATS:
9676 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9678 case ETH_SS_TEST:
9679 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9682 WARN_ON(1); /* we need a WARN() */
9687 static int tg3_phys_id(struct net_device *dev, u32 data)
9689 struct tg3 *tp = netdev_priv(dev);
9692 if (!netif_running(tp->dev))
9695 if (data == 0)
9696 data = UINT_MAX / 2;	/* ethtool's "blink until interrupted" */
9698 for (i = 0; i < (data * 2); i++) {
9700 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9701 LED_CTRL_1000MBPS_ON |
9702 LED_CTRL_100MBPS_ON |
9703 LED_CTRL_10MBPS_ON |
9704 LED_CTRL_TRAFFIC_OVERRIDE |
9705 LED_CTRL_TRAFFIC_BLINK |
9706 LED_CTRL_TRAFFIC_LED);
9709 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9710 LED_CTRL_TRAFFIC_OVERRIDE);
9712 if (msleep_interruptible(500))
9715 tw32(MAC_LED_CTRL, tp->led_ctrl);
9719 static void tg3_get_ethtool_stats(struct net_device *dev,
9720 struct ethtool_stats *estats, u64 *tmp_stats)
9722 struct tg3 *tp = netdev_priv(dev);
9723 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9726 #define NVRAM_TEST_SIZE 0x100
9727 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9728 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9729 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
9730 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9731 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9733 static int tg3_test_nvram(struct tg3 *tp)
9737 int i, j, k, err = 0, size;
9739 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9742 if (tg3_nvram_read(tp, 0, &magic) != 0)
9745 if (magic == TG3_EEPROM_MAGIC)
9746 size = NVRAM_TEST_SIZE;
9747 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9748 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9749 TG3_EEPROM_SB_FORMAT_1) {
9750 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9751 case TG3_EEPROM_SB_REVISION_0:
9752 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9754 case TG3_EEPROM_SB_REVISION_2:
9755 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9757 case TG3_EEPROM_SB_REVISION_3:
9758 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9765 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9766 size = NVRAM_SELFBOOT_HW_SIZE;
9770 buf = kmalloc(size, GFP_KERNEL);
9775 for (i = 0, j = 0; i < size; i += 4, j++) {
9776 err = tg3_nvram_read_be32(tp, i, &buf[j]);
9783 /* Selfboot format */
9784 magic = be32_to_cpu(buf[0]);
9785 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9786 TG3_EEPROM_MAGIC_FW) {
9787 u8 *buf8 = (u8 *) buf, csum8 = 0;
9789 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9790 TG3_EEPROM_SB_REVISION_2) {
9791 /* For rev 2, the csum doesn't include the MBA. */
9792 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9793 csum8 += buf8[i];
9794 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9795 csum8 += buf8[i];
9796 } else {
9797 for (i = 0; i < size; i++)
9798 csum8 += buf8[i];
9810 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9811 TG3_EEPROM_MAGIC_HW) {
9812 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9813 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9814 u8 *buf8 = (u8 *) buf;
9816 /* Separate the parity bits and the data bytes. */
9817 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9818 if ((i == 0) || (i == 8)) {
9822 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9823 parity[k++] = buf8[i] & msk;
9830 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9831 parity[k++] = buf8[i] & msk;
9834 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9835 parity[k++] = buf8[i] & msk;
9838 data[j++] = buf8[i];
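9839 /* Bytes 0, 8, 16, 20, 24 and 28 of the image carry packed parity
9840 * bits rather than payload; each data byte plus its parity bit
9841 * must contain an odd number of set bits. */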
9842 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9843 u8 hw8 = hweight8(data[i]);
9845 if ((hw8 & 0x1) && parity[i])
9847 else if (!(hw8 & 0x1) && !parity[i])
9854 /* Bootstrap checksum at offset 0x10 */
9855 csum = calc_crc((unsigned char *) buf, 0x10);
9856 if (csum != be32_to_cpu(buf[0x10/4]))
9859 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9860 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9861 if (csum != be32_to_cpu(buf[0xfc/4]))
9871 #define TG3_SERDES_TIMEOUT_SEC 2
9872 #define TG3_COPPER_TIMEOUT_SEC 6
9874 static int tg3_test_link(struct tg3 *tp)
9878 if (!netif_running(tp->dev))
9881 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9882 max = TG3_SERDES_TIMEOUT_SEC;
9884 max = TG3_COPPER_TIMEOUT_SEC;
9886 for (i = 0; i < max; i++) {
9887 if (netif_carrier_ok(tp->dev))
9890 if (msleep_interruptible(1000))
9897 /* Only test the commonly used registers */
9898 static int tg3_test_registers(struct tg3 *tp)
9900 int i, is_5705, is_5750;
9901 u32 offset, read_mask, write_mask, val, save_val, read_val;
9905 #define TG3_FL_5705 0x1
9906 #define TG3_FL_NOT_5705 0x2
9907 #define TG3_FL_NOT_5788 0x4
9908 #define TG3_FL_NOT_5750 0x8
9912 /* MAC Control Registers */
9913 { MAC_MODE, TG3_FL_NOT_5705,
9914 0x00000000, 0x00ef6f8c },
9915 { MAC_MODE, TG3_FL_5705,
9916 0x00000000, 0x01ef6b8c },
9917 { MAC_STATUS, TG3_FL_NOT_5705,
9918 0x03800107, 0x00000000 },
9919 { MAC_STATUS, TG3_FL_5705,
9920 0x03800100, 0x00000000 },
9921 { MAC_ADDR_0_HIGH, 0x0000,
9922 0x00000000, 0x0000ffff },
9923 { MAC_ADDR_0_LOW, 0x0000,
9924 0x00000000, 0xffffffff },
9925 { MAC_RX_MTU_SIZE, 0x0000,
9926 0x00000000, 0x0000ffff },
9927 { MAC_TX_MODE, 0x0000,
9928 0x00000000, 0x00000070 },
9929 { MAC_TX_LENGTHS, 0x0000,
9930 0x00000000, 0x00003fff },
9931 { MAC_RX_MODE, TG3_FL_NOT_5705,
9932 0x00000000, 0x000007fc },
9933 { MAC_RX_MODE, TG3_FL_5705,
9934 0x00000000, 0x000007dc },
9935 { MAC_HASH_REG_0, 0x0000,
9936 0x00000000, 0xffffffff },
9937 { MAC_HASH_REG_1, 0x0000,
9938 0x00000000, 0xffffffff },
9939 { MAC_HASH_REG_2, 0x0000,
9940 0x00000000, 0xffffffff },
9941 { MAC_HASH_REG_3, 0x0000,
9942 0x00000000, 0xffffffff },
9944 /* Receive Data and Receive BD Initiator Control Registers. */
9945 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9946 0x00000000, 0xffffffff },
9947 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9948 0x00000000, 0xffffffff },
9949 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9950 0x00000000, 0x00000003 },
9951 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9952 0x00000000, 0xffffffff },
9953 { RCVDBDI_STD_BD+0, 0x0000,
9954 0x00000000, 0xffffffff },
9955 { RCVDBDI_STD_BD+4, 0x0000,
9956 0x00000000, 0xffffffff },
9957 { RCVDBDI_STD_BD+8, 0x0000,
9958 0x00000000, 0xffff0002 },
9959 { RCVDBDI_STD_BD+0xc, 0x0000,
9960 0x00000000, 0xffffffff },
9962 /* Receive BD Initiator Control Registers. */
9963 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9964 0x00000000, 0xffffffff },
9965 { RCVBDI_STD_THRESH, TG3_FL_5705,
9966 0x00000000, 0x000003ff },
9967 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9968 0x00000000, 0xffffffff },
9970 /* Host Coalescing Control Registers. */
9971 { HOSTCC_MODE, TG3_FL_NOT_5705,
9972 0x00000000, 0x00000004 },
9973 { HOSTCC_MODE, TG3_FL_5705,
9974 0x00000000, 0x000000f6 },
9975 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9976 0x00000000, 0xffffffff },
9977 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9978 0x00000000, 0x000003ff },
9979 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9980 0x00000000, 0xffffffff },
9981 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9982 0x00000000, 0x000003ff },
9983 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9984 0x00000000, 0xffffffff },
9985 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9986 0x00000000, 0x000000ff },
9987 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9988 0x00000000, 0xffffffff },
9989 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9990 0x00000000, 0x000000ff },
9991 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9992 0x00000000, 0xffffffff },
9993 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9994 0x00000000, 0xffffffff },
9995 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9996 0x00000000, 0xffffffff },
9997 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9998 0x00000000, 0x000000ff },
9999 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10000 0x00000000, 0xffffffff },
10001 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10002 0x00000000, 0x000000ff },
10003 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10004 0x00000000, 0xffffffff },
10005 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10006 0x00000000, 0xffffffff },
10007 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10008 0x00000000, 0xffffffff },
10009 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10010 0x00000000, 0xffffffff },
10011 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10012 0x00000000, 0xffffffff },
10013 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10014 0xffffffff, 0x00000000 },
10015 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10016 0xffffffff, 0x00000000 },
10018 /* Buffer Manager Control Registers. */
10019 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10020 0x00000000, 0x007fff80 },
10021 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10022 0x00000000, 0x007fffff },
10023 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10024 0x00000000, 0x0000003f },
10025 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10026 0x00000000, 0x000001ff },
10027 { BUFMGR_MB_HIGH_WATER, 0x0000,
10028 0x00000000, 0x000001ff },
10029 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10030 0xffffffff, 0x00000000 },
10031 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10032 0xffffffff, 0x00000000 },
10034 /* Mailbox Registers */
10035 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10036 0x00000000, 0x000001ff },
10037 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10038 0x00000000, 0x000001ff },
10039 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10040 0x00000000, 0x000007ff },
10041 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10042 0x00000000, 0x000001ff },
10044 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10047 is_5705 = is_5750 = 0;
10048 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10049 is_5705 = 1;
10050 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10051 is_5750 = 1;
10054 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10055 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10058 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10061 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10062 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10065 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10068 offset = (u32) reg_tbl[i].offset;
10069 read_mask = reg_tbl[i].read_mask;
10070 write_mask = reg_tbl[i].write_mask;
10072 /* Save the original register content */
10073 save_val = tr32(offset);
10075 /* Determine the read-only value. */
10076 read_val = save_val & read_mask;
10078 /* Write zero to the register, then make sure the read-only bits
10079 * are not changed and the read/write bits are all zeros.
10081 tw32(offset, 0);
10083 val = tr32(offset);
10085 /* Test the read-only and read/write bits. */
10086 if (((val & read_mask) != read_val) || (val & write_mask))
10089 /* Write ones to all the bits defined by RdMask and WrMask, then
10090 * make sure the read-only bits are not changed and the
10091 * read/write bits are all ones.
10093 tw32(offset, read_mask | write_mask);
10095 val = tr32(offset);
10097 /* Test the read-only bits. */
10098 if ((val & read_mask) != read_val)
10101 /* Test the read/write bits. */
10102 if ((val & write_mask) != write_mask)
10105 tw32(offset, save_val);
10111 if (netif_msg_hw(tp))
10112 printk(KERN_ERR PFX "Register test failed at offset %x\n",
10114 tw32(offset, save_val);
10118 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10120 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10124 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10125 for (j = 0; j < len; j += 4) {
10128 tg3_write_mem(tp, offset + j, test_pattern[i]);
10129 tg3_read_mem(tp, offset + j, &val);
10130 if (val != test_pattern[i])
10137 static int tg3_test_memory(struct tg3 *tp)
10139 static struct mem_entry {
10142 } mem_tbl_570x[] = {
10143 { 0x00000000, 0x00b50},
10144 { 0x00002000, 0x1c000},
10145 { 0xffffffff, 0x00000}
10146 }, mem_tbl_5705[] = {
10147 { 0x00000100, 0x0000c},
10148 { 0x00000200, 0x00008},
10149 { 0x00004000, 0x00800},
10150 { 0x00006000, 0x01000},
10151 { 0x00008000, 0x02000},
10152 { 0x00010000, 0x0e000},
10153 { 0xffffffff, 0x00000}
10154 }, mem_tbl_5755[] = {
10155 { 0x00000200, 0x00008},
10156 { 0x00004000, 0x00800},
10157 { 0x00006000, 0x00800},
10158 { 0x00008000, 0x02000},
10159 { 0x00010000, 0x0c000},
10160 { 0xffffffff, 0x00000}
10161 }, mem_tbl_5906[] = {
10162 { 0x00000200, 0x00008},
10163 { 0x00004000, 0x00400},
10164 { 0x00006000, 0x00400},
10165 { 0x00008000, 0x01000},
10166 { 0x00010000, 0x01000},
10167 { 0xffffffff, 0x00000}
10169 struct mem_entry *mem_tbl;
10173 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10174 mem_tbl = mem_tbl_5755;
10175 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10176 mem_tbl = mem_tbl_5906;
10177 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
10178 mem_tbl = mem_tbl_5705;
10180 mem_tbl = mem_tbl_570x;
10182 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10183 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10184 mem_tbl[i].len)) != 0)
10191 #define TG3_MAC_LOOPBACK 0
10192 #define TG3_PHY_LOOPBACK 1
10194 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10196 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10197 u32 desc_idx, coal_now;
10198 struct sk_buff *skb, *rx_skb;
10201 int num_pkts, tx_len, rx_len, i, err;
10202 struct tg3_rx_buffer_desc *desc;
10203 struct tg3_napi *tnapi, *rnapi;
10204 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
10206 if (tp->irq_cnt > 1) {
10207 tnapi = &tp->napi[1];
10208 rnapi = &tp->napi[1];
10210 tnapi = &tp->napi[0];
10211 rnapi = &tp->napi[0];
10213 coal_now = tnapi->coal_now | rnapi->coal_now;
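10214 /* ORed so a single HOSTCC_MODE write in the poll loop below forces an immediate coalescing pass on both the tx and rx vectors. */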
10215 if (loopback_mode == TG3_MAC_LOOPBACK) {
10216 /* HW errata - mac loopback fails in some cases on 5780.
10217 * Normal traffic and PHY loopback are not affected by
10218 * the errata. */
10220 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10223 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10224 MAC_MODE_PORT_INT_LPBACK;
10225 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10226 mac_mode |= MAC_MODE_LINK_POLARITY;
10227 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10228 mac_mode |= MAC_MODE_PORT_MODE_MII;
10230 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10231 tw32(MAC_MODE, mac_mode);
10232 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10235 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10236 tg3_phy_fet_toggle_apd(tp, false);
10237 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10239 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10241 tg3_phy_toggle_automdix(tp, 0);
10243 tg3_writephy(tp, MII_BMCR, val);
10246 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10247 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10248 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10249 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800);
10250 mac_mode |= MAC_MODE_PORT_MODE_MII;
10252 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10254 /* reset to prevent losing 1st rx packet intermittently */
10255 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10256 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10258 tw32_f(MAC_RX_MODE, tp->rx_mode);
10260 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10261 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10262 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10263 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10264 mac_mode |= MAC_MODE_LINK_POLARITY;
10265 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10266 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10268 tw32(MAC_MODE, mac_mode);
10275 tx_len = 1514;
10276 skb = netdev_alloc_skb(tp->dev, tx_len);
10280 tx_data = skb_put(skb, tx_len);
10281 memcpy(tx_data, tp->dev->dev_addr, 6);
10282 memset(tx_data + 6, 0x0, 8);
10284 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10286 for (i = 14; i < tx_len; i++)
10287 tx_data[i] = (u8) (i & 0xff);
10289 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10291 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10292 rnapi->coal_now);
10296 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
10300 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
10302 tnapi->tx_prod++;
10303 num_pkts++;
10305 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
10306 tr32_mailbox(tnapi->prodmbox);
10310 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
10311 for (i = 0; i < 25; i++) {
10312 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10313 coal_now);
10317 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
10318 rx_idx = rnapi->hw_status->idx[0].rx_producer;
10319 if ((tx_idx == tnapi->tx_prod) &&
10320 (rx_idx == (rx_start_idx + num_pkts)))
10324 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10325 dev_kfree_skb(skb);
10327 if (tx_idx != tnapi->tx_prod)
10330 if (rx_idx != rx_start_idx + num_pkts)
10333 desc = &rnapi->rx_rcb[rx_start_idx];
10334 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10335 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10336 if (opaque_key != RXD_OPAQUE_RING_STD)
10339 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10340 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10343 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;	/* less the 4-byte FCS the MAC appends on receive */
10344 if (rx_len != tx_len)
10347 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
10349 map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
10350 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10352 for (i = 14; i < tx_len; i++) {
10353 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10358 /* tg3_free_rings will unmap and free the rx_skb */
10363 #define TG3_MAC_LOOPBACK_FAILED 1
10364 #define TG3_PHY_LOOPBACK_FAILED 2
10365 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10366 TG3_PHY_LOOPBACK_FAILED)
10368 static int tg3_test_loopback(struct tg3 *tp)
10373 if (!netif_running(tp->dev))
10374 return TG3_LOOPBACK_FAILED;
10376 err = tg3_reset_hw(tp, 1);
10377 if (err)
10378 return TG3_LOOPBACK_FAILED;
10380 /* Turn off gphy autopowerdown. */
10381 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10382 tg3_phy_toggle_apd(tp, false);
10384 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10388 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10390 /* Wait for up to 40 microseconds to acquire lock. */
10391 for (i = 0; i < 4; i++) {
10392 status = tr32(TG3_CPMU_MUTEX_GNT);
10393 if (status == CPMU_MUTEX_GNT_DRIVER)
10398 if (status != CPMU_MUTEX_GNT_DRIVER)
10399 return TG3_LOOPBACK_FAILED;
10401 /* Turn off link-based power management. */
10402 cpmuctrl = tr32(TG3_CPMU_CTRL);
10403 tw32(TG3_CPMU_CTRL,
10404 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10405 CPMU_CTRL_LINK_AWARE_MODE));
10408 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10409 err |= TG3_MAC_LOOPBACK_FAILED;
10411 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10412 tw32(TG3_CPMU_CTRL, cpmuctrl);
10414 /* Release the mutex */
10415 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10418 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10419 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10420 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10421 err |= TG3_PHY_LOOPBACK_FAILED;
10424 /* Re-enable gphy autopowerdown. */
10425 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10426 tg3_phy_toggle_apd(tp, true);
10431 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10434 struct tg3 *tp = netdev_priv(dev);
10436 if (tp->link_config.phy_is_low_power)
10437 tg3_set_power_state(tp, PCI_D0);
10439 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10441 if (tg3_test_nvram(tp) != 0) {
10442 etest->flags |= ETH_TEST_FL_FAILED;
10445 if (tg3_test_link(tp) != 0) {
10446 etest->flags |= ETH_TEST_FL_FAILED;
10449 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10450 int err, err2 = 0, irq_sync = 0;
10452 if (netif_running(dev)) {
10454 tg3_netif_stop(tp);
10458 tg3_full_lock(tp, irq_sync);
10460 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10461 err = tg3_nvram_lock(tp);
10462 tg3_halt_cpu(tp, RX_CPU_BASE);
10463 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10464 tg3_halt_cpu(tp, TX_CPU_BASE);
10465 if (!err)
10466 tg3_nvram_unlock(tp);
10468 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10471 if (tg3_test_registers(tp) != 0) {
10472 etest->flags |= ETH_TEST_FL_FAILED;
10475 if (tg3_test_memory(tp) != 0) {
10476 etest->flags |= ETH_TEST_FL_FAILED;
10479 if ((data[4] = tg3_test_loopback(tp)) != 0)
10480 etest->flags |= ETH_TEST_FL_FAILED;
10482 tg3_full_unlock(tp);
10484 if (tg3_test_interrupt(tp) != 0) {
10485 etest->flags |= ETH_TEST_FL_FAILED;
10489 tg3_full_lock(tp, 0);
10491 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10492 if (netif_running(dev)) {
10493 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10494 err2 = tg3_restart_hw(tp, 1);
10496 tg3_netif_start(tp);
10499 tg3_full_unlock(tp);
10501 if (irq_sync && !err2)
10504 if (tp->link_config.phy_is_low_power)
10505 tg3_set_power_state(tp, PCI_D3hot);
10509 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10511 struct mii_ioctl_data *data = if_mii(ifr);
10512 struct tg3 *tp = netdev_priv(dev);
10515 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10516 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10518 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
10523 data->phy_id = PHY_ADDR;
10526 case SIOCGMIIREG: {
10529 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10530 break; /* We have no PHY */
10532 if (tp->link_config.phy_is_low_power)
10535 spin_lock_bh(&tp->lock);
10536 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10537 spin_unlock_bh(&tp->lock);
10539 data->val_out = mii_regval;
10545 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10546 break; /* We have no PHY */
10548 if (!capable(CAP_NET_ADMIN))
10551 if (tp->link_config.phy_is_low_power)
10554 spin_lock_bh(&tp->lock);
10555 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10556 spin_unlock_bh(&tp->lock);
10564 return -EOPNOTSUPP;
10567 #if TG3_VLAN_TAG_USED
10568 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10570 struct tg3 *tp = netdev_priv(dev);
10572 if (!netif_running(dev)) {
10573 tp->vlgrp = grp;
10574 return;
10575 }
10577 tg3_netif_stop(tp);
10579 tg3_full_lock(tp, 0);
10581 tp->vlgrp = grp;
10583 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10584 __tg3_set_rx_mode(dev);
10586 tg3_netif_start(tp);
10588 tg3_full_unlock(tp);
10592 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10594 struct tg3 *tp = netdev_priv(dev);
10596 memcpy(ec, &tp->coal, sizeof(*ec));
10600 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10602 struct tg3 *tp = netdev_priv(dev);
10603 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10604 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10606 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10607 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10608 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10609 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10610 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10613 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10614 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10615 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10616 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10617 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10618 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10619 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10620 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10621 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10622 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10625 /* No rx interrupts will be generated if both are zero */
10626 if ((ec->rx_coalesce_usecs == 0) &&
10627 (ec->rx_max_coalesced_frames == 0))
10630 /* No tx interrupts will be generated if both are zero */
10631 if ((ec->tx_coalesce_usecs == 0) &&
10632 (ec->tx_max_coalesced_frames == 0))
10635 /* Only copy relevant parameters, ignore all others. */
10636 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10637 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10638 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10639 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10640 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10641 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10642 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10643 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10644 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10646 if (netif_running(dev)) {
10647 tg3_full_lock(tp, 0);
10648 __tg3_set_coalesce(tp, &tp->coal);
10649 tg3_full_unlock(tp);
10654 static const struct ethtool_ops tg3_ethtool_ops = {
10655 .get_settings = tg3_get_settings,
10656 .set_settings = tg3_set_settings,
10657 .get_drvinfo = tg3_get_drvinfo,
10658 .get_regs_len = tg3_get_regs_len,
10659 .get_regs = tg3_get_regs,
10660 .get_wol = tg3_get_wol,
10661 .set_wol = tg3_set_wol,
10662 .get_msglevel = tg3_get_msglevel,
10663 .set_msglevel = tg3_set_msglevel,
10664 .nway_reset = tg3_nway_reset,
10665 .get_link = ethtool_op_get_link,
10666 .get_eeprom_len = tg3_get_eeprom_len,
10667 .get_eeprom = tg3_get_eeprom,
10668 .set_eeprom = tg3_set_eeprom,
10669 .get_ringparam = tg3_get_ringparam,
10670 .set_ringparam = tg3_set_ringparam,
10671 .get_pauseparam = tg3_get_pauseparam,
10672 .set_pauseparam = tg3_set_pauseparam,
10673 .get_rx_csum = tg3_get_rx_csum,
10674 .set_rx_csum = tg3_set_rx_csum,
10675 .set_tx_csum = tg3_set_tx_csum,
10676 .set_sg = ethtool_op_set_sg,
10677 .set_tso = tg3_set_tso,
10678 .self_test = tg3_self_test,
10679 .get_strings = tg3_get_strings,
10680 .phys_id = tg3_phys_id,
10681 .get_ethtool_stats = tg3_get_ethtool_stats,
10682 .get_coalesce = tg3_get_coalesce,
10683 .set_coalesce = tg3_set_coalesce,
10684 .get_sset_count = tg3_get_sset_count,
10687 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10689 u32 cursize, val, magic;
10691 tp->nvram_size = EEPROM_CHIP_SIZE;
10693 if (tg3_nvram_read(tp, 0, &magic) != 0)
10696 if ((magic != TG3_EEPROM_MAGIC) &&
10697 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10698 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10702 * Size the chip by reading offsets at increasing powers of two.
10703 * When we encounter our validation signature, we know the addressing
10704 * has wrapped around, and thus have our chip size.
10706 cursize = 0x10;
10708 while (cursize < tp->nvram_size) {
10709 if (tg3_nvram_read(tp, cursize, &val) != 0)
10710 return;
10712 if (val == magic)
10713 break;
10715 cursize <<= 1;
10718 tp->nvram_size = cursize;
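10719 /* e.g. on a 64 KB part, the read at offset 0x10000 wraps back to
10720 * offset 0 and returns the magic, so cursize settles at 0x10000. */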
10721 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10725 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
10726 tg3_nvram_read(tp, 0, &val) != 0)
10729 /* Selfboot format */
10730 if (val != TG3_EEPROM_MAGIC) {
10731 tg3_get_eeprom_size(tp);
10735 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10736 if (val != 0) {
10737 /* This is confusing. We want to operate on the
10738 * 16-bit value at offset 0xf2. The tg3_nvram_read()
10739 * call will read from NVRAM and byteswap the data
10740 * according to the byteswapping settings for all
10741 * other register accesses. This ensures the data we
10742 * want will always reside in the lower 16-bits.
10743 * However, the data in NVRAM is in LE format, which
10744 * means the data from the NVRAM read will always be
10745 * opposite the endianness of the CPU. The 16-bit
10746 * byteswap then brings the data to CPU endianness.
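10747 * For example, the LE 16-bit value 0x0200 (512 KB) reads back here as 0x0002 in the low half; swab16() restores 0x0200, so nvram_size becomes 512 * 1024. */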
10748 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
10749 return;
10752 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10755 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10759 nvcfg1 = tr32(NVRAM_CFG1);
10760 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10761 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10763 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10764 tw32(NVRAM_CFG1, nvcfg1);
10767 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10768 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10769 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10770 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10771 tp->nvram_jedecnum = JEDEC_ATMEL;
10772 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10773 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10775 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10776 tp->nvram_jedecnum = JEDEC_ATMEL;
10777 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10779 case FLASH_VENDOR_ATMEL_EEPROM:
10780 tp->nvram_jedecnum = JEDEC_ATMEL;
10781 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10782 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10784 case FLASH_VENDOR_ST:
10785 tp->nvram_jedecnum = JEDEC_ST;
10786 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10787 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10789 case FLASH_VENDOR_SAIFUN:
10790 tp->nvram_jedecnum = JEDEC_SAIFUN;
10791 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10793 case FLASH_VENDOR_SST_SMALL:
10794 case FLASH_VENDOR_SST_LARGE:
10795 tp->nvram_jedecnum = JEDEC_SST;
10796 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10800 tp->nvram_jedecnum = JEDEC_ATMEL;
10801 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10802 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10806 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10810 nvcfg1 = tr32(NVRAM_CFG1);
10812 /* NVRAM protection for TPM */
10813 if (nvcfg1 & (1 << 27))
10814 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10816 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10817 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10818 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10819 tp->nvram_jedecnum = JEDEC_ATMEL;
10820 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10822 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10823 tp->nvram_jedecnum = JEDEC_ATMEL;
10824 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10825 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10827 case FLASH_5752VENDOR_ST_M45PE10:
10828 case FLASH_5752VENDOR_ST_M45PE20:
10829 case FLASH_5752VENDOR_ST_M45PE40:
10830 tp->nvram_jedecnum = JEDEC_ST;
10831 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10832 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10836 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10837 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10838 case FLASH_5752PAGE_SIZE_256:
10839 tp->nvram_pagesize = 256;
10841 case FLASH_5752PAGE_SIZE_512:
10842 tp->nvram_pagesize = 512;
10844 case FLASH_5752PAGE_SIZE_1K:
10845 tp->nvram_pagesize = 1024;
10847 case FLASH_5752PAGE_SIZE_2K:
10848 tp->nvram_pagesize = 2048;
10850 case FLASH_5752PAGE_SIZE_4K:
10851 tp->nvram_pagesize = 4096;
10853 case FLASH_5752PAGE_SIZE_264:
10854 tp->nvram_pagesize = 264;
10858 /* For eeprom, set pagesize to maximum eeprom size */
10859 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10861 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10862 tw32(NVRAM_CFG1, nvcfg1);
10866 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10868 u32 nvcfg1, protect = 0;
10870 nvcfg1 = tr32(NVRAM_CFG1);
10872 /* NVRAM protection for TPM */
10873 if (nvcfg1 & (1 << 27)) {
10874 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10875 protect = 1;
10878 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10879 switch (nvcfg1) {
10880 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10881 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10882 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10883 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10884 tp->nvram_jedecnum = JEDEC_ATMEL;
10885 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10886 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10887 tp->nvram_pagesize = 264;
10888 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10889 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10890 tp->nvram_size = (protect ? 0x3e200 :
10891 TG3_NVRAM_SIZE_512KB);
10892 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10893 tp->nvram_size = (protect ? 0x1f200 :
10894 TG3_NVRAM_SIZE_256KB);
10896 tp->nvram_size = (protect ? 0x1f200 :
10897 TG3_NVRAM_SIZE_128KB);
10899 case FLASH_5752VENDOR_ST_M45PE10:
10900 case FLASH_5752VENDOR_ST_M45PE20:
10901 case FLASH_5752VENDOR_ST_M45PE40:
10902 tp->nvram_jedecnum = JEDEC_ST;
10903 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10904 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10905 tp->nvram_pagesize = 256;
10906 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10907 tp->nvram_size = (protect ?
10908 TG3_NVRAM_SIZE_64KB :
10909 TG3_NVRAM_SIZE_128KB);
10910 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10911 tp->nvram_size = (protect ?
10912 TG3_NVRAM_SIZE_64KB :
10913 TG3_NVRAM_SIZE_256KB);
10915 tp->nvram_size = (protect ?
10916 TG3_NVRAM_SIZE_128KB :
10917 TG3_NVRAM_SIZE_512KB);
10922 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10926 nvcfg1 = tr32(NVRAM_CFG1);
10928 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10929 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10930 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10931 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10932 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10933 tp->nvram_jedecnum = JEDEC_ATMEL;
10934 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10935 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10937 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10938 tw32(NVRAM_CFG1, nvcfg1);
10940 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10941 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10942 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10943 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10944 tp->nvram_jedecnum = JEDEC_ATMEL;
10945 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10946 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10947 tp->nvram_pagesize = 264;
10949 case FLASH_5752VENDOR_ST_M45PE10:
10950 case FLASH_5752VENDOR_ST_M45PE20:
10951 case FLASH_5752VENDOR_ST_M45PE40:
10952 tp->nvram_jedecnum = JEDEC_ST;
10953 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10954 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10955 tp->nvram_pagesize = 256;
10960 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10962 u32 nvcfg1, protect = 0;
10964 nvcfg1 = tr32(NVRAM_CFG1);
10966 /* NVRAM protection for TPM */
10967 if (nvcfg1 & (1 << 27)) {
10968 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10969 protect = 1;
10972 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10973 switch (nvcfg1) {
10974 case FLASH_5761VENDOR_ATMEL_ADB021D:
10975 case FLASH_5761VENDOR_ATMEL_ADB041D:
10976 case FLASH_5761VENDOR_ATMEL_ADB081D:
10977 case FLASH_5761VENDOR_ATMEL_ADB161D:
10978 case FLASH_5761VENDOR_ATMEL_MDB021D:
10979 case FLASH_5761VENDOR_ATMEL_MDB041D:
10980 case FLASH_5761VENDOR_ATMEL_MDB081D:
10981 case FLASH_5761VENDOR_ATMEL_MDB161D:
10982 tp->nvram_jedecnum = JEDEC_ATMEL;
10983 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10984 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10985 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10986 tp->nvram_pagesize = 256;
10988 case FLASH_5761VENDOR_ST_A_M45PE20:
10989 case FLASH_5761VENDOR_ST_A_M45PE40:
10990 case FLASH_5761VENDOR_ST_A_M45PE80:
10991 case FLASH_5761VENDOR_ST_A_M45PE16:
10992 case FLASH_5761VENDOR_ST_M_M45PE20:
10993 case FLASH_5761VENDOR_ST_M_M45PE40:
10994 case FLASH_5761VENDOR_ST_M_M45PE80:
10995 case FLASH_5761VENDOR_ST_M_M45PE16:
10996 tp->nvram_jedecnum = JEDEC_ST;
10997 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10998 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10999 tp->nvram_pagesize = 256;
11003 if (protect) {
11004 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11005 } else {
11006 switch (nvcfg1) {
11007 case FLASH_5761VENDOR_ATMEL_ADB161D:
11008 case FLASH_5761VENDOR_ATMEL_MDB161D:
11009 case FLASH_5761VENDOR_ST_A_M45PE16:
11010 case FLASH_5761VENDOR_ST_M_M45PE16:
11011 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11013 case FLASH_5761VENDOR_ATMEL_ADB081D:
11014 case FLASH_5761VENDOR_ATMEL_MDB081D:
11015 case FLASH_5761VENDOR_ST_A_M45PE80:
11016 case FLASH_5761VENDOR_ST_M_M45PE80:
11017 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11019 case FLASH_5761VENDOR_ATMEL_ADB041D:
11020 case FLASH_5761VENDOR_ATMEL_MDB041D:
11021 case FLASH_5761VENDOR_ST_A_M45PE40:
11022 case FLASH_5761VENDOR_ST_M_M45PE40:
11023 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11025 case FLASH_5761VENDOR_ATMEL_ADB021D:
11026 case FLASH_5761VENDOR_ATMEL_MDB021D:
11027 case FLASH_5761VENDOR_ST_A_M45PE20:
11028 case FLASH_5761VENDOR_ST_M_M45PE20:
11029 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11035 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11037 tp->nvram_jedecnum = JEDEC_ATMEL;
11038 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11039 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11042 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11046 nvcfg1 = tr32(NVRAM_CFG1);
11048 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11049 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11050 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11051 tp->nvram_jedecnum = JEDEC_ATMEL;
11052 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11053 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11055 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11056 tw32(NVRAM_CFG1, nvcfg1);
11058 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11059 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11060 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11061 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11062 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11063 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11064 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11065 tp->nvram_jedecnum = JEDEC_ATMEL;
11066 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11067 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11069 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11070 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11071 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11072 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11073 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11075 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11076 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11077 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11079 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11080 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11081 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11085 case FLASH_5752VENDOR_ST_M45PE10:
11086 case FLASH_5752VENDOR_ST_M45PE20:
11087 case FLASH_5752VENDOR_ST_M45PE40:
11088 tp->nvram_jedecnum = JEDEC_ST;
11089 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11090 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11092 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11093 case FLASH_5752VENDOR_ST_M45PE10:
11094 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11096 case FLASH_5752VENDOR_ST_M45PE20:
11097 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11099 case FLASH_5752VENDOR_ST_M45PE40:
11100 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11105 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11109 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11110 case FLASH_5752PAGE_SIZE_256:
11111 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11112 tp->nvram_pagesize = 256;
11114 case FLASH_5752PAGE_SIZE_512:
11115 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11116 tp->nvram_pagesize = 512;
11118 case FLASH_5752PAGE_SIZE_1K:
11119 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11120 tp->nvram_pagesize = 1024;
11122 case FLASH_5752PAGE_SIZE_2K:
11123 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11124 tp->nvram_pagesize = 2048;
11126 case FLASH_5752PAGE_SIZE_4K:
11127 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11128 tp->nvram_pagesize = 4096;
11130 case FLASH_5752PAGE_SIZE_264:
11131 tp->nvram_pagesize = 264;
11133 case FLASH_5752PAGE_SIZE_528:
11134 tp->nvram_pagesize = 528;
11139 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
11140 static void __devinit tg3_nvram_init(struct tg3 *tp)
11142 tw32_f(GRC_EEPROM_ADDR,
11143 (EEPROM_ADDR_FSM_RESET |
11144 (EEPROM_DEFAULT_CLOCK_PERIOD <<
11145 EEPROM_ADDR_CLKPERD_SHIFT)));
11149 /* Enable seeprom accesses. */
11150 tw32_f(GRC_LOCAL_CTRL,
11151 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
11154 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11155 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
11156 tp->tg3_flags |= TG3_FLAG_NVRAM;
11158 if (tg3_nvram_lock(tp)) {
11159 printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
11160 "tg3_nvram_init failed.\n", tp->dev->name);
11163 tg3_enable_nvram_access(tp);
11165 tp->nvram_size = 0;
11167 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11168 tg3_get_5752_nvram_info(tp);
11169 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11170 tg3_get_5755_nvram_info(tp);
11171 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11172 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11174 tg3_get_5787_nvram_info(tp);
11175 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11176 tg3_get_5761_nvram_info(tp);
11177 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11178 tg3_get_5906_nvram_info(tp);
11179 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
11180 tg3_get_57780_nvram_info(tp);
11182 tg3_get_nvram_info(tp);
11184 if (tp->nvram_size == 0)
11185 tg3_get_nvram_size(tp);
11187 tg3_disable_nvram_access(tp);
11188 tg3_nvram_unlock(tp);
11191 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
11193 tg3_get_eeprom_size(tp);
11197 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11198 u32 offset, u32 len, u8 *buf)
11203 for (i = 0; i < len; i += 4) {
11209 memcpy(&data, buf + i, 4);
11212 * The SEEPROM interface expects the data to always be opposite
11213 * the native endian format. We accomplish this by reversing
11214 * all the operations that would have been performed on the
11215 * data from a call to tg3_nvram_read_be32().
11217 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
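11218 /* e.g. buf bytes 11 22 33 44 reach the data register as 0x44332211 on both little- and big-endian CPUs */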
11219 val = tr32(GRC_EEPROM_ADDR);
11220 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11222 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11224 tw32(GRC_EEPROM_ADDR, val |
11225 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11226 (addr & EEPROM_ADDR_ADDR_MASK) |
11227 EEPROM_ADDR_START |
11228 EEPROM_ADDR_WRITE);
11230 for (j = 0; j < 1000; j++) {
11231 val = tr32(GRC_EEPROM_ADDR);
11233 if (val & EEPROM_ADDR_COMPLETE)
11237 if (!(val & EEPROM_ADDR_COMPLETE)) {
11246 /* offset and length are dword aligned */
11247 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11251 u32 pagesize = tp->nvram_pagesize;
11252 u32 pagemask = pagesize - 1;
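11253 /* Unbuffered flash is erased and programmed a page at a time, so
11254 * do read-modify-write: e.g. with 256-byte pages, a write to offset
11255 * 0x132 reads back the page at 0x100 and patches it from 0x32 on. */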
11256 tmp = kmalloc(pagesize, GFP_KERNEL);
11262 u32 phy_addr, page_off, size;
11264 phy_addr = offset & ~pagemask;
11266 for (j = 0; j < pagesize; j += 4) {
11267 ret = tg3_nvram_read_be32(tp, phy_addr + j,
11268 (__be32 *) (tmp + j));
11275 page_off = offset & pagemask;
11282 memcpy(tmp + page_off, buf, size);
11284 offset = offset + (pagesize - page_off);
11286 tg3_enable_nvram_access(tp);
11289 * Before we can erase the flash page, we need
11290 * to issue a special "write enable" command.
11292 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11294 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11297 /* Erase the target page */
11298 tw32(NVRAM_ADDR, phy_addr);
11300 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11301 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11303 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11306 /* Issue another write enable to start the write. */
11307 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11309 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11312 for (j = 0; j < pagesize; j += 4) {
11315 data = *((__be32 *) (tmp + j));
11317 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11319 tw32(NVRAM_ADDR, phy_addr + j);
11321 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11325 nvram_cmd |= NVRAM_CMD_FIRST;
11326 else if (j == (pagesize - 4))
11327 nvram_cmd |= NVRAM_CMD_LAST;
11329 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11336 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11337 tg3_nvram_exec_cmd(tp, nvram_cmd);
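/* Rough flow for one page in the unbuffered path (a sketch, assuming
 * typical serial-flash semantics): read the page into tmp, merge the
 * caller's bytes, WREN, page ERASE, WREN again, program each dword
 * with NVRAM_CMD_FIRST on the first and NVRAM_CMD_LAST on the final
 * one, then WRDI to drop the write enable.
 */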
11344 /* offset and length are dword aligned */
11345 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11350 for (i = 0; i < len; i += 4, offset += 4) {
11351 u32 page_off, phy_addr, nvram_cmd;
11354 memcpy(&data, buf + i, 4);
11355 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11357 page_off = offset % tp->nvram_pagesize;
11359 phy_addr = tg3_nvram_phys_addr(tp, offset);
11361 tw32(NVRAM_ADDR, phy_addr);
11363 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11365 if ((page_off == 0) || (i == 0))
11366 nvram_cmd |= NVRAM_CMD_FIRST;
11367 if (page_off == (tp->nvram_pagesize - 4))
11368 nvram_cmd |= NVRAM_CMD_LAST;
11370 if (i == (len - 4))
11371 nvram_cmd |= NVRAM_CMD_LAST;
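/* Example (sketch): with nvram_pagesize == 264 and offset == 260, the
 * dword at page_off 260 (pagesize - 4) gets NVRAM_CMD_LAST, and the
 * next dword (offset 264, page_off 0) opens a fresh page with
 * NVRAM_CMD_FIRST.
 */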
11373 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11374 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
11375 (tp->nvram_jedecnum == JEDEC_ST) &&
11376 (nvram_cmd & NVRAM_CMD_FIRST)) {
11378 if ((ret = tg3_nvram_exec_cmd(tp,
11379 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11384 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11385 /* We always do complete word writes to eeprom. */
11386 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11389 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11395 /* offset and length are dword aligned */
11396 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11400 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11401 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11402 ~GRC_LCLCTRL_GPIO_OUTPUT1);
11406 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11407 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11412 ret = tg3_nvram_lock(tp);
11416 tg3_enable_nvram_access(tp);
11417 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11418 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
11419 tw32(NVRAM_WRITE1, 0x406);
11421 grc_mode = tr32(GRC_MODE);
11422 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11424 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11425 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11427 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11431 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11435 grc_mode = tr32(GRC_MODE);
11436 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11438 tg3_disable_nvram_access(tp);
11439 tg3_nvram_unlock(tp);
11442 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11443 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
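/* Dispatch summary (sketch): parts without TG3_FLAG_NVRAM go through
 * the GRC SEEPROM state machine; buffered flash and plain EEPROMs take
 * the per-dword buffered path; unbuffered flash needs the page
 * read-erase-rewrite path above.
 */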
11450 struct subsys_tbl_ent {
11451 u16 subsys_vendor, subsys_devid;
11455 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11456 /* Broadcom boards. */
11457 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11458 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11459 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11460 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
11461 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11462 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11463 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
11464 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11465 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11466 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11467 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11470 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11471 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11472 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
11473 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11474 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11477 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11478 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11479 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11480 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11482 /* Compaq boards. */
11483 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11484 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11485 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
11486 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11487 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11490 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11493 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11497 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11498 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11499 tp->pdev->subsystem_vendor) &&
11500 (subsys_id_to_phy_id[i].subsys_devid ==
11501 tp->pdev->subsystem_device))
11502 return &subsys_id_to_phy_id[i];
11507 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11512 /* On some early chips the SRAM cannot be accessed in D3hot state,
11513 * so we need to make sure we're in D0.
11515 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11516 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11517 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11520 /* Make sure register accesses (indirect or otherwise)
11521 * will function correctly.
11523 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11524 tp->misc_host_ctrl);
11526 /* The memory arbiter has to be enabled in order for SRAM accesses
11527 * to succeed. Normally on powerup the tg3 chip firmware will make
11528 * sure it is enabled, but other entities such as system netboot
11529 * code might disable it.
11531 val = tr32(MEMARB_MODE);
11532 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11534 tp->phy_id = PHY_ID_INVALID;
11535 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11537 /* Assume an onboard device and WOL capable by default. */
11538 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
11540 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11541 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
11542 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11543 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11545 val = tr32(VCPU_CFGSHDW);
11546 if (val & VCPU_CFGSHDW_ASPM_DBNC)
11547 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11548 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
11549 (val & VCPU_CFGSHDW_WOL_MAGPKT))
11550 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11554 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11555 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11556 u32 nic_cfg, led_cfg;
11557 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
11558 int eeprom_phy_serdes = 0;
11560 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11561 tp->nic_sram_data_cfg = nic_cfg;
11563 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11564 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11565 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11566 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11567 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11568 (ver > 0) && (ver < 0x100))
11569 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11571 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11572 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11574 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11575 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11576 eeprom_phy_serdes = 1;
11578 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11579 if (nic_phy_id != 0) {
11580 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11581 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11583 eeprom_phy_id = (id1 >> 16) << 10;
11584 eeprom_phy_id |= (id2 & 0xfc00) << 16;
11585 eeprom_phy_id |= (id2 & 0x03ff) << 0;
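/* Packing sketch, inferred from the shifts above: bits 25:10 carry
 * PHYSID1[15:0], bits 31:26 carry the OUI bits PHYSID2[15:10], and
 * bits 9:0 carry the model/revision bits PHYSID2[9:0].
 * tg3_phy_probe() below builds hw_phy_id the same way.
 */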
11589 tp->phy_id = eeprom_phy_id;
11590 if (eeprom_phy_serdes) {
11591 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
11592 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11594 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11597 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11598 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11599 SHASTA_EXT_LED_MODE_MASK);
11601 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11605 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11606 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11609 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11610 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11613 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11614 tp->led_ctrl = LED_CTRL_MODE_MAC;
11616 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11617 * read on some older 5700/5701 bootcode.
11619 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11621 GET_ASIC_REV(tp->pci_chip_rev_id) ==
11623 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11627 case SHASTA_EXT_LED_SHARED:
11628 tp->led_ctrl = LED_CTRL_MODE_SHARED;
11629 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11630 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11631 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11632 LED_CTRL_MODE_PHY_2);
11635 case SHASTA_EXT_LED_MAC:
11636 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11639 case SHASTA_EXT_LED_COMBO:
11640 tp->led_ctrl = LED_CTRL_MODE_COMBO;
11641 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11642 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11643 LED_CTRL_MODE_PHY_2);
11648 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11649 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11650 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11651 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11653 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11654 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11656 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
11657 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
11658 if ((tp->pdev->subsystem_vendor ==
11659 PCI_VENDOR_ID_ARIMA) &&
11660 (tp->pdev->subsystem_device == 0x205a ||
11661 tp->pdev->subsystem_device == 0x2063))
11662 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11664 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11665 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11668 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11669 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
11670 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11671 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11674 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11675 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11676 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
11678 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11679 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11680 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11682 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11683 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
11684 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11686 if (cfg2 & (1 << 17))
11687 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11689 /* serdes signal pre-emphasis in register 0x590 is set by
11690 * the bootcode if bit 18 is set */
11691 if (cfg2 & (1 << 18))
11692 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
11694 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11695 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
11696 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
11697 tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
11699 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11702 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11703 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11704 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11707 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11708 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11709 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11710 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11711 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11712 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
11715 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11716 device_set_wakeup_enable(&tp->pdev->dev,
11717 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
11720 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11725 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11726 tw32(OTP_CTRL, cmd);
11728 /* Wait for up to 1 ms for command to execute. */
11729 for (i = 0; i < 100; i++) {
11730 val = tr32(OTP_STATUS);
11731 if (val & OTP_STATUS_CMD_DONE)
11736 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11739 /* Read the gphy configuration from the OTP region of the chip. The gphy
11740 * configuration is a 32-bit value that straddles the alignment boundary.
11741 * We do two 32-bit reads and then shift and merge the results.
11743 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11745 u32 bhalf_otp, thalf_otp;
11747 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11749 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11752 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11754 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11757 thalf_otp = tr32(OTP_READ_DATA);
11759 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11761 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11764 bhalf_otp = tr32(OTP_READ_DATA);
11766 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
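/* Merge sketch: if the reads return thalf_otp == 0xAAAABBBB and
 * bhalf_otp == 0xCCCCDDDD, the gphy config dword is 0xBBBBCCCC, i.e.
 * the low half of the first word joined to the high half of the
 * second, reassembling the value that straddles the boundary.
 */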
11769 static int __devinit tg3_phy_probe(struct tg3 *tp)
11771 u32 hw_phy_id_1, hw_phy_id_2;
11772 u32 hw_phy_id, hw_phy_id_masked;
11775 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11776 return tg3_phy_init(tp);
11778 /* Reading the PHY ID register can conflict with ASF
11779 * firmware access to the PHY hardware.
11782 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11783 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
11784 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11786 /* Now read the physical PHY_ID from the chip and verify
11787 * that it is sane. If it doesn't look good, we fall back
11788 * first to the hard-coded subsystem-ID table PHY_ID and,
11789 * failing that, to the value found in the eeprom area.
11791 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11792 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11794 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11795 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11796 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11798 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11801 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11802 tp->phy_id = hw_phy_id;
11803 if (hw_phy_id_masked == PHY_ID_BCM8002)
11804 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11806 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
11808 if (tp->phy_id != PHY_ID_INVALID) {
11809 /* Do nothing, phy ID already set up in
11810 * tg3_get_eeprom_hw_cfg().
11813 struct subsys_tbl_ent *p;
11815 /* No eeprom signature? Try the hardcoded
11816 * subsys device table.
11818 p = lookup_by_subsys(tp);
11822 tp->phy_id = p->phy_id;
11824 tp->phy_id == PHY_ID_BCM8002)
11825 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11829 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
11830 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
11831 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
11832 u32 bmsr, adv_reg, tg3_ctrl, mask;
11834 tg3_readphy(tp, MII_BMSR, &bmsr);
11835 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11836 (bmsr & BMSR_LSTATUS))
11837 goto skip_phy_reset;
11839 err = tg3_phy_reset(tp);
11843 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11844 ADVERTISE_100HALF | ADVERTISE_100FULL |
11845 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11847 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11848 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11849 MII_TG3_CTRL_ADV_1000_FULL);
11850 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11851 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11852 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11853 MII_TG3_CTRL_ENABLE_AS_MASTER);
11856 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11857 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11858 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11859 if (!tg3_copper_is_advertising_all(tp, mask)) {
11860 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11862 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11863 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11865 tg3_writephy(tp, MII_BMCR,
11866 BMCR_ANENABLE | BMCR_ANRESTART);
11868 tg3_phy_set_wirespeed(tp);
11870 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11871 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11872 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11876 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11877 err = tg3_init_5401phy_dsp(tp);
11882 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11883 err = tg3_init_5401phy_dsp(tp);
11886 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11887 tp->link_config.advertising =
11888 (ADVERTISED_1000baseT_Half |
11889 ADVERTISED_1000baseT_Full |
11890 ADVERTISED_Autoneg |
11892 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11893 tp->link_config.advertising &=
11894 ~(ADVERTISED_1000baseT_Half |
11895 ADVERTISED_1000baseT_Full);
11900 static void __devinit tg3_read_partno(struct tg3 *tp)
11902 unsigned char vpd_data[256]; /* in little-endian format */
11906 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
11907 tg3_nvram_read(tp, 0x0, &magic))
11908 goto out_not_found;
11910 if (magic == TG3_EEPROM_MAGIC) {
11911 for (i = 0; i < 256; i += 4) {
11914 /* The data is in little-endian format in NVRAM.
11915 * Use the big-endian read routines to preserve
11916 * the byte order as it exists in NVRAM.
11918 if (tg3_nvram_read_be32(tp, 0x100 + i, &tmp))
11919 goto out_not_found;
11921 memcpy(&vpd_data[i], &tmp, sizeof(tmp));
11926 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11927 for (i = 0; i < 256; i += 4) {
11932 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11934 while (j++ < 100) {
11935 pci_read_config_word(tp->pdev, vpd_cap +
11936 PCI_VPD_ADDR, &tmp16);
11937 if (tmp16 & 0x8000)
11941 if (!(tmp16 & 0x8000))
11942 goto out_not_found;
11944 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11946 v = cpu_to_le32(tmp);
11947 memcpy(&vpd_data[i], &v, sizeof(v));
11951 /* Now parse and find the part number. */
11952 for (i = 0; i < 254; ) {
11953 unsigned char val = vpd_data[i];
11954 unsigned int block_end;
11956 if (val == 0x82 || val == 0x91) {
11959 (vpd_data[i + 2] << 8)));
11964 goto out_not_found;
11966 block_end = (i + 3 +
11968 (vpd_data[i + 2] << 8)));
11971 if (block_end > 256)
11972 goto out_not_found;
11974 while (i < (block_end - 2)) {
11975 if (vpd_data[i + 0] == 'P' &&
11976 vpd_data[i + 1] == 'N') {
11977 int partno_len = vpd_data[i + 2];
11980 if (partno_len > 24 || (partno_len + i) > 256)
11981 goto out_not_found;
11983 memcpy(tp->board_part_number,
11984 &vpd_data[i], partno_len);
11989 i += 3 + vpd_data[i + 2];
11992 /* Part number not found. */
11993 goto out_not_found;
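/* VPD layout assumed by the loop above (sketch): a large-resource tag
 * byte (0x82 identifier string, 0x90/0x91 read/write data) is followed
 * by a little-endian 16-bit length; inside the data area each keyword
 * ("PN", "EC", ...) is followed by a one-byte length and its value.
 */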
11997 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11998 strcpy(tp->board_part_number, "BCM95906");
11999 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12000 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
12001 strcpy(tp->board_part_number, "BCM57780");
12002 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12003 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
12004 strcpy(tp->board_part_number, "BCM57760");
12005 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12006 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
12007 strcpy(tp->board_part_number, "BCM57790");
12008 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12009 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
12010 strcpy(tp->board_part_number, "BCM57788");
12012 strcpy(tp->board_part_number, "none");
12015 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
12019 if (tg3_nvram_read(tp, offset, &val) ||
12020 (val & 0xfc000000) != 0x0c000000 ||
12021 tg3_nvram_read(tp, offset + 4, &val) ||
12028 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12030 u32 val, offset, start, ver_offset;
12032 bool newver = false;
12034 if (tg3_nvram_read(tp, 0xc, &offset) ||
12035 tg3_nvram_read(tp, 0x4, &start))
12038 offset = tg3_nvram_logical_addr(tp, offset);
12040 if (tg3_nvram_read(tp, offset, &val))
12043 if ((val & 0xfc000000) == 0x0c000000) {
12044 if (tg3_nvram_read(tp, offset + 4, &val))
12052 if (tg3_nvram_read(tp, offset + 8, &ver_offset))
12055 offset = offset + ver_offset - start;
12056 for (i = 0; i < 16; i += 4) {
12058 if (tg3_nvram_read_be32(tp, offset + i, &v))
12061 memcpy(tp->fw_ver + i, &v, sizeof(v));
12066 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
12069 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
12070 TG3_NVM_BCVER_MAJSFT;
12071 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
12072 snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor);
12076 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
12078 u32 val, major, minor;
12080 /* Use native endian representation */
12081 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
12084 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
12085 TG3_NVM_HWSB_CFG1_MAJSFT;
12086 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
12087 TG3_NVM_HWSB_CFG1_MINSFT;
12089 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
12092 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12094 u32 offset, major, minor, build;
12096 tp->fw_ver[0] = 's';
12097 tp->fw_ver[1] = 'b';
12098 tp->fw_ver[2] = '\0';
12100 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
12103 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
12104 case TG3_EEPROM_SB_REVISION_0:
12105 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
12107 case TG3_EEPROM_SB_REVISION_2:
12108 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
12110 case TG3_EEPROM_SB_REVISION_3:
12111 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
12117 if (tg3_nvram_read(tp, offset, &val))
12120 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
12121 TG3_EEPROM_SB_EDH_BLD_SHFT;
12122 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
12123 TG3_EEPROM_SB_EDH_MAJ_SHFT;
12124 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
12126 if (minor > 99 || build > 26)
12129 snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
12132 tp->fw_ver[8] = 'a' + build - 1;
12133 tp->fw_ver[9] = '\0';
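/* Example (sketch, assuming the suffix is only added for build > 0):
 * major 1, minor 2, build 3 formats as "sb v1.02" and then gains the
 * suffix 'c' at fw_ver[8], giving "sb v1.02c"; builds 1..26 map to
 * 'a'..'z'.
 */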
12137 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
12139 u32 val, offset, start;
12142 for (offset = TG3_NVM_DIR_START;
12143 offset < TG3_NVM_DIR_END;
12144 offset += TG3_NVM_DIRENT_SIZE) {
12145 if (tg3_nvram_read(tp, offset, &val))
12148 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
12152 if (offset == TG3_NVM_DIR_END)
12155 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12156 start = 0x08000000;
12157 else if (tg3_nvram_read(tp, offset - 4, &start))
12160 if (tg3_nvram_read(tp, offset + 4, &offset) ||
12161 !tg3_fw_img_is_valid(tp, offset) ||
12162 tg3_nvram_read(tp, offset + 8, &val))
12165 offset += val - start;
12167 vlen = strlen(tp->fw_ver);
12169 tp->fw_ver[vlen++] = ',';
12170 tp->fw_ver[vlen++] = ' ';
12172 for (i = 0; i < 4; i++) {
12174 if (tg3_nvram_read_be32(tp, offset, &v))
12177 offset += sizeof(v);
12179 if (vlen > TG3_VER_SIZE - sizeof(v)) {
12180 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
12184 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
12189 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12194 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
12195 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
12198 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
12199 if (apedata != APE_SEG_SIG_MAGIC)
12202 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
12203 if (!(apedata & APE_FW_STATUS_READY))
12206 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
12208 vlen = strlen(tp->fw_ver);
12210 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d",
12211 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
12212 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
12213 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
12214 (apedata & APE_FW_VERSION_BLDMSK));
12217 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
12221 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
12222 tp->fw_ver[0] = 's';
12223 tp->fw_ver[1] = 'b';
12224 tp->fw_ver[2] = '\0';
12229 if (tg3_nvram_read(tp, 0, &val))
12232 if (val == TG3_EEPROM_MAGIC)
12233 tg3_read_bc_ver(tp);
12234 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
12235 tg3_read_sb_ver(tp, val);
12236 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12237 tg3_read_hwsb_ver(tp);
12241 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12242 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
12245 tg3_read_mgmtfw_ver(tp);
12247 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
12250 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
12252 static int __devinit tg3_get_invariants(struct tg3 *tp)
12254 static struct pci_device_id write_reorder_chipsets[] = {
12255 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12256 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12257 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12258 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
12259 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
12260 PCI_DEVICE_ID_VIA_8385_0) },
12264 u32 pci_state_reg, grc_misc_cfg;
12269 /* Force memory write invalidate off. If we leave it on,
12270 * then on 5700_BX chips we have to enable a workaround.
12271 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
12272 * to match the cacheline size. The Broadcom driver has this
12273 * workaround but turns MWI off all the time, so it never uses
12274 * it. This seems to suggest that the workaround is insufficient.
12276 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12277 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
12278 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12280 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
12281 * has the register indirect write enable bit set before
12282 * we try to access any of the MMIO registers. It is also
12283 * critical that the PCI-X hw workaround situation is decided
12284 * before that as well.
12286 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12289 tp->pci_chip_rev_id = (misc_ctrl_reg >>
12290 MISC_HOST_CTRL_CHIPREV_SHIFT);
12291 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12292 u32 prod_id_asic_rev;
12294 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
12295 &prod_id_asic_rev);
12296 tp->pci_chip_rev_id = prod_id_asic_rev;
12299 /* Wrong chip ID in 5752 A0. This code can be removed later
12300 * as A0 is not in production.
12302 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12303 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12305 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12306 * we need to disable memory and use config. cycles
12307 * only to access all registers. The 5702/03 chips
12308 * can mistakenly decode the special cycles from the
12309 * ICH chipsets as memory write cycles, causing corruption
12310 * of register and memory space. Only certain ICH bridges
12311 * will drive special cycles with non-zero data during the
12312 * address phase which can fall within the 5703's address
12313 * range. This is not an ICH bug as the PCI spec allows
12314 * non-zero address during special cycles. However, only
12315 * these ICH bridges are known to drive non-zero addresses
12316 * during special cycles.
12318 * Since special cycles do not cross PCI bridges, we only
12319 * enable this workaround if the 5703 is on the secondary
12320 * bus of these ICH bridges.
12322 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12323 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12324 static struct tg3_dev_id {
12328 } ich_chipsets[] = {
12329 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12331 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12333 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12335 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12339 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12340 struct pci_dev *bridge = NULL;
12342 while (pci_id->vendor != 0) {
12343 bridge = pci_get_device(pci_id->vendor, pci_id->device,
12349 if (pci_id->rev != PCI_ANY_ID) {
12350 if (bridge->revision > pci_id->rev)
12353 if (bridge->subordinate &&
12354 (bridge->subordinate->number ==
12355 tp->pdev->bus->number)) {
12357 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12358 pci_dev_put(bridge);
12364 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12365 static struct tg3_dev_id {
12368 } bridge_chipsets[] = {
12369 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12370 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12373 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12374 struct pci_dev *bridge = NULL;
12376 while (pci_id->vendor != 0) {
12377 bridge = pci_get_device(pci_id->vendor,
12384 if (bridge->subordinate &&
12385 (bridge->subordinate->number <=
12386 tp->pdev->bus->number) &&
12387 (bridge->subordinate->subordinate >=
12388 tp->pdev->bus->number)) {
12389 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12390 pci_dev_put(bridge);
12396 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12397 * DMA addresses > 40-bit. This bridge may have other additional
12398 * 57xx devices behind it in some 4-port NIC designs for example.
12399 * Any tg3 device found behind the bridge will also need the 40-bit DMA workaround. */
12402 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12403 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12404 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12405 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12406 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12409 struct pci_dev *bridge = NULL;
12412 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12413 PCI_DEVICE_ID_SERVERWORKS_EPB,
12415 if (bridge && bridge->subordinate &&
12416 (bridge->subordinate->number <=
12417 tp->pdev->bus->number) &&
12418 (bridge->subordinate->subordinate >=
12419 tp->pdev->bus->number)) {
12420 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12421 pci_dev_put(bridge);
12427 /* Initialize misc host control in PCI block. */
12428 tp->misc_host_ctrl |= (misc_ctrl_reg &
12429 MISC_HOST_CTRL_CHIPREV);
12430 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12431 tp->misc_host_ctrl);
12433 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12434 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12435 tp->pdev_peer = tg3_find_peer(tp);
12437 /* Intentionally exclude ASIC_REV_5906 */
12438 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12439 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12440 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12442 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12443 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12444 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
12446 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12447 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12448 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12449 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12450 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12451 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12453 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12454 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12455 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12457 /* 5700 B0 chips do not support checksumming correctly due
12458 * to hardware bugs.
12460 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12461 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12463 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12464 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12465 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
12466 tp->dev->features |= NETIF_F_IPV6_CSUM;
12469 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12470 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12471 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12472 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12473 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12474 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12475 tp->pdev_peer == tp->pdev))
12476 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12478 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12479 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12480 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12481 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12483 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12484 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12486 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12487 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12493 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12494 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12495 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
12497 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12500 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12501 if (tp->pcie_cap != 0) {
12504 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12506 pcie_set_readrq(tp->pdev, 4096);
12508 pci_read_config_word(tp->pdev,
12509 tp->pcie_cap + PCI_EXP_LNKCTL,
12511 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
12512 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12513 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12514 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12515 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12516 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
12517 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
12518 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
12520 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12521 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12522 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12523 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12524 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12525 if (!tp->pcix_cap) {
12526 printk(KERN_ERR PFX "Cannot find PCI-X "
12527 "capability, aborting.\n");
12531 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
12532 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12535 /* If we have an AMD 762 or VIA K8T800 chipset, write
12536 * reordering to the mailbox registers done by the host
12537 * controller can cause major troubles. We read back from
12538 * every mailbox register write to force the writes to be
12539 * posted to the chip in order.
12541 if (pci_dev_present(write_reorder_chipsets) &&
12542 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12543 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
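/* Flush sketch: the tg3_write_flush_reg32() style accessor installed
 * below pairs each writel() with a readl() of the same register; the
 * read cannot complete until the posted write has passed the bridge,
 * so mailbox updates reach the chip in program order.
 */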
12545 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
12546 &tp->pci_cacheline_sz);
12547 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
12548 &tp->pci_lat_timer);
12549 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12550 tp->pci_lat_timer < 64) {
12551 tp->pci_lat_timer = 64;
12552 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
12553 tp->pci_lat_timer);
12556 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12557 /* 5700 BX chips need to have their TX producer index
12558 * mailboxes written twice to workaround a bug.
12560 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12562 /* If we are in PCI-X mode, enable register write workaround.
12564 * The workaround is to use indirect register accesses
12565 * for all chip writes not to mailbox registers.
12567 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12570 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12572 /* The chip can have its power management PCI config
12573 * space registers clobbered due to this bug.
12574 * So explicitly force the chip into D0 here.
12576 pci_read_config_dword(tp->pdev,
12577 tp->pm_cap + PCI_PM_CTRL,
12579 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12580 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12581 pci_write_config_dword(tp->pdev,
12582 tp->pm_cap + PCI_PM_CTRL,
12585 /* Also, force SERR#/PERR# in PCI command. */
12586 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12587 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12588 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12592 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12593 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12594 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12595 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12597 /* Chip-specific fixup from Broadcom driver */
12598 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12599 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12600 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12601 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12604 /* Default fast path register access methods */
12605 tp->read32 = tg3_read32;
12606 tp->write32 = tg3_write32;
12607 tp->read32_mbox = tg3_read32;
12608 tp->write32_mbox = tg3_write32;
12609 tp->write32_tx_mbox = tg3_write32;
12610 tp->write32_rx_mbox = tg3_write32;
12612 /* Various workaround register access methods */
12613 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12614 tp->write32 = tg3_write_indirect_reg32;
12615 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12616 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12617 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12619 * Back to back register writes can cause problems on these
12620 * chips, the workaround is to read back all reg writes
12621 * except those to mailbox regs.
12623 * See tg3_write_indirect_reg32().
12625 tp->write32 = tg3_write_flush_reg32;
12629 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12630 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12631 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12632 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12633 tp->write32_rx_mbox = tg3_write_flush_reg32;
12636 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12637 tp->read32 = tg3_read_indirect_reg32;
12638 tp->write32 = tg3_write_indirect_reg32;
12639 tp->read32_mbox = tg3_read_indirect_mbox;
12640 tp->write32_mbox = tg3_write_indirect_mbox;
12641 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12642 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12647 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12648 pci_cmd &= ~PCI_COMMAND_MEMORY;
12649 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12651 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12652 tp->read32_mbox = tg3_read32_mbox_5906;
12653 tp->write32_mbox = tg3_write32_mbox_5906;
12654 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12655 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12658 if (tp->write32 == tg3_write_indirect_reg32 ||
12659 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12660 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12661 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12662 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12664 /* Get eeprom hw config before calling tg3_set_power_state().
12665 * In particular, the TG3_FLG2_IS_NIC flag must be
12666 * determined before calling tg3_set_power_state() so that
12667 * we know whether or not to switch out of Vaux power.
12668 * When the flag is set, it means that GPIO1 is used for eeprom
12669 * write protect and also implies that it is a LOM where GPIOs
12670 * are not used to switch power.
12672 tg3_get_eeprom_hw_cfg(tp);
12674 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12675 /* Allow reads and writes to the
12676 * APE register and memory space.
12678 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12679 PCISTATE_ALLOW_APE_SHMEM_WR;
12680 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12684 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12685 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12686 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12687 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12688 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12690 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12691 * GPIO1 driven high will bring 5700's external PHY out of reset.
12692 * It is also used as eeprom write protect on LOMs.
12694 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12695 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12696 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12697 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12698 GRC_LCLCTRL_GPIO_OUTPUT1);
12699 /* Unused GPIO3 must be driven as output on 5752 because there
12700 * are no pull-up resistors on unused GPIO pins.
12702 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12703 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12705 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12706 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12707 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12709 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
12710 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
12711 /* Turn off the debug UART. */
12712 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12713 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12714 /* Keep VMain power. */
12715 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12716 GRC_LCLCTRL_GPIO_OUTPUT0;
12719 /* Force the chip into D0. */
12720 err = tg3_set_power_state(tp, PCI_D0);
12722 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12723 pci_name(tp->pdev));
12727 /* Derive initial jumbo mode from MTU assigned in
12728 * ether_setup() via the alloc_etherdev() call
12730 if (tp->dev->mtu > ETH_DATA_LEN &&
12731 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12732 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12734 /* Determine WakeOnLan speed to use. */
12735 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12736 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12737 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12738 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12739 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12741 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12744 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12745 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
12747 /* A few boards don't want the Ethernet@WireSpeed phy feature */
12748 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12749 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12750 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12751 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12752 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) ||
12753 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12754 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12756 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12757 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12758 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12759 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12760 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12762 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
12763 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
12764 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12765 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780) {
12766 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12767 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12768 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12769 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12770 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12771 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12772 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12773 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12774 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12776 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12779 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12780 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12781 tp->phy_otp = tg3_read_otp_phycfg(tp);
12782 if (tp->phy_otp == 0)
12783 tp->phy_otp = TG3_OTP_DEFAULT;
12786 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12787 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12789 tp->mi_mode = MAC_MI_MODE_BASE;
12791 tp->coalesce_mode = 0;
12792 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12793 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12794 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12796 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12797 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12798 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12800 if ((tp->pci_chip_rev_id == CHIPREV_ID_57780_A1 &&
12801 tr32(RCVLPC_STATS_ENABLE) & RCVLPC_STATSENAB_ASF_FIX) ||
12802 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0)
12803 tp->tg3_flags3 |= TG3_FLG3_TOGGLE_10_100_L1PLLPD;
12805 err = tg3_mdio_init(tp);
12809 /* Initialize data/descriptor byte/word swapping. */
12810 val = tr32(GRC_MODE);
12811 val &= GRC_MODE_HOST_STACKUP;
12812 tw32(GRC_MODE, val | tp->grc_mode);
12814 tg3_switch_clocks(tp);
12816 /* Clear this out for sanity. */
12817 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12819 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12821 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12822 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12823 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12825 if (chiprevid == CHIPREV_ID_5701_A0 ||
12826 chiprevid == CHIPREV_ID_5701_B0 ||
12827 chiprevid == CHIPREV_ID_5701_B2 ||
12828 chiprevid == CHIPREV_ID_5701_B5) {
12829 void __iomem *sram_base;
12831 /* Write some dummy words into the SRAM status block
12832 * area, see if it reads back correctly. If the return
12833 * value is bad, force enable the PCIX workaround.
12835 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12837 writel(0x00000000, sram_base);
12838 writel(0x00000000, sram_base + 4);
12839 writel(0xffffffff, sram_base + 4);
12840 if (readl(sram_base) != 0x00000000)
12841 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12846 tg3_nvram_init(tp);
12848 grc_misc_cfg = tr32(GRC_MISC_CFG);
12849 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12851 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12852 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12853 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12854 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12856 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12857 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12858 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12859 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12860 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12861 HOSTCC_MODE_CLRTICK_TXBD);
12863 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12864 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12865 tp->misc_host_ctrl);
12868 /* Preserve the APE MAC_MODE bits */
12869 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12870 tp->mac_mode = tr32(MAC_MODE) |
12871 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12873 tp->mac_mode = TG3_DEF_MAC_MODE;
12875 /* these are limited to 10/100 only */
12876 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12877 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12878 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12879 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12880 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12881 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12882 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12883 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12884 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12885 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12886 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12887 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
12888 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
12889 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12891 err = tg3_phy_probe(tp);
12893 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12894 pci_name(tp->pdev), err);
12895 /* ... but do not return immediately ... */
12899 tg3_read_partno(tp);
12900 tg3_read_fw_ver(tp);
12902 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12903 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12905 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12906 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12908 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12911 /* 5700 {AX,BX} chips have a broken status block link
12912 * change bit implementation, so we must use the
12913 * status register in those cases.
12915 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12916 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12918 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12920 /* The led_ctrl is set during tg3_phy_probe; here we might
12921 * have to force the link status polling mechanism based
12922 * upon subsystem IDs.
12924 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12925 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12926 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12927 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12928 TG3_FLAG_USE_LINKCHG_REG);
12931 /* For all SERDES we poll the MAC status register. */
12932 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12933 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12935 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12937 tp->rx_offset = NET_IP_ALIGN;
12938 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12939 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12942 tp->rx_std_max_post = TG3_RX_RING_SIZE;
12944 /* Increment the rx prod index on the rx std ring by at most
12945 * 8 for these chips to work around hw errata.
12947 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12948 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12949 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12950 tp->rx_std_max_post = 8;
12952 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12953 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12954 PCIE_PWR_MGMT_L1_THRESH_MSK;
12959 #ifdef CONFIG_SPARC
12960 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12962 struct net_device *dev = tp->dev;
12963 struct pci_dev *pdev = tp->pdev;
12964 struct device_node *dp = pci_device_to_OF_node(pdev);
12965 const unsigned char *addr;
12968 addr = of_get_property(dp, "local-mac-address", &len);
12969 if (addr && len == 6) {
12970 memcpy(dev->dev_addr, addr, 6);
12971 memcpy(dev->perm_addr, dev->dev_addr, 6);
12977 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12979 struct net_device *dev = tp->dev;
12981 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12982 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12987 static int __devinit tg3_get_device_address(struct tg3 *tp)
12989 struct net_device *dev = tp->dev;
12990 u32 hi, lo, mac_offset;
12993 #ifdef CONFIG_SPARC
12994 if (!tg3_get_macaddr_sparc(tp))
12999 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
13000 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13001 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
13003 if (tg3_nvram_lock(tp))
13004 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
13006 tg3_nvram_unlock(tp);
13008 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13011 /* First try to get it from MAC address mailbox. */
13012 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
13013 if ((hi >> 16) == 0x484b) {
13014 dev->dev_addr[0] = (hi >> 8) & 0xff;
13015 dev->dev_addr[1] = (hi >> 0) & 0xff;
13017 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
13018 dev->dev_addr[2] = (lo >> 24) & 0xff;
13019 dev->dev_addr[3] = (lo >> 16) & 0xff;
13020 dev->dev_addr[4] = (lo >> 8) & 0xff;
13021 dev->dev_addr[5] = (lo >> 0) & 0xff;
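/* Mailbox layout assumed here (sketch): the high word of
 * NIC_SRAM_MAC_ADDR_HIGH_MBOX holds the 0x484b signature (ASCII "HK"),
 * its low word holds MAC bytes 0-1, and the LOW mailbox supplies MAC
 * bytes 2-5, most significant byte first.
 */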
13023 /* Some old bootcode may report a 0 MAC address in SRAM */
13024 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
13027 /* Next, try NVRAM. */
13028 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
13029 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
13030 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
13031 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
13032 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
13034 /* Finally just fetch it out of the MAC control regs. */
13036 hi = tr32(MAC_ADDR_0_HIGH);
13037 lo = tr32(MAC_ADDR_0_LOW);
13039 dev->dev_addr[5] = lo & 0xff;
13040 dev->dev_addr[4] = (lo >> 8) & 0xff;
13041 dev->dev_addr[3] = (lo >> 16) & 0xff;
13042 dev->dev_addr[2] = (lo >> 24) & 0xff;
13043 dev->dev_addr[1] = hi & 0xff;
13044 dev->dev_addr[0] = (hi >> 8) & 0xff;
13048 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
13049 #ifdef CONFIG_SPARC
13050 if (!tg3_get_default_macaddr_sparc(tp))
13055 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
13059 #define BOUNDARY_SINGLE_CACHELINE 1
13060 #define BOUNDARY_MULTI_CACHELINE 2
13062 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
13064 int cacheline_size;
13068 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
13070 cacheline_size = 1024;
13072 cacheline_size = (int) byte * 4;
13074 /* On 5703 and later chips, the boundary bits have no effect. */
13077 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13078 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13079 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13082 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
13083 goal = BOUNDARY_MULTI_CACHELINE;
13085 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
13086 goal = BOUNDARY_SINGLE_CACHELINE;
13095 /* PCI controllers on most RISC systems tend to disconnect
13096 * when a device tries to burst across a cache-line boundary.
13097 * Therefore, letting tg3 do so just wastes PCI bandwidth.
13099 * Unfortunately, for PCI-E there are only limited
13100 * write-side controls for this, and thus for reads
13101 * we will still get the disconnects. We'll also waste
13102 * these PCI cycles for both read and write for chips
13103 * other than 5700 and 5701, which do not implement the boundary bits. */
13106 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13107 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
13108 switch (cacheline_size) {
13113 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13114 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
13115 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
13117 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13118 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13123 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
13124 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
13128 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13129 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13132 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13133 switch (cacheline_size) {
13137 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13138 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13139 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
13145 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13146 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
13150 switch (cacheline_size) {
13152 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13153 val |= (DMA_RWCTRL_READ_BNDRY_16 |
13154 DMA_RWCTRL_WRITE_BNDRY_16);
13159 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13160 val |= (DMA_RWCTRL_READ_BNDRY_32 |
13161 DMA_RWCTRL_WRITE_BNDRY_32);
13166 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13167 val |= (DMA_RWCTRL_READ_BNDRY_64 |
13168 DMA_RWCTRL_WRITE_BNDRY_64);
13173 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13174 val |= (DMA_RWCTRL_READ_BNDRY_128 |
13175 DMA_RWCTRL_WRITE_BNDRY_128);
13180 val |= (DMA_RWCTRL_READ_BNDRY_256 |
13181 DMA_RWCTRL_WRITE_BNDRY_256);
13184 val |= (DMA_RWCTRL_READ_BNDRY_512 |
13185 DMA_RWCTRL_WRITE_BNDRY_512);
13189 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
13190 DMA_RWCTRL_WRITE_BNDRY_1024);
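/* Example (sketch): on a conventional PCI bus with a 64-byte cache
 * line and goal == BOUNDARY_SINGLE_CACHELINE, the switch above ORs in
 * DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64, so DMA bursts
 * break at every cache-line edge.
 */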
13199 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
13201 struct tg3_internal_buffer_desc test_desc;
13202 u32 sram_dma_descs;
13205 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
13207 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
13208 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
13209 tw32(RDMAC_STATUS, 0);
13210 tw32(WDMAC_STATUS, 0);
13212 tw32(BUFMGR_MODE, 0);
13213 tw32(FTQ_RESET, 0);
13215 test_desc.addr_hi = ((u64) buf_dma) >> 32;
13216 test_desc.addr_lo = buf_dma & 0xffffffff;
13217 test_desc.nic_mbuf = 0x00002100;
13218 test_desc.len = size;
13221 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
13222 * the *second* time the tg3 driver was getting loaded after an
13225 * Broadcom tells me:
13226 * ...the DMA engine is connected to the GRC block and a DMA
13227 * reset may affect the GRC block in some unpredictable way...
13228 * The behavior of resets to individual blocks has not been tested.
13230 * Broadcom noted the GRC reset will also reset all sub-components.
13233 test_desc.cqid_sqid = (13 << 8) | 2;
13235 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
13238 test_desc.cqid_sqid = (16 << 8) | 7;
13240 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
13243 test_desc.flags = 0x00000005;
13245 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
13248 val = *(((u32 *)&test_desc) + i);
13249 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
13250 sram_dma_descs + (i * sizeof(u32)));
13251 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
13253 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
13256 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
13258 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
13262 for (i = 0; i < 40; i++) {
13266 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
13268 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
13269 if ((val & 0xffff) == sram_dma_descs) {
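
#if 0
/* Illustrative sketch only, not part of the driver: the indirect
 * SRAM access idiom used above.  The window base register selects a
 * NIC-internal address, the data register then accesses that word
 * through PCI config space, and the window is parked at 0 afterwards.
 */
static void example_sram_write(struct tg3 *tp, u32 off, u32 val)
{
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
}
#endif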
#define TEST_BUFFER_SIZE	0x2000
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
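
#if 0
/* Illustrative sketch only, not part of the driver: the coherent
 * buffer lifecycle the DMA test relies on.  pci_alloc_consistent()
 * hands back both a CPU pointer and a bus address; the chip DMAs
 * via buf_dma while the CPU fills and checks the pattern via buf.
 */
static int example_dma_roundtrip(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf;
	int i, err;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
		buf[i] = i;

	/* Host memory -> NIC SRAM, then NIC SRAM -> host memory. */
	err = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
	if (!err)
		err = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);

	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
	return err;
}
#endif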
static void __devinit tg3_init_link_config(struct tg3 *tp)
{
	tp->link_config.advertising =
		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
		 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
		 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
		 ADVERTISED_Autoneg | ADVERTISED_MII);
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.phy_is_low_power = 0;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}
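
#if 0
/* Illustrative only: the six ADVERTISED_* speed/duplex bits above
 * sum to 0x3f, so this default is equivalent to requesting
 * "ethtool -s ethX autoneg on advertise 0x3f", i.e. every
 * 10/100/1000 half- and full-duplex mode on a copper port.
 */
#endif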
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & PHY_ID_MASK) {
	case PHY_ID_BCM5400:	return "5400";
	case PHY_ID_BCM5401:	return "5401";
	case PHY_ID_BCM5411:	return "5411";
	case PHY_ID_BCM5701:	return "5701";
	case PHY_ID_BCM5703:	return "5703";
	case PHY_ID_BCM5704:	return "5704";
	case PHY_ID_BCM5705:	return "5705";
	case PHY_ID_BCM5750:	return "5750";
	case PHY_ID_BCM5752:	return "5752";
	case PHY_ID_BCM5714:	return "5714";
	case PHY_ID_BCM5780:	return "5780";
	case PHY_ID_BCM5755:	return "5755";
	case PHY_ID_BCM5787:	return "5787";
	case PHY_ID_BCM5784:	return "5784";
	case PHY_ID_BCM5756:	return "5722/5756";
	case PHY_ID_BCM5906:	return "5906";
	case PHY_ID_BCM5761:	return "5761";
	case PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
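
#if 0
/* Illustrative sketch only, not part of the driver: a 5704 in a
 * 133MHz PCI-X slot on a 64-bit bus yields "PCIX:133MHz:64-bit".
 * The probe message in tg3_init_one() below composes it like this:
 */
static void example_print_bus(struct tg3 *tp)
{
	char str[40];

	printk(KERN_DEBUG PFX "bus: %s\n", tg3_bus_string(tp, str));
}
#endif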
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 * after all.
	 */
	pci_dev_put(peer);

	return peer;
}
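
#if 0
/* Illustrative only: devfn packs the slot and function numbers as
 * (slot << 3) | func, so masking with ~7 keeps the slot bits and
 * clears the function.  E.g. devfn 0x21 (slot 4, function 1) gives
 * devnr 0x20, and the loop above probes functions 0-7 of slot 4,
 * i.e. devfns 0x20-0x27, looking for the other port of a 5704.
 */
#endif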
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
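
#if 0
/* Illustrative sketch only (a hypothetical helper, not the driver's
 * real ethtool implementation): an ETHTOOL_GCOALESCE handler can
 * simply hand back the template initialized above, which is what
 * "ethtool -c ethX" reports before any user tuning.
 */
static int example_get_coalesce(struct net_device *dev,
				struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
#endif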
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats		= tg3_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
static const struct net_device_ops tg3_netdev_ops_dma_bug = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit_dma_bug,
	.ndo_get_stats		= tg3_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i < 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		dev->netdev_ops = &tg3_netdev_ops;
	else
		dev->netdev_ops = &tg3_netdev_ops_dma_bug;

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
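
	/* For example, a TG3_FLAG_40BIT_DMA_BUG device on a
	 * CONFIG_HIGHMEM kernel thus ends up with a 64-bit streaming
	 * mask (each mapping is checked in tg3_start_xmit()) while
	 * coherent allocations stay within 40 bits via
	 * persist_dma_mask.
	 */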
	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		if (dev->features & NETIF_F_IP_CSUM)
			dev->features |= NETIF_F_TSO;
		if ((dev->features & NETIF_F_IPV6_CSUM) &&
		    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			dev->features |= NETIF_F_TSO_ECN;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_fw;
	}

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_fw;
		}

		tg3_ape_lock_init(tp);

		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
			tg3_read_dash_ver(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shut it down.
	 * The DMA self test below will enable WDMAC, and we would then
	 * see (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_bus_string(tp, str),
	       dev->dev_addr);

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		printk(KERN_INFO
		       "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
		       tp->dev->name,
		       tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
		       dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
	else
		printk(KERN_INFO
		       "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
		       tp->dev->name, tg3_phy_string(tp),
		       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
			((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
			 "10/100/1000Base-T")),
		       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_BIT_MASK(32)) ? 32 :
	       (((u64) pdev->dma_mask == DMA_BIT_MASK(40)) ? 40 : 64));

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_fw:
	if (tp->fw)
		release_firmware(tp->fw);

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		flush_scheduled_work();

		if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	pci_power_t target_state;
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;

	err = tg3_set_power_state(tp, target_state);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);