2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2007 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/if_vlan.h>
38 #include <linux/tcp.h>
39 #include <linux/workqueue.h>
40 #include <linux/prefetch.h>
41 #include <linux/dma-mapping.h>
43 #include <net/checksum.h>
46 #include <asm/system.h>
48 #include <asm/byteorder.h>
49 #include <asm/uaccess.h>
52 #include <asm/idprom.h>
56 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
57 #define TG3_VLAN_TAG_USED 1
59 #define TG3_VLAN_TAG_USED 0
62 #define TG3_TSO_SUPPORT 1
66 #define DRV_MODULE_NAME "tg3"
67 #define PFX DRV_MODULE_NAME ": "
68 #define DRV_MODULE_VERSION "3.92"
69 #define DRV_MODULE_RELDATE "May 2, 2008"
71 #define TG3_DEF_MAC_MODE 0
72 #define TG3_DEF_RX_MODE 0
73 #define TG3_DEF_TX_MODE 0
74 #define TG3_DEF_MSG_ENABLE \
84 /* length of time before we decide the hardware is borked,
85 * and dev->tx_timeout() should be called to fix the problem
87 #define TG3_TX_TIMEOUT (5 * HZ)
89 /* hardware minimum and maximum for a single frame's data payload */
90 #define TG3_MIN_MTU 60
91 #define TG3_MAX_MTU(tp) \
92 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
94 /* These numbers seem to be hard coded in the NIC firmware somehow.
95 * You can't change the ring sizes, but you can change where you place
96 * them in the NIC onboard memory.
98 #define TG3_RX_RING_SIZE 512
99 #define TG3_DEF_RX_RING_PENDING 200
100 #define TG3_RX_JUMBO_RING_SIZE 256
101 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
103 /* Do not place this n-ring entries value into the tp struct itself,
104 * we really want to expose these constants to GCC so that modulo et
105 * al. operations are done with shifts and masks instead of with
106 * hw multiply/modulo instructions. Another solution would be to
107 * replace things like '% foo' with '& (foo - 1)'.
109 #define TG3_RX_RCB_RING_SIZE(tp) \
110 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
112 #define TG3_TX_RING_SIZE 512
113 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
115 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
118 TG3_RX_JUMBO_RING_SIZE)
119 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
120 TG3_RX_RCB_RING_SIZE(tp))
121 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
123 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
125 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
126 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
128 /* minimum number of free TX descriptors required to wake up TX process */
129 #define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
131 /* number of ETHTOOL_GSTATS u64's */
132 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
134 #define TG3_NUM_TEST 6
136 static char version[] __devinitdata =
137 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
139 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
140 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
141 MODULE_LICENSE("GPL");
142 MODULE_VERSION(DRV_MODULE_VERSION);
144 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
145 module_param(tg3_debug, int, 0);
146 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
148 static struct pci_device_id tg3_pci_tbl[] = {
149 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
150 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
151 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
152 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
153 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
154 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
155 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
156 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
157 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
158 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
159 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
160 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
161 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
206 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
207 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
208 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
209 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
210 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
211 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
212 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
213 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
217 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
219 static const struct {
220 const char string[ETH_GSTRING_LEN];
221 } ethtool_stats_keys[TG3_NUM_STATS] = {
224 { "rx_ucast_packets" },
225 { "rx_mcast_packets" },
226 { "rx_bcast_packets" },
228 { "rx_align_errors" },
229 { "rx_xon_pause_rcvd" },
230 { "rx_xoff_pause_rcvd" },
231 { "rx_mac_ctrl_rcvd" },
232 { "rx_xoff_entered" },
233 { "rx_frame_too_long_errors" },
235 { "rx_undersize_packets" },
236 { "rx_in_length_errors" },
237 { "rx_out_length_errors" },
238 { "rx_64_or_less_octet_packets" },
239 { "rx_65_to_127_octet_packets" },
240 { "rx_128_to_255_octet_packets" },
241 { "rx_256_to_511_octet_packets" },
242 { "rx_512_to_1023_octet_packets" },
243 { "rx_1024_to_1522_octet_packets" },
244 { "rx_1523_to_2047_octet_packets" },
245 { "rx_2048_to_4095_octet_packets" },
246 { "rx_4096_to_8191_octet_packets" },
247 { "rx_8192_to_9022_octet_packets" },
254 { "tx_flow_control" },
256 { "tx_single_collisions" },
257 { "tx_mult_collisions" },
259 { "tx_excessive_collisions" },
260 { "tx_late_collisions" },
261 { "tx_collide_2times" },
262 { "tx_collide_3times" },
263 { "tx_collide_4times" },
264 { "tx_collide_5times" },
265 { "tx_collide_6times" },
266 { "tx_collide_7times" },
267 { "tx_collide_8times" },
268 { "tx_collide_9times" },
269 { "tx_collide_10times" },
270 { "tx_collide_11times" },
271 { "tx_collide_12times" },
272 { "tx_collide_13times" },
273 { "tx_collide_14times" },
274 { "tx_collide_15times" },
275 { "tx_ucast_packets" },
276 { "tx_mcast_packets" },
277 { "tx_bcast_packets" },
278 { "tx_carrier_sense_errors" },
282 { "dma_writeq_full" },
283 { "dma_write_prioq_full" },
287 { "rx_threshold_hit" },
289 { "dma_readq_full" },
290 { "dma_read_prioq_full" },
291 { "tx_comp_queue_full" },
293 { "ring_set_send_prod_index" },
294 { "ring_status_update" },
296 { "nic_avoided_irqs" },
297 { "nic_tx_threshold_hit" }
300 static const struct {
301 const char string[ETH_GSTRING_LEN];
302 } ethtool_test_keys[TG3_NUM_TEST] = {
303 { "nvram test (online) " },
304 { "link test (online) " },
305 { "register test (offline)" },
306 { "memory test (offline)" },
307 { "loopback test (offline)" },
308 { "interrupt test (offline)" },
311 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
313 writel(val, tp->regs + off);
316 static u32 tg3_read32(struct tg3 *tp, u32 off)
318 return (readl(tp->regs + off));
321 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
323 writel(val, tp->aperegs + off);
326 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
328 return (readl(tp->aperegs + off));
331 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
335 spin_lock_irqsave(&tp->indirect_lock, flags);
336 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
337 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
338 spin_unlock_irqrestore(&tp->indirect_lock, flags);
341 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
343 writel(val, tp->regs + off);
344 readl(tp->regs + off);
347 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
352 spin_lock_irqsave(&tp->indirect_lock, flags);
353 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
354 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
355 spin_unlock_irqrestore(&tp->indirect_lock, flags);
359 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
363 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
364 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
365 TG3_64BIT_REG_LOW, val);
368 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
369 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
370 TG3_64BIT_REG_LOW, val);
374 spin_lock_irqsave(&tp->indirect_lock, flags);
375 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
376 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
377 spin_unlock_irqrestore(&tp->indirect_lock, flags);
379 /* In indirect mode when disabling interrupts, we also need
380 * to clear the interrupt bit in the GRC local ctrl register.
382 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
384 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
385 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
389 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
394 spin_lock_irqsave(&tp->indirect_lock, flags);
395 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
396 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
397 spin_unlock_irqrestore(&tp->indirect_lock, flags);
401 /* usec_wait specifies the wait time in usec when writing to certain registers
402 * where it is unsafe to read back the register without some delay.
403 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
404 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
406 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
408 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
409 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
410 /* Non-posted methods */
411 tp->write32(tp, off, val);
414 tg3_write32(tp, off, val);
419 /* Wait again after the read for the posted method to guarantee that
420 * the wait time is met.
426 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
428 tp->write32_mbox(tp, off, val);
429 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
430 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
431 tp->read32_mbox(tp, off);
434 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
436 void __iomem *mbox = tp->regs + off;
438 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
440 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
444 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
446 return (readl(tp->regs + off + GRCMBOX_BASE));
449 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
451 writel(val, tp->regs + off + GRCMBOX_BASE);
454 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
455 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
456 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
457 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
458 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
460 #define tw32(reg,val) tp->write32(tp, reg, val)
461 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
462 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
463 #define tr32(reg) tp->read32(tp, reg)
465 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
469 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
470 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
473 spin_lock_irqsave(&tp->indirect_lock, flags);
474 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
475 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
476 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
478 /* Always leave this as zero. */
479 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
481 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
482 tw32_f(TG3PCI_MEM_WIN_DATA, val);
484 /* Always leave this as zero. */
485 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
487 spin_unlock_irqrestore(&tp->indirect_lock, flags);
490 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
494 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
495 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
500 spin_lock_irqsave(&tp->indirect_lock, flags);
501 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
502 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
503 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
505 /* Always leave this as zero. */
506 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
508 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
509 *val = tr32(TG3PCI_MEM_WIN_DATA);
511 /* Always leave this as zero. */
512 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
514 spin_unlock_irqrestore(&tp->indirect_lock, flags);
517 static void tg3_ape_lock_init(struct tg3 *tp)
521 /* Make sure the driver hasn't any stale locks. */
522 for (i = 0; i < 8; i++)
523 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
524 APE_LOCK_GRANT_DRIVER);
527 static int tg3_ape_lock(struct tg3 *tp, int locknum)
533 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
537 case TG3_APE_LOCK_MEM:
545 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
547 /* Wait for up to 1 millisecond to acquire lock. */
548 for (i = 0; i < 100; i++) {
549 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
550 if (status == APE_LOCK_GRANT_DRIVER)
555 if (status != APE_LOCK_GRANT_DRIVER) {
556 /* Revoke the lock request. */
557 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
558 APE_LOCK_GRANT_DRIVER);
566 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
570 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
574 case TG3_APE_LOCK_MEM:
581 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
584 static void tg3_disable_ints(struct tg3 *tp)
586 tw32(TG3PCI_MISC_HOST_CTRL,
587 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
588 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
591 static inline void tg3_cond_int(struct tg3 *tp)
593 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
594 (tp->hw_status->status & SD_STATUS_UPDATED))
595 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
597 tw32(HOSTCC_MODE, tp->coalesce_mode |
598 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
601 static void tg3_enable_ints(struct tg3 *tp)
606 tw32(TG3PCI_MISC_HOST_CTRL,
607 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
608 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
609 (tp->last_tag << 24));
610 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
611 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
612 (tp->last_tag << 24));
616 static inline unsigned int tg3_has_work(struct tg3 *tp)
618 struct tg3_hw_status *sblk = tp->hw_status;
619 unsigned int work_exists = 0;
621 /* check for phy events */
622 if (!(tp->tg3_flags &
623 (TG3_FLAG_USE_LINKCHG_REG |
624 TG3_FLAG_POLL_SERDES))) {
625 if (sblk->status & SD_STATUS_LINK_CHG)
628 /* check for RX/TX work to do */
629 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
630 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
637 * similar to tg3_enable_ints, but it accurately determines whether there
638 * is new work pending and can return without flushing the PIO write
639 * which reenables interrupts
641 static void tg3_restart_ints(struct tg3 *tp)
643 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
647 /* When doing tagged status, this work check is unnecessary.
648 * The last_tag we write above tells the chip which piece of
649 * work we've completed.
651 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
653 tw32(HOSTCC_MODE, tp->coalesce_mode |
654 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
657 static inline void tg3_netif_stop(struct tg3 *tp)
659 tp->dev->trans_start = jiffies; /* prevent tx timeout */
660 napi_disable(&tp->napi);
661 netif_tx_disable(tp->dev);
664 static inline void tg3_netif_start(struct tg3 *tp)
666 netif_wake_queue(tp->dev);
667 /* NOTE: unconditional netif_wake_queue is only appropriate
668 * so long as all callers are assured to have free tx slots
669 * (such as after tg3_init_hw)
671 napi_enable(&tp->napi);
672 tp->hw_status->status |= SD_STATUS_UPDATED;
676 static void tg3_switch_clocks(struct tg3 *tp)
678 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
681 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
682 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
685 orig_clock_ctrl = clock_ctrl;
686 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
687 CLOCK_CTRL_CLKRUN_OENABLE |
689 tp->pci_clock_ctrl = clock_ctrl;
691 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
692 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
693 tw32_wait_f(TG3PCI_CLOCK_CTRL,
694 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
696 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
697 tw32_wait_f(TG3PCI_CLOCK_CTRL,
699 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
701 tw32_wait_f(TG3PCI_CLOCK_CTRL,
702 clock_ctrl | (CLOCK_CTRL_ALTCLK),
705 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
708 #define PHY_BUSY_LOOPS 5000
710 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
716 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
718 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
724 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
725 MI_COM_PHY_ADDR_MASK);
726 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
727 MI_COM_REG_ADDR_MASK);
728 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
730 tw32_f(MAC_MI_COM, frame_val);
732 loops = PHY_BUSY_LOOPS;
735 frame_val = tr32(MAC_MI_COM);
737 if ((frame_val & MI_COM_BUSY) == 0) {
739 frame_val = tr32(MAC_MI_COM);
747 *val = frame_val & MI_COM_DATA_MASK;
751 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
752 tw32_f(MAC_MI_MODE, tp->mi_mode);
759 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
765 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
766 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
769 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
771 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
775 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
776 MI_COM_PHY_ADDR_MASK);
777 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
778 MI_COM_REG_ADDR_MASK);
779 frame_val |= (val & MI_COM_DATA_MASK);
780 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
782 tw32_f(MAC_MI_COM, frame_val);
784 loops = PHY_BUSY_LOOPS;
787 frame_val = tr32(MAC_MI_COM);
788 if ((frame_val & MI_COM_BUSY) == 0) {
790 frame_val = tr32(MAC_MI_COM);
800 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
801 tw32_f(MAC_MI_MODE, tp->mi_mode);
808 static int tg3_bmcr_reset(struct tg3 *tp)
813 /* OK, reset it, and poll the BMCR_RESET bit until it
814 * clears or we time out.
816 phy_control = BMCR_RESET;
817 err = tg3_writephy(tp, MII_BMCR, phy_control);
823 err = tg3_readphy(tp, MII_BMCR, &phy_control);
827 if ((phy_control & BMCR_RESET) == 0) {
839 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
841 struct tg3 *tp = (struct tg3 *)bp->priv;
844 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
847 if (tg3_readphy(tp, reg, &val))
853 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
855 struct tg3 *tp = (struct tg3 *)bp->priv;
857 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
860 if (tg3_writephy(tp, reg, val))
866 static int tg3_mdio_reset(struct mii_bus *bp)
871 static void tg3_mdio_start(struct tg3 *tp)
873 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
874 mutex_lock(&tp->mdio_bus.mdio_lock);
875 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
876 mutex_unlock(&tp->mdio_bus.mdio_lock);
879 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
880 tw32_f(MAC_MI_MODE, tp->mi_mode);
884 static void tg3_mdio_stop(struct tg3 *tp)
886 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
887 mutex_lock(&tp->mdio_bus.mdio_lock);
888 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
889 mutex_unlock(&tp->mdio_bus.mdio_lock);
893 static int tg3_mdio_init(struct tg3 *tp)
897 struct mii_bus *mdio_bus = &tp->mdio_bus;
901 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
902 (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
905 memset(mdio_bus, 0, sizeof(*mdio_bus));
907 mdio_bus->name = "tg3 mdio bus";
908 snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%x",
909 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
911 mdio_bus->dev = &tp->pdev->dev;
912 mdio_bus->read = &tg3_mdio_read;
913 mdio_bus->write = &tg3_mdio_write;
914 mdio_bus->reset = &tg3_mdio_reset;
915 mdio_bus->phy_mask = ~(1 << PHY_ADDR);
916 mdio_bus->irq = &tp->mdio_irq[0];
918 for (i = 0; i < PHY_MAX_ADDR; i++)
919 mdio_bus->irq[i] = PHY_POLL;
921 /* The bus registration will look for all the PHYs on the mdio bus.
922 * Unfortunately, it does not ensure the PHY is powered up before
923 * accessing the PHY ID registers. A chip reset is the
924 * quickest way to bring the device back to an operational state..
926 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
929 i = mdiobus_register(mdio_bus);
931 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
933 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
939 static void tg3_mdio_fini(struct tg3 *tp)
941 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
942 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
943 mdiobus_unregister(&tp->mdio_bus);
944 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
948 /* tp->lock is held. */
949 static void tg3_wait_for_event_ack(struct tg3 *tp)
953 /* Wait for up to 2.5 milliseconds */
954 for (i = 0; i < 250000; i++) {
955 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
961 /* tp->lock is held. */
962 static void tg3_ump_link_report(struct tg3 *tp)
967 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
968 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
971 tg3_wait_for_event_ack(tp);
973 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
975 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
978 if (!tg3_readphy(tp, MII_BMCR, ®))
980 if (!tg3_readphy(tp, MII_BMSR, ®))
981 val |= (reg & 0xffff);
982 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
985 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
987 if (!tg3_readphy(tp, MII_LPA, ®))
988 val |= (reg & 0xffff);
989 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
992 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
993 if (!tg3_readphy(tp, MII_CTRL1000, ®))
995 if (!tg3_readphy(tp, MII_STAT1000, ®))
996 val |= (reg & 0xffff);
998 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1000 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1004 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1006 val = tr32(GRC_RX_CPU_EVENT);
1007 val |= GRC_RX_CPU_DRIVER_EVENT;
1008 tw32_f(GRC_RX_CPU_EVENT, val);
1011 static void tg3_link_report(struct tg3 *tp)
1013 if (!netif_carrier_ok(tp->dev)) {
1014 if (netif_msg_link(tp))
1015 printk(KERN_INFO PFX "%s: Link is down.\n",
1017 tg3_ump_link_report(tp);
1018 } else if (netif_msg_link(tp)) {
1019 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1021 (tp->link_config.active_speed == SPEED_1000 ?
1023 (tp->link_config.active_speed == SPEED_100 ?
1025 (tp->link_config.active_duplex == DUPLEX_FULL ?
1028 printk(KERN_INFO PFX
1029 "%s: Flow control is %s for TX and %s for RX.\n",
1031 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1033 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1035 tg3_ump_link_report(tp);
1039 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1043 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1044 miireg = ADVERTISE_PAUSE_CAP;
1045 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1046 miireg = ADVERTISE_PAUSE_ASYM;
1047 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1048 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1055 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1059 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1060 miireg = ADVERTISE_1000XPAUSE;
1061 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1062 miireg = ADVERTISE_1000XPSE_ASYM;
1063 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1064 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1071 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1075 if (lcladv & ADVERTISE_PAUSE_CAP) {
1076 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1077 if (rmtadv & LPA_PAUSE_CAP)
1078 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1079 else if (rmtadv & LPA_PAUSE_ASYM)
1080 cap = TG3_FLOW_CTRL_RX;
1082 if (rmtadv & LPA_PAUSE_CAP)
1083 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1085 } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1086 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1087 cap = TG3_FLOW_CTRL_TX;
1093 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1097 if (lcladv & ADVERTISE_1000XPAUSE) {
1098 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1099 if (rmtadv & LPA_1000XPAUSE)
1100 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1101 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1102 cap = TG3_FLOW_CTRL_RX;
1104 if (rmtadv & LPA_1000XPAUSE)
1105 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1107 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1108 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1109 cap = TG3_FLOW_CTRL_TX;
1115 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1118 u32 old_rx_mode = tp->rx_mode;
1119 u32 old_tx_mode = tp->tx_mode;
1121 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1122 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1123 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1124 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1126 flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
1128 flowctrl = tp->link_config.flowctrl;
1130 tp->link_config.active_flowctrl = flowctrl;
1132 if (flowctrl & TG3_FLOW_CTRL_RX)
1133 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1135 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1137 if (old_rx_mode != tp->rx_mode)
1138 tw32_f(MAC_RX_MODE, tp->rx_mode);
1140 if (flowctrl & TG3_FLOW_CTRL_TX)
1141 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1143 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1145 if (old_tx_mode != tp->tx_mode)
1146 tw32_f(MAC_TX_MODE, tp->tx_mode);
1149 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1151 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1152 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1155 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1159 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1160 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1163 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1166 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1167 tg3_writephy(tp, MII_TG3_EPHY_TEST,
1168 ephy | MII_TG3_EPHY_SHADOW_EN);
1169 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1171 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1173 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1174 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1176 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1179 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1180 MII_TG3_AUXCTL_SHDWSEL_MISC;
1181 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1182 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1184 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1186 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1187 phy |= MII_TG3_AUXCTL_MISC_WREN;
1188 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1193 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1197 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1200 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1201 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1202 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1203 (val | (1 << 15) | (1 << 4)));
1206 static void tg3_phy_apply_otp(struct tg3 *tp)
1215 /* Enable SM_DSP clock and tx 6dB coding. */
1216 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1217 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1218 MII_TG3_AUXCTL_ACTL_TX_6DB;
1219 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1221 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1222 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1223 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1225 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1226 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1227 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1229 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1230 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1231 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1233 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1234 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1236 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1237 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1239 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1240 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1241 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1243 /* Turn off SM_DSP clock. */
1244 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1245 MII_TG3_AUXCTL_ACTL_TX_6DB;
1246 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1249 static int tg3_wait_macro_done(struct tg3 *tp)
1256 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1257 if ((tmp32 & 0x1000) == 0)
1267 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1269 static const u32 test_pat[4][6] = {
1270 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1271 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1272 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1273 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1277 for (chan = 0; chan < 4; chan++) {
1280 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1281 (chan * 0x2000) | 0x0200);
1282 tg3_writephy(tp, 0x16, 0x0002);
1284 for (i = 0; i < 6; i++)
1285 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1288 tg3_writephy(tp, 0x16, 0x0202);
1289 if (tg3_wait_macro_done(tp)) {
1294 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1295 (chan * 0x2000) | 0x0200);
1296 tg3_writephy(tp, 0x16, 0x0082);
1297 if (tg3_wait_macro_done(tp)) {
1302 tg3_writephy(tp, 0x16, 0x0802);
1303 if (tg3_wait_macro_done(tp)) {
1308 for (i = 0; i < 6; i += 2) {
1311 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1312 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1313 tg3_wait_macro_done(tp)) {
1319 if (low != test_pat[chan][i] ||
1320 high != test_pat[chan][i+1]) {
1321 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1322 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1323 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1333 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1337 for (chan = 0; chan < 4; chan++) {
1340 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1341 (chan * 0x2000) | 0x0200);
1342 tg3_writephy(tp, 0x16, 0x0002);
1343 for (i = 0; i < 6; i++)
1344 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1345 tg3_writephy(tp, 0x16, 0x0202);
1346 if (tg3_wait_macro_done(tp))
1353 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1355 u32 reg32, phy9_orig;
1356 int retries, do_phy_reset, err;
1362 err = tg3_bmcr_reset(tp);
1368 /* Disable transmitter and interrupt. */
1369 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
1373 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1375 /* Set full-duplex, 1000 mbps. */
1376 tg3_writephy(tp, MII_BMCR,
1377 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1379 /* Set to master mode. */
1380 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1383 tg3_writephy(tp, MII_TG3_CTRL,
1384 (MII_TG3_CTRL_AS_MASTER |
1385 MII_TG3_CTRL_ENABLE_AS_MASTER));
1387 /* Enable SM_DSP_CLOCK and 6dB. */
1388 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1390 /* Block the PHY control access. */
1391 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1392 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1394 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1397 } while (--retries);
1399 err = tg3_phy_reset_chanpat(tp);
1403 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1404 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1406 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1407 tg3_writephy(tp, 0x16, 0x0000);
1409 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1410 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1411 /* Set Extended packet length bit for jumbo frames */
1412 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1415 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1418 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1420 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
1422 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1429 /* This will reset the tigon3 PHY if there is no valid
1430 * link unless the FORCE argument is non-zero.
1432 static int tg3_phy_reset(struct tg3 *tp)
1438 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1441 val = tr32(GRC_MISC_CFG);
1442 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1445 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1446 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1450 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1451 netif_carrier_off(tp->dev);
1452 tg3_link_report(tp);
1455 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1456 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1457 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1458 err = tg3_phy_reset_5703_4_5(tp);
1465 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1466 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1467 cpmuctrl = tr32(TG3_CPMU_CTRL);
1468 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1470 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1473 err = tg3_bmcr_reset(tp);
1477 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1480 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1481 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1483 tw32(TG3_CPMU_CTRL, cpmuctrl);
1486 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
1489 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1490 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1491 CPMU_LSPD_1000MB_MACCLK_12_5) {
1492 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1494 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1497 /* Disable GPHY autopowerdown. */
1498 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1499 MII_TG3_MISC_SHDW_WREN |
1500 MII_TG3_MISC_SHDW_APD_SEL |
1501 MII_TG3_MISC_SHDW_APD_WKTM_84MS);
1504 tg3_phy_apply_otp(tp);
1507 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1508 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1509 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1510 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1511 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1512 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1513 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1515 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1516 tg3_writephy(tp, 0x1c, 0x8d68);
1517 tg3_writephy(tp, 0x1c, 0x8d68);
1519 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1520 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1521 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1522 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1523 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1524 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1525 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1526 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1527 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1529 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1530 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1531 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1532 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1533 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1534 tg3_writephy(tp, MII_TG3_TEST1,
1535 MII_TG3_TEST1_TRIM_EN | 0x4);
1537 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1538 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1540 /* Set Extended packet length bit (bit 14) on all chips that */
1541 /* support jumbo frames */
1542 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1543 /* Cannot do read-modify-write on 5401 */
1544 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1545 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1548 /* Set bit 14 with read-modify-write to preserve other bits */
1549 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1550 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1551 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1554 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1555 * jumbo frames transmission.
1557 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1560 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1561 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1562 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1565 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1566 /* adjust output voltage */
1567 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1570 tg3_phy_toggle_automdix(tp, 1);
1571 tg3_phy_set_wirespeed(tp);
1575 static void tg3_frob_aux_power(struct tg3 *tp)
1577 struct tg3 *tp_peer = tp;
1579 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1582 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1583 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1584 struct net_device *dev_peer;
1586 dev_peer = pci_get_drvdata(tp->pdev_peer);
1587 /* remove_one() may have been run on the peer. */
1591 tp_peer = netdev_priv(dev_peer);
1594 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1595 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1596 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1597 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1598 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1599 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1600 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1601 (GRC_LCLCTRL_GPIO_OE0 |
1602 GRC_LCLCTRL_GPIO_OE1 |
1603 GRC_LCLCTRL_GPIO_OE2 |
1604 GRC_LCLCTRL_GPIO_OUTPUT0 |
1605 GRC_LCLCTRL_GPIO_OUTPUT1),
1609 u32 grc_local_ctrl = 0;
1611 if (tp_peer != tp &&
1612 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1615 /* Workaround to prevent overdrawing Amps. */
1616 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1618 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1619 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1620 grc_local_ctrl, 100);
1623 /* On 5753 and variants, GPIO2 cannot be used. */
1624 no_gpio2 = tp->nic_sram_data_cfg &
1625 NIC_SRAM_DATA_CFG_NO_GPIO2;
1627 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1628 GRC_LCLCTRL_GPIO_OE1 |
1629 GRC_LCLCTRL_GPIO_OE2 |
1630 GRC_LCLCTRL_GPIO_OUTPUT1 |
1631 GRC_LCLCTRL_GPIO_OUTPUT2;
1633 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1634 GRC_LCLCTRL_GPIO_OUTPUT2);
1636 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1637 grc_local_ctrl, 100);
1639 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1641 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1642 grc_local_ctrl, 100);
1645 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1646 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1647 grc_local_ctrl, 100);
1651 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1652 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1653 if (tp_peer != tp &&
1654 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1657 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1658 (GRC_LCLCTRL_GPIO_OE1 |
1659 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1661 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1662 GRC_LCLCTRL_GPIO_OE1, 100);
1664 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1665 (GRC_LCLCTRL_GPIO_OE1 |
1666 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1671 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1673 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1675 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1676 if (speed != SPEED_10)
1678 } else if (speed == SPEED_10)
1684 static int tg3_setup_phy(struct tg3 *, int);
1686 #define RESET_KIND_SHUTDOWN 0
1687 #define RESET_KIND_INIT 1
1688 #define RESET_KIND_SUSPEND 2
1690 static void tg3_write_sig_post_reset(struct tg3 *, int);
1691 static int tg3_halt_cpu(struct tg3 *, u32);
1692 static int tg3_nvram_lock(struct tg3 *);
1693 static void tg3_nvram_unlock(struct tg3 *);
1695 static void tg3_power_down_phy(struct tg3 *tp)
1699 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1700 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1701 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
1702 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
1705 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
1706 tw32(SG_DIG_CTRL, sg_dig_ctrl);
1707 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
1712 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1714 val = tr32(GRC_MISC_CFG);
1715 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1718 } else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
1719 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1720 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1721 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1724 /* The PHY should not be powered down on some chips because
1727 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1728 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1729 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1730 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1733 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
1734 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1735 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1736 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
1737 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1740 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1743 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1746 u16 power_control, power_caps;
1747 int pm = tp->pm_cap;
1749 /* Make sure register accesses (indirect or otherwise)
1750 * will function correctly.
1752 pci_write_config_dword(tp->pdev,
1753 TG3PCI_MISC_HOST_CTRL,
1754 tp->misc_host_ctrl);
1756 pci_read_config_word(tp->pdev,
1759 power_control |= PCI_PM_CTRL_PME_STATUS;
1760 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1764 pci_write_config_word(tp->pdev,
1767 udelay(100); /* Delay after power state change */
1769 /* Switch out of Vaux if it is a NIC */
1770 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
1771 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1788 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1790 tp->dev->name, state);
1794 power_control |= PCI_PM_CTRL_PME_ENABLE;
1796 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1797 tw32(TG3PCI_MISC_HOST_CTRL,
1798 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1800 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
1801 tp->link_config.phy_is_low_power = 1;
1803 if (tp->link_config.phy_is_low_power == 0) {
1804 tp->link_config.phy_is_low_power = 1;
1805 tp->link_config.orig_speed = tp->link_config.speed;
1806 tp->link_config.orig_duplex = tp->link_config.duplex;
1807 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1810 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1811 tp->link_config.speed = SPEED_10;
1812 tp->link_config.duplex = DUPLEX_HALF;
1813 tp->link_config.autoneg = AUTONEG_ENABLE;
1814 tg3_setup_phy(tp, 0);
1818 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1821 val = tr32(GRC_VCPU_EXT_CTRL);
1822 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
1823 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1827 for (i = 0; i < 200; i++) {
1828 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1829 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1834 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
1835 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1836 WOL_DRV_STATE_SHUTDOWN |
1840 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1842 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1845 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1846 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
1847 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1851 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1852 mac_mode = MAC_MODE_PORT_MODE_GMII;
1854 mac_mode = MAC_MODE_PORT_MODE_MII;
1856 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
1857 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1859 u32 speed = (tp->tg3_flags &
1860 TG3_FLAG_WOL_SPEED_100MB) ?
1861 SPEED_100 : SPEED_10;
1862 if (tg3_5700_link_polarity(tp, speed))
1863 mac_mode |= MAC_MODE_LINK_POLARITY;
1865 mac_mode &= ~MAC_MODE_LINK_POLARITY;
1868 mac_mode = MAC_MODE_PORT_MODE_TBI;
1871 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1872 tw32(MAC_LED_CTRL, tp->led_ctrl);
1874 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1875 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1876 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1878 tw32_f(MAC_MODE, mac_mode);
1881 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1885 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1886 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1887 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1890 base_val = tp->pci_clock_ctrl;
1891 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1892 CLOCK_CTRL_TXCLK_DISABLE);
1894 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1895 CLOCK_CTRL_PWRDOWN_PLL133, 40);
1896 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1897 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
1898 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
1900 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1901 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1902 u32 newbits1, newbits2;
1904 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1905 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1906 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1907 CLOCK_CTRL_TXCLK_DISABLE |
1909 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1910 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1911 newbits1 = CLOCK_CTRL_625_CORE;
1912 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1914 newbits1 = CLOCK_CTRL_ALTCLK;
1915 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1918 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1921 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1924 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1927 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1928 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1929 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1930 CLOCK_CTRL_TXCLK_DISABLE |
1931 CLOCK_CTRL_44MHZ_CORE);
1933 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1936 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1937 tp->pci_clock_ctrl | newbits3, 40);
1941 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1942 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
1943 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
1944 tg3_power_down_phy(tp);
1946 tg3_frob_aux_power(tp);
1948 /* Workaround for unstable PLL clock */
1949 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1950 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1951 u32 val = tr32(0x7d00);
1953 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1955 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1958 err = tg3_nvram_lock(tp);
1959 tg3_halt_cpu(tp, RX_CPU_BASE);
1961 tg3_nvram_unlock(tp);
1965 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1967 /* Finally, set the new power state. */
1968 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1969 udelay(100); /* Delay after power state change */
1974 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1976 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1977 case MII_TG3_AUX_STAT_10HALF:
1979 *duplex = DUPLEX_HALF;
1982 case MII_TG3_AUX_STAT_10FULL:
1984 *duplex = DUPLEX_FULL;
1987 case MII_TG3_AUX_STAT_100HALF:
1989 *duplex = DUPLEX_HALF;
1992 case MII_TG3_AUX_STAT_100FULL:
1994 *duplex = DUPLEX_FULL;
1997 case MII_TG3_AUX_STAT_1000HALF:
1998 *speed = SPEED_1000;
1999 *duplex = DUPLEX_HALF;
2002 case MII_TG3_AUX_STAT_1000FULL:
2003 *speed = SPEED_1000;
2004 *duplex = DUPLEX_FULL;
2008 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2009 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2011 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2015 *speed = SPEED_INVALID;
2016 *duplex = DUPLEX_INVALID;
2021 static void tg3_phy_copper_begin(struct tg3 *tp)
2026 if (tp->link_config.phy_is_low_power) {
2027 /* Entering low power mode. Disable gigabit and
2028 * 100baseT advertisements.
2030 tg3_writephy(tp, MII_TG3_CTRL, 0);
2032 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2033 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2034 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2035 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2037 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2038 } else if (tp->link_config.speed == SPEED_INVALID) {
2039 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2040 tp->link_config.advertising &=
2041 ~(ADVERTISED_1000baseT_Half |
2042 ADVERTISED_1000baseT_Full);
2044 new_adv = ADVERTISE_CSMA;
2045 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2046 new_adv |= ADVERTISE_10HALF;
2047 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2048 new_adv |= ADVERTISE_10FULL;
2049 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2050 new_adv |= ADVERTISE_100HALF;
2051 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2052 new_adv |= ADVERTISE_100FULL;
2054 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2056 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2058 if (tp->link_config.advertising &
2059 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2061 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2062 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2063 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2064 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2065 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2066 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2067 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2068 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2069 MII_TG3_CTRL_ENABLE_AS_MASTER);
2070 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2072 tg3_writephy(tp, MII_TG3_CTRL, 0);
2075 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2076 new_adv |= ADVERTISE_CSMA;
2078 /* Asking for a specific link mode. */
2079 if (tp->link_config.speed == SPEED_1000) {
2080 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2082 if (tp->link_config.duplex == DUPLEX_FULL)
2083 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2085 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2086 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2087 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2088 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2089 MII_TG3_CTRL_ENABLE_AS_MASTER);
2091 if (tp->link_config.speed == SPEED_100) {
2092 if (tp->link_config.duplex == DUPLEX_FULL)
2093 new_adv |= ADVERTISE_100FULL;
2095 new_adv |= ADVERTISE_100HALF;
2097 if (tp->link_config.duplex == DUPLEX_FULL)
2098 new_adv |= ADVERTISE_10FULL;
2100 new_adv |= ADVERTISE_10HALF;
2102 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2107 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2110 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2111 tp->link_config.speed != SPEED_INVALID) {
2112 u32 bmcr, orig_bmcr;
2114 tp->link_config.active_speed = tp->link_config.speed;
2115 tp->link_config.active_duplex = tp->link_config.duplex;
2118 switch (tp->link_config.speed) {
2124 bmcr |= BMCR_SPEED100;
2128 bmcr |= TG3_BMCR_SPEED1000;
2132 if (tp->link_config.duplex == DUPLEX_FULL)
2133 bmcr |= BMCR_FULLDPLX;
2135 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2136 (bmcr != orig_bmcr)) {
2137 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2138 for (i = 0; i < 1500; i++) {
2142 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2143 tg3_readphy(tp, MII_BMSR, &tmp))
2145 if (!(tmp & BMSR_LSTATUS)) {
2150 tg3_writephy(tp, MII_BMCR, bmcr);
2154 tg3_writephy(tp, MII_BMCR,
2155 BMCR_ANENABLE | BMCR_ANRESTART);
2159 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2163 /* Turn off tap power management. */
2164 /* Set Extended packet length bit */
2165 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2167 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2168 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2170 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2171 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2173 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2174 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2176 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2177 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2179 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2180 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2187 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2189 u32 adv_reg, all_mask = 0;
2191 if (mask & ADVERTISED_10baseT_Half)
2192 all_mask |= ADVERTISE_10HALF;
2193 if (mask & ADVERTISED_10baseT_Full)
2194 all_mask |= ADVERTISE_10FULL;
2195 if (mask & ADVERTISED_100baseT_Half)
2196 all_mask |= ADVERTISE_100HALF;
2197 if (mask & ADVERTISED_100baseT_Full)
2198 all_mask |= ADVERTISE_100FULL;
2200 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2203 if ((adv_reg & all_mask) != all_mask)
2205 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2209 if (mask & ADVERTISED_1000baseT_Half)
2210 all_mask |= ADVERTISE_1000HALF;
2211 if (mask & ADVERTISED_1000baseT_Full)
2212 all_mask |= ADVERTISE_1000FULL;
2214 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2217 if ((tg3_ctrl & all_mask) != all_mask)
2223 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2227 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2230 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2231 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2233 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2234 if (curadv != reqadv)
2237 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2238 tg3_readphy(tp, MII_LPA, rmtadv);
2240 /* Reprogram the advertisement register, even if it
2241 * does not affect the current link. If the link
2242 * gets renegotiated in the future, we can save an
2243 * additional renegotiation cycle by advertising
2244 * it correctly in the first place.
2246 if (curadv != reqadv) {
2247 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2248 ADVERTISE_PAUSE_ASYM);
2249 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2256 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2258 int current_link_up;
2260 u32 lcl_adv, rmt_adv;
2268 (MAC_STATUS_SYNC_CHANGED |
2269 MAC_STATUS_CFG_CHANGED |
2270 MAC_STATUS_MI_COMPLETION |
2271 MAC_STATUS_LNKSTATE_CHANGED));
2274 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2276 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2280 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2282 /* Some third-party PHYs need to be reset on link going
2285 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2286 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2287 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2288 netif_carrier_ok(tp->dev)) {
2289 tg3_readphy(tp, MII_BMSR, &bmsr);
2290 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2291 !(bmsr & BMSR_LSTATUS))
2297 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2298 tg3_readphy(tp, MII_BMSR, &bmsr);
2299 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2300 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2303 if (!(bmsr & BMSR_LSTATUS)) {
2304 err = tg3_init_5401phy_dsp(tp);
2308 tg3_readphy(tp, MII_BMSR, &bmsr);
2309 for (i = 0; i < 1000; i++) {
2311 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2312 (bmsr & BMSR_LSTATUS)) {
2318 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2319 !(bmsr & BMSR_LSTATUS) &&
2320 tp->link_config.active_speed == SPEED_1000) {
2321 err = tg3_phy_reset(tp);
2323 err = tg3_init_5401phy_dsp(tp);
2328 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2329 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2330 /* 5701 {A0,B0} CRC bug workaround */
2331 tg3_writephy(tp, 0x15, 0x0a75);
2332 tg3_writephy(tp, 0x1c, 0x8c68);
2333 tg3_writephy(tp, 0x1c, 0x8d68);
2334 tg3_writephy(tp, 0x1c, 0x8c68);
2337 /* Clear pending interrupts... */
2338 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2339 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2341 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2342 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2343 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2344 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2346 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2347 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2348 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2349 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2350 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2352 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2355 current_link_up = 0;
2356 current_speed = SPEED_INVALID;
2357 current_duplex = DUPLEX_INVALID;
2359 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2362 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2363 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2364 if (!(val & (1 << 10))) {
2366 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2372 for (i = 0; i < 100; i++) {
2373 tg3_readphy(tp, MII_BMSR, &bmsr);
2374 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2375 (bmsr & BMSR_LSTATUS))
2380 if (bmsr & BMSR_LSTATUS) {
2383 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2384 for (i = 0; i < 2000; i++) {
2386 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2391 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2396 for (i = 0; i < 200; i++) {
2397 tg3_readphy(tp, MII_BMCR, &bmcr);
2398 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2400 if (bmcr && bmcr != 0x7fff)
2408 tp->link_config.active_speed = current_speed;
2409 tp->link_config.active_duplex = current_duplex;
2411 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2412 if ((bmcr & BMCR_ANENABLE) &&
2413 tg3_copper_is_advertising_all(tp,
2414 tp->link_config.advertising)) {
2415 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2417 current_link_up = 1;
2420 if (!(bmcr & BMCR_ANENABLE) &&
2421 tp->link_config.speed == current_speed &&
2422 tp->link_config.duplex == current_duplex &&
2423 tp->link_config.flowctrl ==
2424 tp->link_config.active_flowctrl) {
2425 current_link_up = 1;
2429 if (current_link_up == 1 &&
2430 tp->link_config.active_duplex == DUPLEX_FULL)
2431 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2435 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2438 tg3_phy_copper_begin(tp);
2440 tg3_readphy(tp, MII_BMSR, &tmp);
2441 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2442 (tmp & BMSR_LSTATUS))
2443 current_link_up = 1;
2446 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2447 if (current_link_up == 1) {
2448 if (tp->link_config.active_speed == SPEED_100 ||
2449 tp->link_config.active_speed == SPEED_10)
2450 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2452 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2454 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2456 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2457 if (tp->link_config.active_duplex == DUPLEX_HALF)
2458 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2460 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2461 if (current_link_up == 1 &&
2462 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2463 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2465 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2468 /* ??? Without this setting Netgear GA302T PHY does not
2469 * ??? send/receive packets...
2471 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2472 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2473 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2474 tw32_f(MAC_MI_MODE, tp->mi_mode);
2478 tw32_f(MAC_MODE, tp->mac_mode);
2481 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2482 /* Polled via timer. */
2483 tw32_f(MAC_EVENT, 0);
2485 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2489 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2490 current_link_up == 1 &&
2491 tp->link_config.active_speed == SPEED_1000 &&
2492 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2493 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2496 (MAC_STATUS_SYNC_CHANGED |
2497 MAC_STATUS_CFG_CHANGED));
2500 NIC_SRAM_FIRMWARE_MBOX,
2501 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2504 if (current_link_up != netif_carrier_ok(tp->dev)) {
2505 if (current_link_up)
2506 netif_carrier_on(tp->dev);
2508 netif_carrier_off(tp->dev);
2509 tg3_link_report(tp);
2515 struct tg3_fiber_aneginfo {
2517 #define ANEG_STATE_UNKNOWN 0
2518 #define ANEG_STATE_AN_ENABLE 1
2519 #define ANEG_STATE_RESTART_INIT 2
2520 #define ANEG_STATE_RESTART 3
2521 #define ANEG_STATE_DISABLE_LINK_OK 4
2522 #define ANEG_STATE_ABILITY_DETECT_INIT 5
2523 #define ANEG_STATE_ABILITY_DETECT 6
2524 #define ANEG_STATE_ACK_DETECT_INIT 7
2525 #define ANEG_STATE_ACK_DETECT 8
2526 #define ANEG_STATE_COMPLETE_ACK_INIT 9
2527 #define ANEG_STATE_COMPLETE_ACK 10
2528 #define ANEG_STATE_IDLE_DETECT_INIT 11
2529 #define ANEG_STATE_IDLE_DETECT 12
2530 #define ANEG_STATE_LINK_OK 13
2531 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2532 #define ANEG_STATE_NEXT_PAGE_WAIT 15
2535 #define MR_AN_ENABLE 0x00000001
2536 #define MR_RESTART_AN 0x00000002
2537 #define MR_AN_COMPLETE 0x00000004
2538 #define MR_PAGE_RX 0x00000008
2539 #define MR_NP_LOADED 0x00000010
2540 #define MR_TOGGLE_TX 0x00000020
2541 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
2542 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
2543 #define MR_LP_ADV_SYM_PAUSE 0x00000100
2544 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
2545 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2546 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2547 #define MR_LP_ADV_NEXT_PAGE 0x00001000
2548 #define MR_TOGGLE_RX 0x00002000
2549 #define MR_NP_RX 0x00004000
2551 #define MR_LINK_OK 0x80000000
2553 unsigned long link_time, cur_time;
2555 u32 ability_match_cfg;
2556 int ability_match_count;
2558 char ability_match, idle_match, ack_match;
2560 u32 txconfig, rxconfig;
2561 #define ANEG_CFG_NP 0x00000080
2562 #define ANEG_CFG_ACK 0x00000040
2563 #define ANEG_CFG_RF2 0x00000020
2564 #define ANEG_CFG_RF1 0x00000010
2565 #define ANEG_CFG_PS2 0x00000001
2566 #define ANEG_CFG_PS1 0x00008000
2567 #define ANEG_CFG_HD 0x00004000
2568 #define ANEG_CFG_FD 0x00002000
2569 #define ANEG_CFG_INVAL 0x00001f06
2574 #define ANEG_TIMER_ENAB 2
2575 #define ANEG_FAILED -1
2577 #define ANEG_STATE_SETTLE_TIME 10000
2579 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2580 struct tg3_fiber_aneginfo *ap)
2583 unsigned long delta;
2587 if (ap->state == ANEG_STATE_UNKNOWN) {
2591 ap->ability_match_cfg = 0;
2592 ap->ability_match_count = 0;
2593 ap->ability_match = 0;
2599 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2600 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2602 if (rx_cfg_reg != ap->ability_match_cfg) {
2603 ap->ability_match_cfg = rx_cfg_reg;
2604 ap->ability_match = 0;
2605 ap->ability_match_count = 0;
2607 if (++ap->ability_match_count > 1) {
2608 ap->ability_match = 1;
2609 ap->ability_match_cfg = rx_cfg_reg;
2612 if (rx_cfg_reg & ANEG_CFG_ACK)
2620 ap->ability_match_cfg = 0;
2621 ap->ability_match_count = 0;
2622 ap->ability_match = 0;
2628 ap->rxconfig = rx_cfg_reg;
2632 case ANEG_STATE_UNKNOWN:
2633 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2634 ap->state = ANEG_STATE_AN_ENABLE;
2637 case ANEG_STATE_AN_ENABLE:
2638 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2639 if (ap->flags & MR_AN_ENABLE) {
2642 ap->ability_match_cfg = 0;
2643 ap->ability_match_count = 0;
2644 ap->ability_match = 0;
2648 ap->state = ANEG_STATE_RESTART_INIT;
2650 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2654 case ANEG_STATE_RESTART_INIT:
2655 ap->link_time = ap->cur_time;
2656 ap->flags &= ~(MR_NP_LOADED);
2658 tw32(MAC_TX_AUTO_NEG, 0);
2659 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2660 tw32_f(MAC_MODE, tp->mac_mode);
2663 ret = ANEG_TIMER_ENAB;
2664 ap->state = ANEG_STATE_RESTART;
2667 case ANEG_STATE_RESTART:
2668 delta = ap->cur_time - ap->link_time;
2669 if (delta > ANEG_STATE_SETTLE_TIME) {
2670 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2672 ret = ANEG_TIMER_ENAB;
2676 case ANEG_STATE_DISABLE_LINK_OK:
2680 case ANEG_STATE_ABILITY_DETECT_INIT:
2681 ap->flags &= ~(MR_TOGGLE_TX);
2682 ap->txconfig = ANEG_CFG_FD;
2683 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2684 if (flowctrl & ADVERTISE_1000XPAUSE)
2685 ap->txconfig |= ANEG_CFG_PS1;
2686 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2687 ap->txconfig |= ANEG_CFG_PS2;
2688 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2689 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2690 tw32_f(MAC_MODE, tp->mac_mode);
2693 ap->state = ANEG_STATE_ABILITY_DETECT;
2696 case ANEG_STATE_ABILITY_DETECT:
2697 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2698 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2702 case ANEG_STATE_ACK_DETECT_INIT:
2703 ap->txconfig |= ANEG_CFG_ACK;
2704 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2705 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2706 tw32_f(MAC_MODE, tp->mac_mode);
2709 ap->state = ANEG_STATE_ACK_DETECT;
2712 case ANEG_STATE_ACK_DETECT:
2713 if (ap->ack_match != 0) {
2714 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2715 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2716 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2718 ap->state = ANEG_STATE_AN_ENABLE;
2720 } else if (ap->ability_match != 0 &&
2721 ap->rxconfig == 0) {
2722 ap->state = ANEG_STATE_AN_ENABLE;
2726 case ANEG_STATE_COMPLETE_ACK_INIT:
2727 if (ap->rxconfig & ANEG_CFG_INVAL) {
2731 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2732 MR_LP_ADV_HALF_DUPLEX |
2733 MR_LP_ADV_SYM_PAUSE |
2734 MR_LP_ADV_ASYM_PAUSE |
2735 MR_LP_ADV_REMOTE_FAULT1 |
2736 MR_LP_ADV_REMOTE_FAULT2 |
2737 MR_LP_ADV_NEXT_PAGE |
2740 if (ap->rxconfig & ANEG_CFG_FD)
2741 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2742 if (ap->rxconfig & ANEG_CFG_HD)
2743 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2744 if (ap->rxconfig & ANEG_CFG_PS1)
2745 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2746 if (ap->rxconfig & ANEG_CFG_PS2)
2747 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2748 if (ap->rxconfig & ANEG_CFG_RF1)
2749 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2750 if (ap->rxconfig & ANEG_CFG_RF2)
2751 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2752 if (ap->rxconfig & ANEG_CFG_NP)
2753 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2755 ap->link_time = ap->cur_time;
2757 ap->flags ^= (MR_TOGGLE_TX);
2758 if (ap->rxconfig & 0x0008)
2759 ap->flags |= MR_TOGGLE_RX;
2760 if (ap->rxconfig & ANEG_CFG_NP)
2761 ap->flags |= MR_NP_RX;
2762 ap->flags |= MR_PAGE_RX;
2764 ap->state = ANEG_STATE_COMPLETE_ACK;
2765 ret = ANEG_TIMER_ENAB;
2768 case ANEG_STATE_COMPLETE_ACK:
2769 if (ap->ability_match != 0 &&
2770 ap->rxconfig == 0) {
2771 ap->state = ANEG_STATE_AN_ENABLE;
2774 delta = ap->cur_time - ap->link_time;
2775 if (delta > ANEG_STATE_SETTLE_TIME) {
2776 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2777 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2779 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2780 !(ap->flags & MR_NP_RX)) {
2781 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2789 case ANEG_STATE_IDLE_DETECT_INIT:
2790 ap->link_time = ap->cur_time;
2791 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2792 tw32_f(MAC_MODE, tp->mac_mode);
2795 ap->state = ANEG_STATE_IDLE_DETECT;
2796 ret = ANEG_TIMER_ENAB;
2799 case ANEG_STATE_IDLE_DETECT:
2800 if (ap->ability_match != 0 &&
2801 ap->rxconfig == 0) {
2802 ap->state = ANEG_STATE_AN_ENABLE;
2805 delta = ap->cur_time - ap->link_time;
2806 if (delta > ANEG_STATE_SETTLE_TIME) {
2807 /* XXX another gem from the Broadcom driver :( */
2808 ap->state = ANEG_STATE_LINK_OK;
2812 case ANEG_STATE_LINK_OK:
2813 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2817 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2818 /* ??? unimplemented */
2821 case ANEG_STATE_NEXT_PAGE_WAIT:
2822 /* ??? unimplemented */
2833 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
2836 struct tg3_fiber_aneginfo aninfo;
2837 int status = ANEG_FAILED;
2841 tw32_f(MAC_TX_AUTO_NEG, 0);
2843 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2844 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2847 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2850 memset(&aninfo, 0, sizeof(aninfo));
2851 aninfo.flags |= MR_AN_ENABLE;
2852 aninfo.state = ANEG_STATE_UNKNOWN;
2853 aninfo.cur_time = 0;
2855 while (++tick < 195000) {
2856 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2857 if (status == ANEG_DONE || status == ANEG_FAILED)
2863 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2864 tw32_f(MAC_MODE, tp->mac_mode);
2867 *txflags = aninfo.txconfig;
2868 *rxflags = aninfo.flags;
2870 if (status == ANEG_DONE &&
2871 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2872 MR_LP_ADV_FULL_DUPLEX)))
2878 static void tg3_init_bcm8002(struct tg3 *tp)
2880 u32 mac_status = tr32(MAC_STATUS);
2883 /* Reset when initting first time or we have a link. */
2884 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2885 !(mac_status & MAC_STATUS_PCS_SYNCED))
2888 /* Set PLL lock range. */
2889 tg3_writephy(tp, 0x16, 0x8007);
2892 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2894 /* Wait for reset to complete. */
2895 /* XXX schedule_timeout() ... */
2896 for (i = 0; i < 500; i++)
2899 /* Config mode; select PMA/Ch 1 regs. */
2900 tg3_writephy(tp, 0x10, 0x8411);
2902 /* Enable auto-lock and comdet, select txclk for tx. */
2903 tg3_writephy(tp, 0x11, 0x0a10);
2905 tg3_writephy(tp, 0x18, 0x00a0);
2906 tg3_writephy(tp, 0x16, 0x41ff);
2908 /* Assert and deassert POR. */
2909 tg3_writephy(tp, 0x13, 0x0400);
2911 tg3_writephy(tp, 0x13, 0x0000);
2913 tg3_writephy(tp, 0x11, 0x0a50);
2915 tg3_writephy(tp, 0x11, 0x0a10);
2917 /* Wait for signal to stabilize */
2918 /* XXX schedule_timeout() ... */
2919 for (i = 0; i < 15000; i++)
2922 /* Deselect the channel register so we can read the PHYID
2925 tg3_writephy(tp, 0x10, 0x8011);
2928 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2931 u32 sg_dig_ctrl, sg_dig_status;
2932 u32 serdes_cfg, expected_sg_dig_ctrl;
2933 int workaround, port_a;
2934 int current_link_up;
2937 expected_sg_dig_ctrl = 0;
2940 current_link_up = 0;
2942 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2943 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2945 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2948 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2949 /* preserve bits 20-23 for voltage regulator */
2950 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2953 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2955 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2956 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
2958 u32 val = serdes_cfg;
2964 tw32_f(MAC_SERDES_CFG, val);
2967 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
2969 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2970 tg3_setup_flow_control(tp, 0, 0);
2971 current_link_up = 1;
2976 /* Want auto-negotiation. */
2977 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
2979 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
2980 if (flowctrl & ADVERTISE_1000XPAUSE)
2981 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
2982 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
2983 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
2985 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2986 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2987 tp->serdes_counter &&
2988 ((mac_status & (MAC_STATUS_PCS_SYNCED |
2989 MAC_STATUS_RCVD_CFG)) ==
2990 MAC_STATUS_PCS_SYNCED)) {
2991 tp->serdes_counter--;
2992 current_link_up = 1;
2997 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2998 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3000 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3002 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3003 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3004 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3005 MAC_STATUS_SIGNAL_DET)) {
3006 sg_dig_status = tr32(SG_DIG_STATUS);
3007 mac_status = tr32(MAC_STATUS);
3009 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3010 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3011 u32 local_adv = 0, remote_adv = 0;
3013 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3014 local_adv |= ADVERTISE_1000XPAUSE;
3015 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3016 local_adv |= ADVERTISE_1000XPSE_ASYM;
3018 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3019 remote_adv |= LPA_1000XPAUSE;
3020 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3021 remote_adv |= LPA_1000XPAUSE_ASYM;
3023 tg3_setup_flow_control(tp, local_adv, remote_adv);
3024 current_link_up = 1;
3025 tp->serdes_counter = 0;
3026 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3027 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3028 if (tp->serdes_counter)
3029 tp->serdes_counter--;
3032 u32 val = serdes_cfg;
3039 tw32_f(MAC_SERDES_CFG, val);
3042 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3045 /* Link parallel detection - link is up */
3046 /* only if we have PCS_SYNC and not */
3047 /* receiving config code words */
3048 mac_status = tr32(MAC_STATUS);
3049 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3050 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3051 tg3_setup_flow_control(tp, 0, 0);
3052 current_link_up = 1;
3054 TG3_FLG2_PARALLEL_DETECT;
3055 tp->serdes_counter =
3056 SERDES_PARALLEL_DET_TIMEOUT;
3058 goto restart_autoneg;
3062 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3063 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3067 return current_link_up;
3070 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3072 int current_link_up = 0;
3074 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3077 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3078 u32 txflags, rxflags;
3081 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3082 u32 local_adv = 0, remote_adv = 0;
3084 if (txflags & ANEG_CFG_PS1)
3085 local_adv |= ADVERTISE_1000XPAUSE;
3086 if (txflags & ANEG_CFG_PS2)
3087 local_adv |= ADVERTISE_1000XPSE_ASYM;
3089 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3090 remote_adv |= LPA_1000XPAUSE;
3091 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3092 remote_adv |= LPA_1000XPAUSE_ASYM;
3094 tg3_setup_flow_control(tp, local_adv, remote_adv);
3096 current_link_up = 1;
3098 for (i = 0; i < 30; i++) {
3101 (MAC_STATUS_SYNC_CHANGED |
3102 MAC_STATUS_CFG_CHANGED));
3104 if ((tr32(MAC_STATUS) &
3105 (MAC_STATUS_SYNC_CHANGED |
3106 MAC_STATUS_CFG_CHANGED)) == 0)
3110 mac_status = tr32(MAC_STATUS);
3111 if (current_link_up == 0 &&
3112 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3113 !(mac_status & MAC_STATUS_RCVD_CFG))
3114 current_link_up = 1;
3116 tg3_setup_flow_control(tp, 0, 0);
3118 /* Forcing 1000FD link up. */
3119 current_link_up = 1;
3121 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3124 tw32_f(MAC_MODE, tp->mac_mode);
3129 return current_link_up;
3132 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3135 u16 orig_active_speed;
3136 u8 orig_active_duplex;
3138 int current_link_up;
3141 orig_pause_cfg = tp->link_config.active_flowctrl;
3142 orig_active_speed = tp->link_config.active_speed;
3143 orig_active_duplex = tp->link_config.active_duplex;
3145 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3146 netif_carrier_ok(tp->dev) &&
3147 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3148 mac_status = tr32(MAC_STATUS);
3149 mac_status &= (MAC_STATUS_PCS_SYNCED |
3150 MAC_STATUS_SIGNAL_DET |
3151 MAC_STATUS_CFG_CHANGED |
3152 MAC_STATUS_RCVD_CFG);
3153 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3154 MAC_STATUS_SIGNAL_DET)) {
3155 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3156 MAC_STATUS_CFG_CHANGED));
3161 tw32_f(MAC_TX_AUTO_NEG, 0);
3163 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3164 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3165 tw32_f(MAC_MODE, tp->mac_mode);
3168 if (tp->phy_id == PHY_ID_BCM8002)
3169 tg3_init_bcm8002(tp);
3171 /* Enable link change event even when serdes polling. */
3172 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3175 current_link_up = 0;
3176 mac_status = tr32(MAC_STATUS);
3178 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3179 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3181 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3183 tp->hw_status->status =
3184 (SD_STATUS_UPDATED |
3185 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3187 for (i = 0; i < 100; i++) {
3188 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3189 MAC_STATUS_CFG_CHANGED));
3191 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3192 MAC_STATUS_CFG_CHANGED |
3193 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3197 mac_status = tr32(MAC_STATUS);
3198 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3199 current_link_up = 0;
3200 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3201 tp->serdes_counter == 0) {
3202 tw32_f(MAC_MODE, (tp->mac_mode |
3203 MAC_MODE_SEND_CONFIGS));
3205 tw32_f(MAC_MODE, tp->mac_mode);
3209 if (current_link_up == 1) {
3210 tp->link_config.active_speed = SPEED_1000;
3211 tp->link_config.active_duplex = DUPLEX_FULL;
3212 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3213 LED_CTRL_LNKLED_OVERRIDE |
3214 LED_CTRL_1000MBPS_ON));
3216 tp->link_config.active_speed = SPEED_INVALID;
3217 tp->link_config.active_duplex = DUPLEX_INVALID;
3218 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3219 LED_CTRL_LNKLED_OVERRIDE |
3220 LED_CTRL_TRAFFIC_OVERRIDE));
3223 if (current_link_up != netif_carrier_ok(tp->dev)) {
3224 if (current_link_up)
3225 netif_carrier_on(tp->dev);
3227 netif_carrier_off(tp->dev);
3228 tg3_link_report(tp);
3230 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3231 if (orig_pause_cfg != now_pause_cfg ||
3232 orig_active_speed != tp->link_config.active_speed ||
3233 orig_active_duplex != tp->link_config.active_duplex)
3234 tg3_link_report(tp);
3240 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3242 int current_link_up, err = 0;
3246 u32 local_adv, remote_adv;
3248 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3249 tw32_f(MAC_MODE, tp->mac_mode);
3255 (MAC_STATUS_SYNC_CHANGED |
3256 MAC_STATUS_CFG_CHANGED |
3257 MAC_STATUS_MI_COMPLETION |
3258 MAC_STATUS_LNKSTATE_CHANGED));
3264 current_link_up = 0;
3265 current_speed = SPEED_INVALID;
3266 current_duplex = DUPLEX_INVALID;
3268 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3269 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3270 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3271 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3272 bmsr |= BMSR_LSTATUS;
3274 bmsr &= ~BMSR_LSTATUS;
3277 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3279 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3280 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3281 tp->link_config.flowctrl == tp->link_config.active_flowctrl) {
3282 /* do nothing, just check for link up at the end */
3283 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3286 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3287 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3288 ADVERTISE_1000XPAUSE |
3289 ADVERTISE_1000XPSE_ASYM |
3292 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3294 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3295 new_adv |= ADVERTISE_1000XHALF;
3296 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3297 new_adv |= ADVERTISE_1000XFULL;
3299 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3300 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3301 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3302 tg3_writephy(tp, MII_BMCR, bmcr);
3304 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3305 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3306 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3313 bmcr &= ~BMCR_SPEED1000;
3314 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3316 if (tp->link_config.duplex == DUPLEX_FULL)
3317 new_bmcr |= BMCR_FULLDPLX;
3319 if (new_bmcr != bmcr) {
3320 /* BMCR_SPEED1000 is a reserved bit that needs
3321 * to be set on write.
3323 new_bmcr |= BMCR_SPEED1000;
3325 /* Force a linkdown */
3326 if (netif_carrier_ok(tp->dev)) {
3329 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3330 adv &= ~(ADVERTISE_1000XFULL |
3331 ADVERTISE_1000XHALF |
3333 tg3_writephy(tp, MII_ADVERTISE, adv);
3334 tg3_writephy(tp, MII_BMCR, bmcr |
3338 netif_carrier_off(tp->dev);
3340 tg3_writephy(tp, MII_BMCR, new_bmcr);
3342 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3343 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3344 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3346 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3347 bmsr |= BMSR_LSTATUS;
3349 bmsr &= ~BMSR_LSTATUS;
3351 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3355 if (bmsr & BMSR_LSTATUS) {
3356 current_speed = SPEED_1000;
3357 current_link_up = 1;
3358 if (bmcr & BMCR_FULLDPLX)
3359 current_duplex = DUPLEX_FULL;
3361 current_duplex = DUPLEX_HALF;
3366 if (bmcr & BMCR_ANENABLE) {
3369 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3370 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3371 common = local_adv & remote_adv;
3372 if (common & (ADVERTISE_1000XHALF |
3373 ADVERTISE_1000XFULL)) {
3374 if (common & ADVERTISE_1000XFULL)
3375 current_duplex = DUPLEX_FULL;
3377 current_duplex = DUPLEX_HALF;
3380 current_link_up = 0;
3384 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3385 tg3_setup_flow_control(tp, local_adv, remote_adv);
3387 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3388 if (tp->link_config.active_duplex == DUPLEX_HALF)
3389 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3391 tw32_f(MAC_MODE, tp->mac_mode);
3394 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3396 tp->link_config.active_speed = current_speed;
3397 tp->link_config.active_duplex = current_duplex;
3399 if (current_link_up != netif_carrier_ok(tp->dev)) {
3400 if (current_link_up)
3401 netif_carrier_on(tp->dev);
3403 netif_carrier_off(tp->dev);
3404 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3406 tg3_link_report(tp);
3411 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3413 if (tp->serdes_counter) {
3414 /* Give autoneg time to complete. */
3415 tp->serdes_counter--;
3418 if (!netif_carrier_ok(tp->dev) &&
3419 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3422 tg3_readphy(tp, MII_BMCR, &bmcr);
3423 if (bmcr & BMCR_ANENABLE) {
3426 /* Select shadow register 0x1f */
3427 tg3_writephy(tp, 0x1c, 0x7c00);
3428 tg3_readphy(tp, 0x1c, &phy1);
3430 /* Select expansion interrupt status register */
3431 tg3_writephy(tp, 0x17, 0x0f01);
3432 tg3_readphy(tp, 0x15, &phy2);
3433 tg3_readphy(tp, 0x15, &phy2);
3435 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3436 /* We have signal detect and not receiving
3437 * config code words, link is up by parallel
3441 bmcr &= ~BMCR_ANENABLE;
3442 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3443 tg3_writephy(tp, MII_BMCR, bmcr);
3444 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3448 else if (netif_carrier_ok(tp->dev) &&
3449 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3450 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3453 /* Select expansion interrupt status register */
3454 tg3_writephy(tp, 0x17, 0x0f01);
3455 tg3_readphy(tp, 0x15, &phy2);
3459 /* Config code words received, turn on autoneg. */
3460 tg3_readphy(tp, MII_BMCR, &bmcr);
3461 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3463 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3469 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3473 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3474 err = tg3_setup_fiber_phy(tp, force_reset);
3475 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3476 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3478 err = tg3_setup_copper_phy(tp, force_reset);
3481 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3482 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
3485 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3486 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3488 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3493 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3494 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3495 tw32(GRC_MISC_CFG, val);
3498 if (tp->link_config.active_speed == SPEED_1000 &&
3499 tp->link_config.active_duplex == DUPLEX_HALF)
3500 tw32(MAC_TX_LENGTHS,
3501 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3502 (6 << TX_LENGTHS_IPG_SHIFT) |
3503 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3505 tw32(MAC_TX_LENGTHS,
3506 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3507 (6 << TX_LENGTHS_IPG_SHIFT) |
3508 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3510 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3511 if (netif_carrier_ok(tp->dev)) {
3512 tw32(HOSTCC_STAT_COAL_TICKS,
3513 tp->coal.stats_block_coalesce_usecs);
3515 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3519 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3520 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3521 if (!netif_carrier_ok(tp->dev))
3522 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3525 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3526 tw32(PCIE_PWR_MGMT_THRESH, val);
3532 /* This is called whenever we suspect that the system chipset is re-
3533 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3534 * is bogus tx completions. We try to recover by setting the
3535 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3538 static void tg3_tx_recover(struct tg3 *tp)
3540 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3541 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3543 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3544 "mapped I/O cycles to the network device, attempting to "
3545 "recover. Please report the problem to the driver maintainer "
3546 "and include system chipset information.\n", tp->dev->name);
3548 spin_lock(&tp->lock);
3549 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3550 spin_unlock(&tp->lock);
3553 static inline u32 tg3_tx_avail(struct tg3 *tp)
3556 return (tp->tx_pending -
3557 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3560 /* Tigon3 never reports partial packet sends. So we do not
3561 * need special logic to handle SKBs that have not had all
3562 * of their frags sent yet, like SunGEM does.
3564 static void tg3_tx(struct tg3 *tp)
3566 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3567 u32 sw_idx = tp->tx_cons;
3569 while (sw_idx != hw_idx) {
3570 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3571 struct sk_buff *skb = ri->skb;
3574 if (unlikely(skb == NULL)) {
3579 pci_unmap_single(tp->pdev,
3580 pci_unmap_addr(ri, mapping),
3586 sw_idx = NEXT_TX(sw_idx);
3588 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3589 ri = &tp->tx_buffers[sw_idx];
3590 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3593 pci_unmap_page(tp->pdev,
3594 pci_unmap_addr(ri, mapping),
3595 skb_shinfo(skb)->frags[i].size,
3598 sw_idx = NEXT_TX(sw_idx);
3603 if (unlikely(tx_bug)) {
3609 tp->tx_cons = sw_idx;
3611 /* Need to make the tx_cons update visible to tg3_start_xmit()
3612 * before checking for netif_queue_stopped(). Without the
3613 * memory barrier, there is a small possibility that tg3_start_xmit()
3614 * will miss it and cause the queue to be stopped forever.
3618 if (unlikely(netif_queue_stopped(tp->dev) &&
3619 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3620 netif_tx_lock(tp->dev);
3621 if (netif_queue_stopped(tp->dev) &&
3622 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3623 netif_wake_queue(tp->dev);
3624 netif_tx_unlock(tp->dev);
3628 /* Returns size of skb allocated or < 0 on error.
3630 * We only need to fill in the address because the other members
3631 * of the RX descriptor are invariant, see tg3_init_rings.
3633 * Note the purposeful assymetry of cpu vs. chip accesses. For
3634 * posting buffers we only dirty the first cache line of the RX
3635 * descriptor (containing the address). Whereas for the RX status
3636 * buffers the cpu only reads the last cacheline of the RX descriptor
3637 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3639 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3640 int src_idx, u32 dest_idx_unmasked)
3642 struct tg3_rx_buffer_desc *desc;
3643 struct ring_info *map, *src_map;
3644 struct sk_buff *skb;
3646 int skb_size, dest_idx;
3649 switch (opaque_key) {
3650 case RXD_OPAQUE_RING_STD:
3651 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3652 desc = &tp->rx_std[dest_idx];
3653 map = &tp->rx_std_buffers[dest_idx];
3655 src_map = &tp->rx_std_buffers[src_idx];
3656 skb_size = tp->rx_pkt_buf_sz;
3659 case RXD_OPAQUE_RING_JUMBO:
3660 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3661 desc = &tp->rx_jumbo[dest_idx];
3662 map = &tp->rx_jumbo_buffers[dest_idx];
3664 src_map = &tp->rx_jumbo_buffers[src_idx];
3665 skb_size = RX_JUMBO_PKT_BUF_SZ;
3672 /* Do not overwrite any of the map or rp information
3673 * until we are sure we can commit to a new buffer.
3675 * Callers depend upon this behavior and assume that
3676 * we leave everything unchanged if we fail.
3678 skb = netdev_alloc_skb(tp->dev, skb_size);
3682 skb_reserve(skb, tp->rx_offset);
3684 mapping = pci_map_single(tp->pdev, skb->data,
3685 skb_size - tp->rx_offset,
3686 PCI_DMA_FROMDEVICE);
3689 pci_unmap_addr_set(map, mapping, mapping);
3691 if (src_map != NULL)
3692 src_map->skb = NULL;
3694 desc->addr_hi = ((u64)mapping >> 32);
3695 desc->addr_lo = ((u64)mapping & 0xffffffff);
3700 /* We only need to move over in the address because the other
3701 * members of the RX descriptor are invariant. See notes above
3702 * tg3_alloc_rx_skb for full details.
3704 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3705 int src_idx, u32 dest_idx_unmasked)
3707 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3708 struct ring_info *src_map, *dest_map;
3711 switch (opaque_key) {
3712 case RXD_OPAQUE_RING_STD:
3713 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3714 dest_desc = &tp->rx_std[dest_idx];
3715 dest_map = &tp->rx_std_buffers[dest_idx];
3716 src_desc = &tp->rx_std[src_idx];
3717 src_map = &tp->rx_std_buffers[src_idx];
3720 case RXD_OPAQUE_RING_JUMBO:
3721 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3722 dest_desc = &tp->rx_jumbo[dest_idx];
3723 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3724 src_desc = &tp->rx_jumbo[src_idx];
3725 src_map = &tp->rx_jumbo_buffers[src_idx];
3732 dest_map->skb = src_map->skb;
3733 pci_unmap_addr_set(dest_map, mapping,
3734 pci_unmap_addr(src_map, mapping));
3735 dest_desc->addr_hi = src_desc->addr_hi;
3736 dest_desc->addr_lo = src_desc->addr_lo;
3738 src_map->skb = NULL;
3741 #if TG3_VLAN_TAG_USED
3742 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3744 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3748 /* The RX ring scheme is composed of multiple rings which post fresh
3749 * buffers to the chip, and one special ring the chip uses to report
3750 * status back to the host.
3752 * The special ring reports the status of received packets to the
3753 * host. The chip does not write into the original descriptor the
3754 * RX buffer was obtained from. The chip simply takes the original
3755 * descriptor as provided by the host, updates the status and length
3756 * field, then writes this into the next status ring entry.
3758 * Each ring the host uses to post buffers to the chip is described
3759 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3760 * it is first placed into the on-chip ram. When the packet's length
3761 * is known, it walks down the TG3_BDINFO entries to select the ring.
3762 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3763 * which is within the range of the new packet's length is chosen.
3765 * The "separate ring for rx status" scheme may sound queer, but it makes
3766 * sense from a cache coherency perspective. If only the host writes
3767 * to the buffer post rings, and only the chip writes to the rx status
3768 * rings, then cache lines never move beyond shared-modified state.
3769 * If both the host and chip were to write into the same ring, cache line
3770 * eviction could occur since both entities want it in an exclusive state.
3772 static int tg3_rx(struct tg3 *tp, int budget)
3774 u32 work_mask, rx_std_posted = 0;
3775 u32 sw_idx = tp->rx_rcb_ptr;
3779 hw_idx = tp->hw_status->idx[0].rx_producer;
3781 * We need to order the read of hw_idx and the read of
3782 * the opaque cookie.
3787 while (sw_idx != hw_idx && budget > 0) {
3788 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3790 struct sk_buff *skb;
3791 dma_addr_t dma_addr;
3792 u32 opaque_key, desc_idx, *post_ptr;
3794 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3795 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3796 if (opaque_key == RXD_OPAQUE_RING_STD) {
3797 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3799 skb = tp->rx_std_buffers[desc_idx].skb;
3800 post_ptr = &tp->rx_std_ptr;
3802 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3803 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3805 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3806 post_ptr = &tp->rx_jumbo_ptr;
3809 goto next_pkt_nopost;
3812 work_mask |= opaque_key;
3814 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3815 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3817 tg3_recycle_rx(tp, opaque_key,
3818 desc_idx, *post_ptr);
3820 /* Other statistics kept track of by card. */
3821 tp->net_stats.rx_dropped++;
3825 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3827 if (len > RX_COPY_THRESHOLD
3828 && tp->rx_offset == 2
3829 /* rx_offset != 2 iff this is a 5701 card running
3830 * in PCI-X mode [see tg3_get_invariants()] */
3834 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3835 desc_idx, *post_ptr);
3839 pci_unmap_single(tp->pdev, dma_addr,
3840 skb_size - tp->rx_offset,
3841 PCI_DMA_FROMDEVICE);
3845 struct sk_buff *copy_skb;
3847 tg3_recycle_rx(tp, opaque_key,
3848 desc_idx, *post_ptr);
3850 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3851 if (copy_skb == NULL)
3852 goto drop_it_no_recycle;
3854 skb_reserve(copy_skb, 2);
3855 skb_put(copy_skb, len);
3856 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3857 skb_copy_from_linear_data(skb, copy_skb->data, len);
3858 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3860 /* We'll reuse the original ring buffer. */
3864 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3865 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3866 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3867 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3868 skb->ip_summed = CHECKSUM_UNNECESSARY;
3870 skb->ip_summed = CHECKSUM_NONE;
3872 skb->protocol = eth_type_trans(skb, tp->dev);
3873 #if TG3_VLAN_TAG_USED
3874 if (tp->vlgrp != NULL &&
3875 desc->type_flags & RXD_FLAG_VLAN) {
3876 tg3_vlan_rx(tp, skb,
3877 desc->err_vlan & RXD_VLAN_MASK);
3880 netif_receive_skb(skb);
3882 tp->dev->last_rx = jiffies;
3889 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3890 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3892 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3893 TG3_64BIT_REG_LOW, idx);
3894 work_mask &= ~RXD_OPAQUE_RING_STD;
3899 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
3901 /* Refresh hw_idx to see if there is new work */
3902 if (sw_idx == hw_idx) {
3903 hw_idx = tp->hw_status->idx[0].rx_producer;
3908 /* ACK the status ring. */
3909 tp->rx_rcb_ptr = sw_idx;
3910 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3912 /* Refill RX ring(s). */
3913 if (work_mask & RXD_OPAQUE_RING_STD) {
3914 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3915 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3918 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3919 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3920 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3928 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
3930 struct tg3_hw_status *sblk = tp->hw_status;
3932 /* handle link change and other phy events */
3933 if (!(tp->tg3_flags &
3934 (TG3_FLAG_USE_LINKCHG_REG |
3935 TG3_FLAG_POLL_SERDES))) {
3936 if (sblk->status & SD_STATUS_LINK_CHG) {
3937 sblk->status = SD_STATUS_UPDATED |
3938 (sblk->status & ~SD_STATUS_LINK_CHG);
3939 spin_lock(&tp->lock);
3940 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
3942 (MAC_STATUS_SYNC_CHANGED |
3943 MAC_STATUS_CFG_CHANGED |
3944 MAC_STATUS_MI_COMPLETION |
3945 MAC_STATUS_LNKSTATE_CHANGED));
3948 tg3_setup_phy(tp, 0);
3949 spin_unlock(&tp->lock);
3953 /* run TX completion thread */
3954 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3956 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3960 /* run RX thread, within the bounds set by NAPI.
3961 * All RX "locking" is done by ensuring outside
3962 * code synchronizes with tg3->napi.poll()
3964 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
3965 work_done += tg3_rx(tp, budget - work_done);
3970 static int tg3_poll(struct napi_struct *napi, int budget)
3972 struct tg3 *tp = container_of(napi, struct tg3, napi);
3974 struct tg3_hw_status *sblk = tp->hw_status;
3977 work_done = tg3_poll_work(tp, work_done, budget);
3979 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3982 if (unlikely(work_done >= budget))
3985 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3986 /* tp->last_tag is used in tg3_restart_ints() below
3987 * to tell the hw how much work has been processed,
3988 * so we must read it before checking for more work.
3990 tp->last_tag = sblk->status_tag;
3993 sblk->status &= ~SD_STATUS_UPDATED;
3995 if (likely(!tg3_has_work(tp))) {
3996 netif_rx_complete(tp->dev, napi);
3997 tg3_restart_ints(tp);
4005 /* work_done is guaranteed to be less than budget. */
4006 netif_rx_complete(tp->dev, napi);
4007 schedule_work(&tp->reset_task);
4011 static void tg3_irq_quiesce(struct tg3 *tp)
4013 BUG_ON(tp->irq_sync);
4018 synchronize_irq(tp->pdev->irq);
4021 static inline int tg3_irq_sync(struct tg3 *tp)
4023 return tp->irq_sync;
4026 /* Fully shutdown all tg3 driver activity elsewhere in the system.
4027 * If irq_sync is non-zero, then the IRQ handler must be synchronized
4028 * with as well. Most of the time, this is not necessary except when
4029 * shutting down the device.
4031 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4033 spin_lock_bh(&tp->lock);
4035 tg3_irq_quiesce(tp);
4038 static inline void tg3_full_unlock(struct tg3 *tp)
4040 spin_unlock_bh(&tp->lock);
4043 /* One-shot MSI handler - Chip automatically disables interrupt
4044 * after sending MSI so driver doesn't have to do it.
4046 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4048 struct net_device *dev = dev_id;
4049 struct tg3 *tp = netdev_priv(dev);
4051 prefetch(tp->hw_status);
4052 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4054 if (likely(!tg3_irq_sync(tp)))
4055 netif_rx_schedule(dev, &tp->napi);
4060 /* MSI ISR - No need to check for interrupt sharing and no need to
4061 * flush status block and interrupt mailbox. PCI ordering rules
4062 * guarantee that MSI will arrive after the status block.
4064 static irqreturn_t tg3_msi(int irq, void *dev_id)
4066 struct net_device *dev = dev_id;
4067 struct tg3 *tp = netdev_priv(dev);
4069 prefetch(tp->hw_status);
4070 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4072 * Writing any value to intr-mbox-0 clears PCI INTA# and
4073 * chip-internal interrupt pending events.
4074 * Writing non-zero to intr-mbox-0 additional tells the
4075 * NIC to stop sending us irqs, engaging "in-intr-handler"
4078 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4079 if (likely(!tg3_irq_sync(tp)))
4080 netif_rx_schedule(dev, &tp->napi);
4082 return IRQ_RETVAL(1);
4085 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4087 struct net_device *dev = dev_id;
4088 struct tg3 *tp = netdev_priv(dev);
4089 struct tg3_hw_status *sblk = tp->hw_status;
4090 unsigned int handled = 1;
4092 /* In INTx mode, it is possible for the interrupt to arrive at
4093 * the CPU before the status block posted prior to the interrupt.
4094 * Reading the PCI State register will confirm whether the
4095 * interrupt is ours and will flush the status block.
4097 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4098 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4099 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4106 * Writing any value to intr-mbox-0 clears PCI INTA# and
4107 * chip-internal interrupt pending events.
4108 * Writing non-zero to intr-mbox-0 additional tells the
4109 * NIC to stop sending us irqs, engaging "in-intr-handler"
4112 * Flush the mailbox to de-assert the IRQ immediately to prevent
4113 * spurious interrupts. The flush impacts performance but
4114 * excessive spurious interrupts can be worse in some cases.
4116 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4117 if (tg3_irq_sync(tp))
4119 sblk->status &= ~SD_STATUS_UPDATED;
4120 if (likely(tg3_has_work(tp))) {
4121 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4122 netif_rx_schedule(dev, &tp->napi);
4124 /* No work, shared interrupt perhaps? re-enable
4125 * interrupts, and flush that PCI write
4127 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4131 return IRQ_RETVAL(handled);
4134 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4136 struct net_device *dev = dev_id;
4137 struct tg3 *tp = netdev_priv(dev);
4138 struct tg3_hw_status *sblk = tp->hw_status;
4139 unsigned int handled = 1;
4141 /* In INTx mode, it is possible for the interrupt to arrive at
4142 * the CPU before the status block posted prior to the interrupt.
4143 * Reading the PCI State register will confirm whether the
4144 * interrupt is ours and will flush the status block.
4146 if (unlikely(sblk->status_tag == tp->last_tag)) {
4147 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4148 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4155 * writing any value to intr-mbox-0 clears PCI INTA# and
4156 * chip-internal interrupt pending events.
4157 * writing non-zero to intr-mbox-0 additional tells the
4158 * NIC to stop sending us irqs, engaging "in-intr-handler"
4161 * Flush the mailbox to de-assert the IRQ immediately to prevent
4162 * spurious interrupts. The flush impacts performance but
4163 * excessive spurious interrupts can be worse in some cases.
4165 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4166 if (tg3_irq_sync(tp))
4168 if (netif_rx_schedule_prep(dev, &tp->napi)) {
4169 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4170 /* Update last_tag to mark that this status has been
4171 * seen. Because interrupt may be shared, we may be
4172 * racing with tg3_poll(), so only update last_tag
4173 * if tg3_poll() is not scheduled.
4175 tp->last_tag = sblk->status_tag;
4176 __netif_rx_schedule(dev, &tp->napi);
4179 return IRQ_RETVAL(handled);
4182 /* ISR for interrupt test */
4183 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4185 struct net_device *dev = dev_id;
4186 struct tg3 *tp = netdev_priv(dev);
4187 struct tg3_hw_status *sblk = tp->hw_status;
4189 if ((sblk->status & SD_STATUS_UPDATED) ||
4190 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4191 tg3_disable_ints(tp);
4192 return IRQ_RETVAL(1);
4194 return IRQ_RETVAL(0);
4197 static int tg3_init_hw(struct tg3 *, int);
4198 static int tg3_halt(struct tg3 *, int, int);
4200 /* Restart hardware after configuration changes, self-test, etc.
4201 * Invoked with tp->lock held.
4203 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4204 __releases(tp->lock)
4205 __acquires(tp->lock)
4209 err = tg3_init_hw(tp, reset_phy);
4211 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4212 "aborting.\n", tp->dev->name);
4213 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4214 tg3_full_unlock(tp);
4215 del_timer_sync(&tp->timer);
4217 napi_enable(&tp->napi);
4219 tg3_full_lock(tp, 0);
4224 #ifdef CONFIG_NET_POLL_CONTROLLER
4225 static void tg3_poll_controller(struct net_device *dev)
4227 struct tg3 *tp = netdev_priv(dev);
4229 tg3_interrupt(tp->pdev->irq, dev);
4233 static void tg3_reset_task(struct work_struct *work)
4235 struct tg3 *tp = container_of(work, struct tg3, reset_task);
4236 unsigned int restart_timer;
4238 tg3_full_lock(tp, 0);
4240 if (!netif_running(tp->dev)) {
4241 tg3_full_unlock(tp);
4245 tg3_full_unlock(tp);
4249 tg3_full_lock(tp, 1);
4251 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4252 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4254 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4255 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4256 tp->write32_rx_mbox = tg3_write_flush_reg32;
4257 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4258 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4261 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4262 if (tg3_init_hw(tp, 1))
4265 tg3_netif_start(tp);
4268 mod_timer(&tp->timer, jiffies + 1);
4271 tg3_full_unlock(tp);
4274 static void tg3_dump_short_state(struct tg3 *tp)
4276 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4277 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4278 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4279 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4282 static void tg3_tx_timeout(struct net_device *dev)
4284 struct tg3 *tp = netdev_priv(dev);
4286 if (netif_msg_tx_err(tp)) {
4287 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4289 tg3_dump_short_state(tp);
4292 schedule_work(&tp->reset_task);
4295 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4296 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4298 u32 base = (u32) mapping & 0xffffffff;
4300 return ((base > 0xffffdcc0) &&
4301 (base + len + 8 < base));
4304 /* Test for DMA addresses > 40-bit */
4305 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4308 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4309 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4310 return (((u64) mapping + len) > DMA_40BIT_MASK);
4317 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4319 /* Workaround 4GB and 40-bit hardware DMA bugs. */
4320 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4321 u32 last_plus_one, u32 *start,
4322 u32 base_flags, u32 mss)
4324 struct sk_buff *new_skb;
4325 dma_addr_t new_addr = 0;
4329 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4330 new_skb = skb_copy(skb, GFP_ATOMIC);
4332 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4334 new_skb = skb_copy_expand(skb,
4335 skb_headroom(skb) + more_headroom,
4336 skb_tailroom(skb), GFP_ATOMIC);
4342 /* New SKB is guaranteed to be linear. */
4344 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
4346 /* Make sure new skb does not cross any 4G boundaries.
4347 * Drop the packet if it does.
4349 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
4351 dev_kfree_skb(new_skb);
4354 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4355 base_flags, 1 | (mss << 1));
4356 *start = NEXT_TX(entry);
4360 /* Now clean up the sw ring entries. */
4362 while (entry != last_plus_one) {
4366 len = skb_headlen(skb);
4368 len = skb_shinfo(skb)->frags[i-1].size;
4369 pci_unmap_single(tp->pdev,
4370 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
4371 len, PCI_DMA_TODEVICE);
4373 tp->tx_buffers[entry].skb = new_skb;
4374 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
4376 tp->tx_buffers[entry].skb = NULL;
4378 entry = NEXT_TX(entry);
4387 static void tg3_set_txd(struct tg3 *tp, int entry,
4388 dma_addr_t mapping, int len, u32 flags,
4391 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4392 int is_end = (mss_and_is_end & 0x1);
4393 u32 mss = (mss_and_is_end >> 1);
4397 flags |= TXD_FLAG_END;
4398 if (flags & TXD_FLAG_VLAN) {
4399 vlan_tag = flags >> 16;
4402 vlan_tag |= (mss << TXD_MSS_SHIFT);
4404 txd->addr_hi = ((u64) mapping >> 32);
4405 txd->addr_lo = ((u64) mapping & 0xffffffff);
4406 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4407 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4410 /* hard_start_xmit for devices that don't have any bugs and
4411 * support TG3_FLG2_HW_TSO_2 only.
4413 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4415 struct tg3 *tp = netdev_priv(dev);
4417 u32 len, entry, base_flags, mss;
4419 len = skb_headlen(skb);
4421 /* We are running in BH disabled context with netif_tx_lock
4422 * and TX reclaim runs via tp->napi.poll inside of a software
4423 * interrupt. Furthermore, IRQ processing runs lockless so we have
4424 * no IRQ context deadlocks to worry about either. Rejoice!
4426 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4427 if (!netif_queue_stopped(dev)) {
4428 netif_stop_queue(dev);
4430 /* This is a hard error, log it. */
4431 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4432 "queue awake!\n", dev->name);
4434 return NETDEV_TX_BUSY;
4437 entry = tp->tx_prod;
4440 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4441 int tcp_opt_len, ip_tcp_len;
4443 if (skb_header_cloned(skb) &&
4444 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4449 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4450 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4452 struct iphdr *iph = ip_hdr(skb);
4454 tcp_opt_len = tcp_optlen(skb);
4455 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4458 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4459 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4462 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4463 TXD_FLAG_CPU_POST_DMA);
4465 tcp_hdr(skb)->check = 0;
4468 else if (skb->ip_summed == CHECKSUM_PARTIAL)
4469 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4470 #if TG3_VLAN_TAG_USED
4471 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4472 base_flags |= (TXD_FLAG_VLAN |
4473 (vlan_tx_tag_get(skb) << 16));
4476 /* Queue skb data, a.k.a. the main skb fragment. */
4477 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4479 tp->tx_buffers[entry].skb = skb;
4480 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4482 tg3_set_txd(tp, entry, mapping, len, base_flags,
4483 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4485 entry = NEXT_TX(entry);
4487 /* Now loop through additional data fragments, and queue them. */
4488 if (skb_shinfo(skb)->nr_frags > 0) {
4489 unsigned int i, last;
4491 last = skb_shinfo(skb)->nr_frags - 1;
4492 for (i = 0; i <= last; i++) {
4493 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4496 mapping = pci_map_page(tp->pdev,
4499 len, PCI_DMA_TODEVICE);
4501 tp->tx_buffers[entry].skb = NULL;
4502 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4504 tg3_set_txd(tp, entry, mapping, len,
4505 base_flags, (i == last) | (mss << 1));
4507 entry = NEXT_TX(entry);
4511 /* Packets are ready, update Tx producer idx local and on card. */
4512 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4514 tp->tx_prod = entry;
4515 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4516 netif_stop_queue(dev);
4517 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4518 netif_wake_queue(tp->dev);
4524 dev->trans_start = jiffies;
4526 return NETDEV_TX_OK;
4529 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4531 /* Use GSO to workaround a rare TSO bug that may be triggered when the
4532 * TSO header is greater than 80 bytes.
4534 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4536 struct sk_buff *segs, *nskb;
4538 /* Estimate the number of fragments in the worst case */
4539 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4540 netif_stop_queue(tp->dev);
4541 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4542 return NETDEV_TX_BUSY;
4544 netif_wake_queue(tp->dev);
4547 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4549 goto tg3_tso_bug_end;
4555 tg3_start_xmit_dma_bug(nskb, tp->dev);
4561 return NETDEV_TX_OK;
4564 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4565 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4567 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4569 struct tg3 *tp = netdev_priv(dev);
4571 u32 len, entry, base_flags, mss;
4572 int would_hit_hwbug;
4574 len = skb_headlen(skb);
4576 /* We are running in BH disabled context with netif_tx_lock
4577 * and TX reclaim runs via tp->napi.poll inside of a software
4578 * interrupt. Furthermore, IRQ processing runs lockless so we have
4579 * no IRQ context deadlocks to worry about either. Rejoice!
4581 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4582 if (!netif_queue_stopped(dev)) {
4583 netif_stop_queue(dev);
4585 /* This is a hard error, log it. */
4586 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4587 "queue awake!\n", dev->name);
4589 return NETDEV_TX_BUSY;
4592 entry = tp->tx_prod;
4594 if (skb->ip_summed == CHECKSUM_PARTIAL)
4595 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4597 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4599 int tcp_opt_len, ip_tcp_len, hdr_len;
4601 if (skb_header_cloned(skb) &&
4602 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4607 tcp_opt_len = tcp_optlen(skb);
4608 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4610 hdr_len = ip_tcp_len + tcp_opt_len;
4611 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4612 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4613 return (tg3_tso_bug(tp, skb));
4615 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4616 TXD_FLAG_CPU_POST_DMA);
4620 iph->tot_len = htons(mss + hdr_len);
4621 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4622 tcp_hdr(skb)->check = 0;
4623 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4625 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4630 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4631 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4632 if (tcp_opt_len || iph->ihl > 5) {
4635 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4636 mss |= (tsflags << 11);
4639 if (tcp_opt_len || iph->ihl > 5) {
4642 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4643 base_flags |= tsflags << 12;
4647 #if TG3_VLAN_TAG_USED
4648 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4649 base_flags |= (TXD_FLAG_VLAN |
4650 (vlan_tx_tag_get(skb) << 16));
4653 /* Queue skb data, a.k.a. the main skb fragment. */
4654 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4656 tp->tx_buffers[entry].skb = skb;
4657 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4659 would_hit_hwbug = 0;
4661 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
4662 would_hit_hwbug = 1;
4663 else if (tg3_4g_overflow_test(mapping, len))
4664 would_hit_hwbug = 1;
4666 tg3_set_txd(tp, entry, mapping, len, base_flags,
4667 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4669 entry = NEXT_TX(entry);
4671 /* Now loop through additional data fragments, and queue them. */
4672 if (skb_shinfo(skb)->nr_frags > 0) {
4673 unsigned int i, last;
4675 last = skb_shinfo(skb)->nr_frags - 1;
4676 for (i = 0; i <= last; i++) {
4677 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4680 mapping = pci_map_page(tp->pdev,
4683 len, PCI_DMA_TODEVICE);
4685 tp->tx_buffers[entry].skb = NULL;
4686 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4688 if (tg3_4g_overflow_test(mapping, len))
4689 would_hit_hwbug = 1;
4691 if (tg3_40bit_overflow_test(tp, mapping, len))
4692 would_hit_hwbug = 1;
4694 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4695 tg3_set_txd(tp, entry, mapping, len,
4696 base_flags, (i == last)|(mss << 1));
4698 tg3_set_txd(tp, entry, mapping, len,
4699 base_flags, (i == last));
4701 entry = NEXT_TX(entry);
4705 if (would_hit_hwbug) {
4706 u32 last_plus_one = entry;
4709 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4710 start &= (TG3_TX_RING_SIZE - 1);
4712 /* If the workaround fails due to memory/mapping
4713 * failure, silently drop this packet.
4715 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4716 &start, base_flags, mss))
4722 /* Packets are ready, update Tx producer idx local and on card. */
4723 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4725 tp->tx_prod = entry;
4726 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4727 netif_stop_queue(dev);
4728 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4729 netif_wake_queue(tp->dev);
4735 dev->trans_start = jiffies;
4737 return NETDEV_TX_OK;
4740 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4745 if (new_mtu > ETH_DATA_LEN) {
4746 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4747 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4748 ethtool_op_set_tso(dev, 0);
4751 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4753 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4754 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4755 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4759 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4761 struct tg3 *tp = netdev_priv(dev);
4764 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4767 if (!netif_running(dev)) {
4768 /* We'll just catch it later when the
4771 tg3_set_mtu(dev, tp, new_mtu);
4777 tg3_full_lock(tp, 1);
4779 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4781 tg3_set_mtu(dev, tp, new_mtu);
4783 err = tg3_restart_hw(tp, 0);
4786 tg3_netif_start(tp);
4788 tg3_full_unlock(tp);
4793 /* Free up pending packets in all rx/tx rings.
4795 * The chip has been shut down and the driver detached from
4796 * the networking, so no interrupts or new tx packets will
4797 * end up in the driver. tp->{tx,}lock is not held and we are not
4798 * in an interrupt context and thus may sleep.
4800 static void tg3_free_rings(struct tg3 *tp)
4802 struct ring_info *rxp;
4805 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4806 rxp = &tp->rx_std_buffers[i];
4808 if (rxp->skb == NULL)
4810 pci_unmap_single(tp->pdev,
4811 pci_unmap_addr(rxp, mapping),
4812 tp->rx_pkt_buf_sz - tp->rx_offset,
4813 PCI_DMA_FROMDEVICE);
4814 dev_kfree_skb_any(rxp->skb);
4818 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4819 rxp = &tp->rx_jumbo_buffers[i];
4821 if (rxp->skb == NULL)
4823 pci_unmap_single(tp->pdev,
4824 pci_unmap_addr(rxp, mapping),
4825 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4826 PCI_DMA_FROMDEVICE);
4827 dev_kfree_skb_any(rxp->skb);
4831 for (i = 0; i < TG3_TX_RING_SIZE; ) {
4832 struct tx_ring_info *txp;
4833 struct sk_buff *skb;
4836 txp = &tp->tx_buffers[i];
4844 pci_unmap_single(tp->pdev,
4845 pci_unmap_addr(txp, mapping),
4852 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4853 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4854 pci_unmap_page(tp->pdev,
4855 pci_unmap_addr(txp, mapping),
4856 skb_shinfo(skb)->frags[j].size,
4861 dev_kfree_skb_any(skb);
4865 /* Initialize tx/rx rings for packet processing.
4867 * The chip has been shut down and the driver detached from
4868 * the networking, so no interrupts or new tx packets will
4869 * end up in the driver. tp->{tx,}lock are held and thus
4872 static int tg3_init_rings(struct tg3 *tp)
4876 /* Free up all the SKBs. */
4879 /* Zero out all descriptors. */
4880 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4881 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4882 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4883 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4885 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4886 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4887 (tp->dev->mtu > ETH_DATA_LEN))
4888 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4890 /* Initialize invariants of the rings, we only set this
4891 * stuff once. This works because the card does not
4892 * write into the rx buffer posting rings.
4894 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4895 struct tg3_rx_buffer_desc *rxd;
4897 rxd = &tp->rx_std[i];
4898 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4900 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4901 rxd->opaque = (RXD_OPAQUE_RING_STD |
4902 (i << RXD_OPAQUE_INDEX_SHIFT));
4905 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4906 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4907 struct tg3_rx_buffer_desc *rxd;
4909 rxd = &tp->rx_jumbo[i];
4910 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4912 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4914 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4915 (i << RXD_OPAQUE_INDEX_SHIFT));
4919 /* Now allocate fresh SKBs for each rx ring. */
4920 for (i = 0; i < tp->rx_pending; i++) {
4921 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4922 printk(KERN_WARNING PFX
4923 "%s: Using a smaller RX standard ring, "
4924 "only %d out of %d buffers were allocated "
4926 tp->dev->name, i, tp->rx_pending);
4934 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4935 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4936 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4938 printk(KERN_WARNING PFX
4939 "%s: Using a smaller RX jumbo ring, "
4940 "only %d out of %d buffers were "
4941 "allocated successfully.\n",
4942 tp->dev->name, i, tp->rx_jumbo_pending);
4947 tp->rx_jumbo_pending = i;
4956 * Must not be invoked with interrupt sources disabled and
4957 * the hardware shutdown down.
4959 static void tg3_free_consistent(struct tg3 *tp)
4961 kfree(tp->rx_std_buffers);
4962 tp->rx_std_buffers = NULL;
4964 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4965 tp->rx_std, tp->rx_std_mapping);
4969 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4970 tp->rx_jumbo, tp->rx_jumbo_mapping);
4971 tp->rx_jumbo = NULL;
4974 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4975 tp->rx_rcb, tp->rx_rcb_mapping);
4979 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4980 tp->tx_ring, tp->tx_desc_mapping);
4983 if (tp->hw_status) {
4984 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4985 tp->hw_status, tp->status_mapping);
4986 tp->hw_status = NULL;
4989 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4990 tp->hw_stats, tp->stats_mapping);
4991 tp->hw_stats = NULL;
4996 * Must not be invoked with interrupt sources disabled and
4997 * the hardware shutdown down. Can sleep.
4999 static int tg3_alloc_consistent(struct tg3 *tp)
5001 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
5003 TG3_RX_JUMBO_RING_SIZE)) +
5004 (sizeof(struct tx_ring_info) *
5007 if (!tp->rx_std_buffers)
5010 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5011 tp->tx_buffers = (struct tx_ring_info *)
5012 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5014 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5015 &tp->rx_std_mapping);
5019 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5020 &tp->rx_jumbo_mapping);
5025 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5026 &tp->rx_rcb_mapping);
5030 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5031 &tp->tx_desc_mapping);
5035 tp->hw_status = pci_alloc_consistent(tp->pdev,
5037 &tp->status_mapping);
5041 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5042 sizeof(struct tg3_hw_stats),
5043 &tp->stats_mapping);
5047 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5048 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5053 tg3_free_consistent(tp);
5057 #define MAX_WAIT_CNT 1000
5059 /* To stop a block, clear the enable bit and poll till it
5060 * clears. tp->lock is held.
5062 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5067 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5074 /* We can't enable/disable these bits of the
5075 * 5705/5750, just say success.
5088 for (i = 0; i < MAX_WAIT_CNT; i++) {
5091 if ((val & enable_bit) == 0)
5095 if (i == MAX_WAIT_CNT && !silent) {
5096 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5097 "ofs=%lx enable_bit=%x\n",
5105 /* tp->lock is held. */
5106 static int tg3_abort_hw(struct tg3 *tp, int silent)
5110 tg3_disable_ints(tp);
5112 tp->rx_mode &= ~RX_MODE_ENABLE;
5113 tw32_f(MAC_RX_MODE, tp->rx_mode);
5116 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5117 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5118 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5119 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5120 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5121 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
5123 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5124 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5125 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5126 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5127 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5128 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5129 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
5131 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5132 tw32_f(MAC_MODE, tp->mac_mode);
5135 tp->tx_mode &= ~TX_MODE_ENABLE;
5136 tw32_f(MAC_TX_MODE, tp->tx_mode);
5138 for (i = 0; i < MAX_WAIT_CNT; i++) {
5140 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5143 if (i >= MAX_WAIT_CNT) {
5144 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5145 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5146 tp->dev->name, tr32(MAC_TX_MODE));
5150 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
5151 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5152 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
5154 tw32(FTQ_RESET, 0xffffffff);
5155 tw32(FTQ_RESET, 0x00000000);
5157 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5158 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
5161 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5163 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5168 /* tp->lock is held. */
5169 static int tg3_nvram_lock(struct tg3 *tp)
5171 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5174 if (tp->nvram_lock_cnt == 0) {
5175 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5176 for (i = 0; i < 8000; i++) {
5177 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5182 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5186 tp->nvram_lock_cnt++;
5191 /* tp->lock is held. */
5192 static void tg3_nvram_unlock(struct tg3 *tp)
5194 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5195 if (tp->nvram_lock_cnt > 0)
5196 tp->nvram_lock_cnt--;
5197 if (tp->nvram_lock_cnt == 0)
5198 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5202 /* tp->lock is held. */
5203 static void tg3_enable_nvram_access(struct tg3 *tp)
5205 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5206 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5207 u32 nvaccess = tr32(NVRAM_ACCESS);
5209 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5213 /* tp->lock is held. */
5214 static void tg3_disable_nvram_access(struct tg3 *tp)
5216 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5217 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5218 u32 nvaccess = tr32(NVRAM_ACCESS);
5220 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5224 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5229 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5230 if (apedata != APE_SEG_SIG_MAGIC)
5233 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5234 if (apedata != APE_FW_STATUS_READY)
5237 /* Wait for up to 1 millisecond for APE to service previous event. */
5238 for (i = 0; i < 10; i++) {
5239 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5242 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5244 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5245 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5246 event | APE_EVENT_STATUS_EVENT_PENDING);
5248 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5250 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5256 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5257 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5260 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5265 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5269 case RESET_KIND_INIT:
5270 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5271 APE_HOST_SEG_SIG_MAGIC);
5272 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5273 APE_HOST_SEG_LEN_MAGIC);
5274 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5275 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5276 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5277 APE_HOST_DRIVER_ID_MAGIC);
5278 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5279 APE_HOST_BEHAV_NO_PHYLOCK);
5281 event = APE_EVENT_STATUS_STATE_START;
5283 case RESET_KIND_SHUTDOWN:
5284 event = APE_EVENT_STATUS_STATE_UNLOAD;
5286 case RESET_KIND_SUSPEND:
5287 event = APE_EVENT_STATUS_STATE_SUSPEND;
5293 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5295 tg3_ape_send_event(tp, event);
5298 /* tp->lock is held. */
5299 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5301 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5302 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5304 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5306 case RESET_KIND_INIT:
5307 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5311 case RESET_KIND_SHUTDOWN:
5312 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5316 case RESET_KIND_SUSPEND:
5317 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5326 if (kind == RESET_KIND_INIT ||
5327 kind == RESET_KIND_SUSPEND)
5328 tg3_ape_driver_state_change(tp, kind);
5331 /* tp->lock is held. */
5332 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5334 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5336 case RESET_KIND_INIT:
5337 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5338 DRV_STATE_START_DONE);
5341 case RESET_KIND_SHUTDOWN:
5342 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5343 DRV_STATE_UNLOAD_DONE);
5351 if (kind == RESET_KIND_SHUTDOWN)
5352 tg3_ape_driver_state_change(tp, kind);
5355 /* tp->lock is held. */
5356 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5358 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5360 case RESET_KIND_INIT:
5361 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5365 case RESET_KIND_SHUTDOWN:
5366 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5370 case RESET_KIND_SUSPEND:
5371 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5381 static int tg3_poll_fw(struct tg3 *tp)
5386 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5387 /* Wait up to 20ms for init done. */
5388 for (i = 0; i < 200; i++) {
5389 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5396 /* Wait for firmware initialization to complete. */
5397 for (i = 0; i < 100000; i++) {
5398 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5399 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5404 /* Chip might not be fitted with firmware. Some Sun onboard
5405 * parts are configured like that. So don't signal the timeout
5406 * of the above loop as an error, but do report the lack of
5407 * running firmware once.
5410 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5411 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5413 printk(KERN_INFO PFX "%s: No firmware running.\n",
5420 /* Save PCI command register before chip reset */
5421 static void tg3_save_pci_state(struct tg3 *tp)
5423 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5426 /* Restore PCI state after chip reset */
5427 static void tg3_restore_pci_state(struct tg3 *tp)
5431 /* Re-enable indirect register accesses. */
5432 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5433 tp->misc_host_ctrl);
5435 /* Set MAX PCI retry to zero. */
5436 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5437 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5438 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5439 val |= PCISTATE_RETRY_SAME_DMA;
5440 /* Allow reads and writes to the APE register and memory space. */
5441 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5442 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5443 PCISTATE_ALLOW_APE_SHMEM_WR;
5444 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5446 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5448 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5449 pcie_set_readrq(tp->pdev, 4096);
5451 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5452 tp->pci_cacheline_sz);
5453 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5457 /* Make sure PCI-X relaxed ordering bit is clear. */
5461 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5463 pcix_cmd &= ~PCI_X_CMD_ERO;
5464 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5468 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5470 /* Chip reset on 5780 will reset MSI enable bit,
5471 * so need to restore it.
5473 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5476 pci_read_config_word(tp->pdev,
5477 tp->msi_cap + PCI_MSI_FLAGS,
5479 pci_write_config_word(tp->pdev,
5480 tp->msi_cap + PCI_MSI_FLAGS,
5481 ctrl | PCI_MSI_FLAGS_ENABLE);
5482 val = tr32(MSGINT_MODE);
5483 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5488 static void tg3_stop_fw(struct tg3 *);
5490 /* tp->lock is held. */
5491 static int tg3_chip_reset(struct tg3 *tp)
5494 void (*write_op)(struct tg3 *, u32, u32);
5501 /* No matching tg3_nvram_unlock() after this because
5502 * chip reset below will undo the nvram lock.
5504 tp->nvram_lock_cnt = 0;
5506 /* GRC_MISC_CFG core clock reset will clear the memory
5507 * enable bit in PCI register 4 and the MSI enable bit
5508 * on some chips, so we save relevant registers here.
5510 tg3_save_pci_state(tp);
5512 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5513 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5514 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5515 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5516 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
5517 tw32(GRC_FASTBOOT_PC, 0);
5520 * We must avoid the readl() that normally takes place.
5521 * It locks machines, causes machine checks, and other
5522 * fun things. So, temporarily disable the 5701
5523 * hardware workaround, while we do the reset.
5525 write_op = tp->write32;
5526 if (write_op == tg3_write_flush_reg32)
5527 tp->write32 = tg3_write32;
5529 /* Prevent the irq handler from reading or writing PCI registers
5530 * during chip reset when the memory enable bit in the PCI command
5531 * register may be cleared. The chip does not generate interrupt
5532 * at this time, but the irq handler may still be called due to irq
5533 * sharing or irqpoll.
5535 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5536 if (tp->hw_status) {
5537 tp->hw_status->status = 0;
5538 tp->hw_status->status_tag = 0;
5542 synchronize_irq(tp->pdev->irq);
5545 val = GRC_MISC_CFG_CORECLK_RESET;
5547 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5548 if (tr32(0x7e2c) == 0x60) {
5551 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5552 tw32(GRC_MISC_CFG, (1 << 29));
5557 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5558 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5559 tw32(GRC_VCPU_EXT_CTRL,
5560 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5563 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5564 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5565 tw32(GRC_MISC_CFG, val);
5567 /* restore 5701 hardware bug workaround write method */
5568 tp->write32 = write_op;
5570 /* Unfortunately, we have to delay before the PCI read back.
5571 * Some 575X chips even will not respond to a PCI cfg access
5572 * when the reset command is given to the chip.
5574 * How do these hardware designers expect things to work
5575 * properly if the PCI write is posted for a long period
5576 * of time? It is always necessary to have some method by
5577 * which a register read back can occur to push the write
5578 * out which does the reset.
5580 * For most tg3 variants the trick below was working.
5585 /* Flush PCI posted writes. The normal MMIO registers
5586 * are inaccessible at this time so this is the only
5587 * way to make this reliably (actually, this is no longer
5588 * the case, see above). I tried to use indirect
5589 * register read/write but this upset some 5701 variants.
5591 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5595 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5596 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5600 /* Wait for link training to complete. */
5601 for (i = 0; i < 5000; i++)
5604 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5605 pci_write_config_dword(tp->pdev, 0xc4,
5606 cfg_val | (1 << 15));
5608 /* Set PCIE max payload size and clear error status. */
5609 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5612 tg3_restore_pci_state(tp);
5614 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5617 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5618 val = tr32(MEMARB_MODE);
5619 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
5621 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5623 tw32(0x5000, 0x400);
5626 tw32(GRC_MODE, tp->grc_mode);
5628 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
5631 tw32(0xc4, val | (1 << 15));
5634 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5635 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5636 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5637 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5638 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5639 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5642 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5643 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5644 tw32_f(MAC_MODE, tp->mac_mode);
5645 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5646 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5647 tw32_f(MAC_MODE, tp->mac_mode);
5649 tw32_f(MAC_MODE, 0);
5654 err = tg3_poll_fw(tp);
5658 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5659 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5662 tw32(0x7c00, val | (1 << 25));
5665 /* Reprobe ASF enable state. */
5666 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5667 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5668 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5669 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5672 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5673 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5674 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
5675 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
5676 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5683 /* tp->lock is held. */
5684 static void tg3_stop_fw(struct tg3 *tp)
5686 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5687 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5690 /* Wait for RX cpu to ACK the previous event. */
5691 tg3_wait_for_event_ack(tp);
5693 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5694 val = tr32(GRC_RX_CPU_EVENT);
5695 val |= GRC_RX_CPU_DRIVER_EVENT;
5696 tw32(GRC_RX_CPU_EVENT, val);
5698 /* Wait for RX cpu to ACK this event. */
5699 tg3_wait_for_event_ack(tp);
5703 /* tp->lock is held. */
5704 static int tg3_halt(struct tg3 *tp, int kind, int silent)
5710 tg3_write_sig_pre_reset(tp, kind);
5712 tg3_abort_hw(tp, silent);
5713 err = tg3_chip_reset(tp);
5715 tg3_write_sig_legacy(tp, kind);
5716 tg3_write_sig_post_reset(tp, kind);
5724 #define TG3_FW_RELEASE_MAJOR 0x0
5725 #define TG3_FW_RELASE_MINOR 0x0
5726 #define TG3_FW_RELEASE_FIX 0x0
5727 #define TG3_FW_START_ADDR 0x08000000
5728 #define TG3_FW_TEXT_ADDR 0x08000000
5729 #define TG3_FW_TEXT_LEN 0x9c0
5730 #define TG3_FW_RODATA_ADDR 0x080009c0
5731 #define TG3_FW_RODATA_LEN 0x60
5732 #define TG3_FW_DATA_ADDR 0x08000a40
5733 #define TG3_FW_DATA_LEN 0x20
5734 #define TG3_FW_SBSS_ADDR 0x08000a60
5735 #define TG3_FW_SBSS_LEN 0xc
5736 #define TG3_FW_BSS_ADDR 0x08000a70
5737 #define TG3_FW_BSS_LEN 0x10
5739 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5740 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5741 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5742 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5743 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5744 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5745 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5746 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5747 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5748 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5749 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5750 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5751 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5752 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5753 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5754 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5755 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5756 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5757 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5758 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5759 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5760 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5761 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5762 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5763 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5764 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5766 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5767 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5768 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5769 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5770 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5771 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5772 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5773 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5774 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5775 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5776 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5777 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5778 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5779 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5780 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5781 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5782 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5783 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5784 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5785 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5786 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5787 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5788 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5789 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5790 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5791 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5792 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5793 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5794 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5795 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5796 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5797 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5798 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5799 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5800 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5801 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5802 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5803 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5804 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5805 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5806 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5807 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5808 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5809 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5810 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5811 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5812 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5813 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5814 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5815 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5816 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5817 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5818 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5819 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5820 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5821 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5822 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5823 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5824 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5825 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5826 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5827 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5828 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5829 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5830 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5833 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5834 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5835 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5836 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5837 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5841 #if 0 /* All zeros, don't eat up space with it. */
5842 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5843 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5844 0x00000000, 0x00000000, 0x00000000, 0x00000000
5848 #define RX_CPU_SCRATCH_BASE 0x30000
5849 #define RX_CPU_SCRATCH_SIZE 0x04000
5850 #define TX_CPU_SCRATCH_BASE 0x34000
5851 #define TX_CPU_SCRATCH_SIZE 0x04000
5853 /* tp->lock is held. */
5854 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5858 BUG_ON(offset == TX_CPU_BASE &&
5859 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5861 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5862 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5864 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5867 if (offset == RX_CPU_BASE) {
5868 for (i = 0; i < 10000; i++) {
5869 tw32(offset + CPU_STATE, 0xffffffff);
5870 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5871 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5875 tw32(offset + CPU_STATE, 0xffffffff);
5876 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
5879 for (i = 0; i < 10000; i++) {
5880 tw32(offset + CPU_STATE, 0xffffffff);
5881 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5882 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5888 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5891 (offset == RX_CPU_BASE ? "RX" : "TX"));
5895 /* Clear firmware's nvram arbitration. */
5896 if (tp->tg3_flags & TG3_FLAG_NVRAM)
5897 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5902 unsigned int text_base;
5903 unsigned int text_len;
5904 const u32 *text_data;
5905 unsigned int rodata_base;
5906 unsigned int rodata_len;
5907 const u32 *rodata_data;
5908 unsigned int data_base;
5909 unsigned int data_len;
5910 const u32 *data_data;
5913 /* tp->lock is held. */
5914 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5915 int cpu_scratch_size, struct fw_info *info)
5917 int err, lock_err, i;
5918 void (*write_op)(struct tg3 *, u32, u32);
5920 if (cpu_base == TX_CPU_BASE &&
5921 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5922 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5923 "TX cpu firmware on %s which is 5705.\n",
5928 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5929 write_op = tg3_write_mem;
5931 write_op = tg3_write_indirect_reg32;
5933 /* It is possible that bootcode is still loading at this point.
5934 * Get the nvram lock first before halting the cpu.
5936 lock_err = tg3_nvram_lock(tp);
5937 err = tg3_halt_cpu(tp, cpu_base);
5939 tg3_nvram_unlock(tp);
5943 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5944 write_op(tp, cpu_scratch_base + i, 0);
5945 tw32(cpu_base + CPU_STATE, 0xffffffff);
5946 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5947 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5948 write_op(tp, (cpu_scratch_base +
5949 (info->text_base & 0xffff) +
5952 info->text_data[i] : 0));
5953 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5954 write_op(tp, (cpu_scratch_base +
5955 (info->rodata_base & 0xffff) +
5957 (info->rodata_data ?
5958 info->rodata_data[i] : 0));
5959 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5960 write_op(tp, (cpu_scratch_base +
5961 (info->data_base & 0xffff) +
5964 info->data_data[i] : 0));
5972 /* tp->lock is held. */
5973 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5975 struct fw_info info;
5978 info.text_base = TG3_FW_TEXT_ADDR;
5979 info.text_len = TG3_FW_TEXT_LEN;
5980 info.text_data = &tg3FwText[0];
5981 info.rodata_base = TG3_FW_RODATA_ADDR;
5982 info.rodata_len = TG3_FW_RODATA_LEN;
5983 info.rodata_data = &tg3FwRodata[0];
5984 info.data_base = TG3_FW_DATA_ADDR;
5985 info.data_len = TG3_FW_DATA_LEN;
5986 info.data_data = NULL;
5988 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5989 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5994 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5995 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6000 /* Now startup only the RX cpu. */
6001 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6002 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6004 for (i = 0; i < 5; i++) {
6005 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6007 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6008 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6009 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
6013 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6014 "to set RX CPU PC, is %08x should be %08x\n",
6015 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6019 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6020 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6026 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
6027 #define TG3_TSO_FW_RELASE_MINOR 0x6
6028 #define TG3_TSO_FW_RELEASE_FIX 0x0
6029 #define TG3_TSO_FW_START_ADDR 0x08000000
6030 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
6031 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
6032 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
6033 #define TG3_TSO_FW_RODATA_LEN 0x60
6034 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
6035 #define TG3_TSO_FW_DATA_LEN 0x30
6036 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
6037 #define TG3_TSO_FW_SBSS_LEN 0x2c
6038 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
6039 #define TG3_TSO_FW_BSS_LEN 0x894
6041 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
6042 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6043 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6044 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6045 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6046 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6047 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6048 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6049 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6050 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6051 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6052 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6053 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6054 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6055 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6056 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6057 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6058 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6059 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6060 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6061 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6062 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6063 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6064 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6065 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6066 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6067 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6068 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6069 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6070 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6071 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6072 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6073 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6074 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6075 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6076 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6077 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6078 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6079 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6080 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6081 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6082 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6083 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6084 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6085 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6086 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6087 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6088 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6089 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6090 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6091 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6092 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6093 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6094 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6095 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6096 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6097 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6098 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6099 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6100 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6101 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6102 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6103 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6104 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6105 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6106 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6107 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6108 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6109 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6110 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6111 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6112 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6113 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6114 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6115 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6116 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6117 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6118 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6119 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6120 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6121 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6122 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6123 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6124 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6125 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6126 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6127 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6128 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6129 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6130 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6131 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6132 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6133 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6134 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6135 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6136 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6137 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6138 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6139 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6140 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6141 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6142 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6143 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6144 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6145 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6146 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6147 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6148 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6149 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6150 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6151 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6152 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6153 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6154 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6155 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6156 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6157 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6158 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6159 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6160 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6161 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6162 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6163 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6164 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6165 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6166 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6167 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6168 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6169 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6170 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6171 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6172 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6173 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6174 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6175 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6176 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6177 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6178 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6179 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6180 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6181 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6182 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6183 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6184 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6185 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6186 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6187 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6188 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6189 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6190 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6191 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6192 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6193 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6194 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6195 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6196 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6197 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6198 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6199 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6200 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6201 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6202 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6203 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6204 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6205 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6206 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6207 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6208 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6209 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6210 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6211 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6212 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6213 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6214 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6215 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6216 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6217 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6218 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6219 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6220 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6221 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6222 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6223 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6224 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6225 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6226 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6227 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6228 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6229 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6230 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6231 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6232 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6233 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6234 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6235 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6236 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6237 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6238 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6239 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6240 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6241 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6242 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6243 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6244 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6245 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6246 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6247 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6248 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6249 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6250 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6251 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6252 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6253 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6254 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6255 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6256 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6257 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6258 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6259 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6260 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6261 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6262 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6263 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6264 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6265 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6266 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6267 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6268 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6269 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6270 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6271 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6272 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6273 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6274 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6275 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6276 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6277 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6278 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6279 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6280 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6281 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6282 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6283 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6284 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6285 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6286 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6287 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6288 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6289 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6290 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6291 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6292 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6293 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6294 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6295 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6296 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6297 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6298 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6299 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6300 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6301 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6302 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6303 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6304 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6305 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6306 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6307 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6308 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6309 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6310 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6311 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6312 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6313 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6314 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6315 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6316 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6317 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6318 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6319 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6320 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6321 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6322 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6323 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6324 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6325 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6328 static const u32 tg3TsoFwRodata[] = {
6329 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6330 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6331 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6332 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6336 static const u32 tg3TsoFwData[] = {
6337 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6338 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6342 /* 5705 needs a special version of the TSO firmware. */
6343 #define TG3_TSO5_FW_RELEASE_MAJOR 0x1
6344 #define TG3_TSO5_FW_RELASE_MINOR 0x2
6345 #define TG3_TSO5_FW_RELEASE_FIX 0x0
6346 #define TG3_TSO5_FW_START_ADDR 0x00010000
6347 #define TG3_TSO5_FW_TEXT_ADDR 0x00010000
6348 #define TG3_TSO5_FW_TEXT_LEN 0xe90
6349 #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
6350 #define TG3_TSO5_FW_RODATA_LEN 0x50
6351 #define TG3_TSO5_FW_DATA_ADDR 0x00010f00
6352 #define TG3_TSO5_FW_DATA_LEN 0x20
6353 #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
6354 #define TG3_TSO5_FW_SBSS_LEN 0x28
6355 #define TG3_TSO5_FW_BSS_ADDR 0x00010f50
6356 #define TG3_TSO5_FW_BSS_LEN 0x88
6358 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
6359 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6360 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6361 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6362 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6363 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6364 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6365 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6366 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6367 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6368 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6369 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6370 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6371 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6372 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6373 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6374 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6375 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6376 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6377 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6378 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6379 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6380 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6381 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6382 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6383 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6384 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6385 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6386 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6387 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6388 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6389 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6390 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6391 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6392 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6393 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6394 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6395 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6396 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6397 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6398 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6399 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6400 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6401 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6402 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6403 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6404 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6405 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6406 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6407 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6408 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6409 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6410 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6411 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6412 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6413 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6414 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6415 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6416 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6417 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6418 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6419 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6420 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6421 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6422 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6423 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6424 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6425 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6426 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6427 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6428 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6429 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6430 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6431 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6432 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6433 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6434 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6435 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6436 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6437 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6438 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6439 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6440 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6441 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6442 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6443 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6444 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6445 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6446 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6447 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6448 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6449 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6450 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6451 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6452 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6453 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6454 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6455 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6456 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6457 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6458 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6459 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6460 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6461 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6462 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6463 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6464 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6465 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6466 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6467 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6468 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6469 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6470 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6471 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6472 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6473 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6474 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6475 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6476 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6477 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6478 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6479 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6480 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6481 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6482 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6483 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6484 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6485 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6486 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6487 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6488 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6489 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6490 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6491 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6492 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6493 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6494 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6495 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6496 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6497 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6498 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6499 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6500 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6501 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6502 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6503 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6504 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6505 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6506 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6507 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6508 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6509 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6510 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6511 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6512 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6513 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6514 0x00000000, 0x00000000, 0x00000000,
6517 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
6518 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6519 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6520 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6521 0x00000000, 0x00000000, 0x00000000,
6524 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
6525 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6526 0x00000000, 0x00000000, 0x00000000,
6529 /* tp->lock is held. */
6530 static int tg3_load_tso_firmware(struct tg3 *tp)
6532 struct fw_info info;
6533 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6536 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6539 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6540 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6541 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6542 info.text_data = &tg3Tso5FwText[0];
6543 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6544 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6545 info.rodata_data = &tg3Tso5FwRodata[0];
6546 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6547 info.data_len = TG3_TSO5_FW_DATA_LEN;
6548 info.data_data = &tg3Tso5FwData[0];
6549 cpu_base = RX_CPU_BASE;
6550 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6551 cpu_scratch_size = (info.text_len +
6554 TG3_TSO5_FW_SBSS_LEN +
6555 TG3_TSO5_FW_BSS_LEN);
6557 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6558 info.text_len = TG3_TSO_FW_TEXT_LEN;
6559 info.text_data = &tg3TsoFwText[0];
6560 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6561 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6562 info.rodata_data = &tg3TsoFwRodata[0];
6563 info.data_base = TG3_TSO_FW_DATA_ADDR;
6564 info.data_len = TG3_TSO_FW_DATA_LEN;
6565 info.data_data = &tg3TsoFwData[0];
6566 cpu_base = TX_CPU_BASE;
6567 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6568 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6571 err = tg3_load_firmware_cpu(tp, cpu_base,
6572 cpu_scratch_base, cpu_scratch_size,
6577 /* Now startup the cpu. */
6578 tw32(cpu_base + CPU_STATE, 0xffffffff);
6579 tw32_f(cpu_base + CPU_PC, info.text_base);
6581 for (i = 0; i < 5; i++) {
6582 if (tr32(cpu_base + CPU_PC) == info.text_base)
6584 tw32(cpu_base + CPU_STATE, 0xffffffff);
6585 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6586 tw32_f(cpu_base + CPU_PC, info.text_base);
6590 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6591 "to set CPU PC, is %08x should be %08x\n",
6592 tp->dev->name, tr32(cpu_base + CPU_PC),
6596 tw32(cpu_base + CPU_STATE, 0xffffffff);
6597 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6602 /* tp->lock is held. */
6603 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6605 u32 addr_high, addr_low;
6608 addr_high = ((tp->dev->dev_addr[0] << 8) |
6609 tp->dev->dev_addr[1]);
6610 addr_low = ((tp->dev->dev_addr[2] << 24) |
6611 (tp->dev->dev_addr[3] << 16) |
6612 (tp->dev->dev_addr[4] << 8) |
6613 (tp->dev->dev_addr[5] << 0));
6614 for (i = 0; i < 4; i++) {
6615 if (i == 1 && skip_mac_1)
6617 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6618 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6621 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6622 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6623 for (i = 0; i < 12; i++) {
6624 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6625 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6629 addr_high = (tp->dev->dev_addr[0] +
6630 tp->dev->dev_addr[1] +
6631 tp->dev->dev_addr[2] +
6632 tp->dev->dev_addr[3] +
6633 tp->dev->dev_addr[4] +
6634 tp->dev->dev_addr[5]) &
6635 TX_BACKOFF_SEED_MASK;
6636 tw32(MAC_TX_BACKOFF_SEED, addr_high);
6639 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6641 struct tg3 *tp = netdev_priv(dev);
6642 struct sockaddr *addr = p;
6643 int err = 0, skip_mac_1 = 0;
6645 if (!is_valid_ether_addr(addr->sa_data))
6648 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6650 if (!netif_running(dev))
6653 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6654 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6656 addr0_high = tr32(MAC_ADDR_0_HIGH);
6657 addr0_low = tr32(MAC_ADDR_0_LOW);
6658 addr1_high = tr32(MAC_ADDR_1_HIGH);
6659 addr1_low = tr32(MAC_ADDR_1_LOW);
6661 /* Skip MAC addr 1 if ASF is using it. */
6662 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6663 !(addr1_high == 0 && addr1_low == 0))
6666 spin_lock_bh(&tp->lock);
6667 __tg3_set_mac_addr(tp, skip_mac_1);
6668 spin_unlock_bh(&tp->lock);
6673 /* tp->lock is held. */
6674 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6675 dma_addr_t mapping, u32 maxlen_flags,
6679 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6680 ((u64) mapping >> 32));
6682 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6683 ((u64) mapping & 0xffffffff));
6685 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6688 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6690 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6694 static void __tg3_set_rx_mode(struct net_device *);
6695 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6697 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6698 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6699 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6700 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6701 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6702 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6703 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6705 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6706 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6707 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6708 u32 val = ec->stats_block_coalesce_usecs;
6710 if (!netif_carrier_ok(tp->dev))
6713 tw32(HOSTCC_STAT_COAL_TICKS, val);
6717 /* tp->lock is held. */
6718 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6720 u32 val, rdmac_mode;
6723 tg3_disable_ints(tp);
6727 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6729 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6730 tg3_abort_hw(tp, 1);
6734 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
6737 err = tg3_chip_reset(tp);
6741 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6743 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
6744 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
6745 val = tr32(TG3_CPMU_CTRL);
6746 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6747 tw32(TG3_CPMU_CTRL, val);
6749 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6750 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6751 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6752 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6754 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6755 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6756 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6757 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6759 val = tr32(TG3_CPMU_HST_ACC);
6760 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6761 val |= CPMU_HST_ACC_MACCLK_6_25;
6762 tw32(TG3_CPMU_HST_ACC, val);
6765 /* This works around an issue with Athlon chipsets on
6766 * B3 tigon3 silicon. This bit has no effect on any
6767 * other revision. But do not set this on PCI Express
6768 * chips and don't even touch the clocks if the CPMU is present.
6770 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6771 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6772 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6773 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6776 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6777 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6778 val = tr32(TG3PCI_PCISTATE);
6779 val |= PCISTATE_RETRY_SAME_DMA;
6780 tw32(TG3PCI_PCISTATE, val);
6783 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6784 /* Allow reads and writes to the
6785 * APE register and memory space.
6787 val = tr32(TG3PCI_PCISTATE);
6788 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6789 PCISTATE_ALLOW_APE_SHMEM_WR;
6790 tw32(TG3PCI_PCISTATE, val);
6793 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6794 /* Enable some hw fixes. */
6795 val = tr32(TG3PCI_MSI_DATA);
6796 val |= (1 << 26) | (1 << 28) | (1 << 29);
6797 tw32(TG3PCI_MSI_DATA, val);
6800 /* Descriptor ring init may make accesses to the
6801 * NIC SRAM area to setup the TX descriptors, so we
6802 * can only do this after the hardware has been
6803 * successfully reset.
6805 err = tg3_init_rings(tp);
6809 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6810 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6811 /* This value is determined during the probe time DMA
6812 * engine test, tg3_test_dma.
6814 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6817 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6818 GRC_MODE_4X_NIC_SEND_RINGS |
6819 GRC_MODE_NO_TX_PHDR_CSUM |
6820 GRC_MODE_NO_RX_PHDR_CSUM);
6821 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6823 /* Pseudo-header checksum is done by hardware logic and not
6824 * the offload processers, so make the chip do the pseudo-
6825 * header checksums on receive. For transmit it is more
6826 * convenient to do the pseudo-header checksum in software
6827 * as Linux does that on transmit for us in all cases.
6829 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6833 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6835 /* Setup the timer prescalar register. Clock is always 66Mhz. */
6836 val = tr32(GRC_MISC_CFG);
6838 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6839 tw32(GRC_MISC_CFG, val);
6841 /* Initialize MBUF/DESC pool. */
6842 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6844 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6845 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6846 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6847 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6849 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6850 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6851 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6853 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6856 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6857 TG3_TSO5_FW_RODATA_LEN +
6858 TG3_TSO5_FW_DATA_LEN +
6859 TG3_TSO5_FW_SBSS_LEN +
6860 TG3_TSO5_FW_BSS_LEN);
6861 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6862 tw32(BUFMGR_MB_POOL_ADDR,
6863 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6864 tw32(BUFMGR_MB_POOL_SIZE,
6865 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6868 if (tp->dev->mtu <= ETH_DATA_LEN) {
6869 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6870 tp->bufmgr_config.mbuf_read_dma_low_water);
6871 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6872 tp->bufmgr_config.mbuf_mac_rx_low_water);
6873 tw32(BUFMGR_MB_HIGH_WATER,
6874 tp->bufmgr_config.mbuf_high_water);
6876 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6877 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6878 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6879 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6880 tw32(BUFMGR_MB_HIGH_WATER,
6881 tp->bufmgr_config.mbuf_high_water_jumbo);
6883 tw32(BUFMGR_DMA_LOW_WATER,
6884 tp->bufmgr_config.dma_low_water);
6885 tw32(BUFMGR_DMA_HIGH_WATER,
6886 tp->bufmgr_config.dma_high_water);
6888 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6889 for (i = 0; i < 2000; i++) {
6890 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6895 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6900 /* Setup replenish threshold. */
6901 val = tp->rx_pending / 8;
6904 else if (val > tp->rx_std_max_post)
6905 val = tp->rx_std_max_post;
6906 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6907 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6908 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6910 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6911 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6914 tw32(RCVBDI_STD_THRESH, val);
6916 /* Initialize TG3_BDINFO's at:
6917 * RCVDBDI_STD_BD: standard eth size rx ring
6918 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6919 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6922 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6923 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6924 * ring attribute flags
6925 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6927 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6928 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6930 * The size of each ring is fixed in the firmware, but the location is
6933 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6934 ((u64) tp->rx_std_mapping >> 32));
6935 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6936 ((u64) tp->rx_std_mapping & 0xffffffff));
6937 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6938 NIC_SRAM_RX_BUFFER_DESC);
6940 /* Don't even try to program the JUMBO/MINI buffer descriptor
6943 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6944 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6945 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6947 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6948 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6950 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6951 BDINFO_FLAGS_DISABLED);
6953 /* Setup replenish threshold. */
6954 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6956 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6957 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6958 ((u64) tp->rx_jumbo_mapping >> 32));
6959 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6960 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6961 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6962 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6963 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6964 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6966 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6967 BDINFO_FLAGS_DISABLED);
6972 /* There is only one send ring on 5705/5750, no need to explicitly
6973 * disable the others.
6975 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6976 /* Clear out send RCB ring in SRAM. */
6977 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6978 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6979 BDINFO_FLAGS_DISABLED);
6984 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6985 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6987 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6988 tp->tx_desc_mapping,
6989 (TG3_TX_RING_SIZE <<
6990 BDINFO_FLAGS_MAXLEN_SHIFT),
6991 NIC_SRAM_TX_BUFFER_DESC);
6993 /* There is only one receive return ring on 5705/5750, no need
6994 * to explicitly disable the others.
6996 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6997 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6998 i += TG3_BDINFO_SIZE) {
6999 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7000 BDINFO_FLAGS_DISABLED);
7005 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7007 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7009 (TG3_RX_RCB_RING_SIZE(tp) <<
7010 BDINFO_FLAGS_MAXLEN_SHIFT),
7013 tp->rx_std_ptr = tp->rx_pending;
7014 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7017 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7018 tp->rx_jumbo_pending : 0;
7019 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7022 /* Initialize MAC address and backoff seed. */
7023 __tg3_set_mac_addr(tp, 0);
7025 /* MTU + ethernet header + FCS + optional VLAN tag */
7026 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7028 /* The slot time is changed by tg3_setup_phy if we
7029 * run at gigabit with half duplex.
7031 tw32(MAC_TX_LENGTHS,
7032 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7033 (6 << TX_LENGTHS_IPG_SHIFT) |
7034 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7036 /* Receive rules. */
7037 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7038 tw32(RCVLPC_CONFIG, 0x0181);
7040 /* Calculate RDMAC_MODE setting early, we need it to determine
7041 * the RCVLPC_STATE_ENABLE mask.
7043 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7044 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7045 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7046 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7047 RDMAC_MODE_LNGREAD_ENAB);
7049 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
7050 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7051 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7052 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7054 /* If statement applies to 5705 and 5750 PCI devices only */
7055 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7056 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7057 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7058 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7059 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7060 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7061 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7062 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7063 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7067 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7068 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7070 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7071 rdmac_mode |= (1 << 27);
7073 /* Receive/send statistics. */
7074 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7075 val = tr32(RCVLPC_STATS_ENABLE);
7076 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7077 tw32(RCVLPC_STATS_ENABLE, val);
7078 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7079 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7080 val = tr32(RCVLPC_STATS_ENABLE);
7081 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7082 tw32(RCVLPC_STATS_ENABLE, val);
7084 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7086 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7087 tw32(SNDDATAI_STATSENAB, 0xffffff);
7088 tw32(SNDDATAI_STATSCTRL,
7089 (SNDDATAI_SCTRL_ENABLE |
7090 SNDDATAI_SCTRL_FASTUPD));
7092 /* Setup host coalescing engine. */
7093 tw32(HOSTCC_MODE, 0);
7094 for (i = 0; i < 2000; i++) {
7095 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7100 __tg3_set_coalesce(tp, &tp->coal);
7102 /* set status block DMA address */
7103 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7104 ((u64) tp->status_mapping >> 32));
7105 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7106 ((u64) tp->status_mapping & 0xffffffff));
7108 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7109 /* Status/statistics block address. See tg3_timer,
7110 * the tg3_periodic_fetch_stats call there, and
7111 * tg3_get_stats to see how this works for 5705/5750 chips.
7113 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7114 ((u64) tp->stats_mapping >> 32));
7115 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7116 ((u64) tp->stats_mapping & 0xffffffff));
7117 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7118 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7121 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7123 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7124 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7125 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7126 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7128 /* Clear statistics/status block in chip, and status block in ram. */
7129 for (i = NIC_SRAM_STATS_BLK;
7130 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7132 tg3_write_mem(tp, i, 0);
7135 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7137 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7138 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7139 /* reset to prevent losing 1st rx packet intermittently */
7140 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7144 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7145 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7146 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7147 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7148 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7149 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7150 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7153 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7154 * If TG3_FLG2_IS_NIC is zero, we should read the
7155 * register to preserve the GPIO settings for LOMs. The GPIOs,
7156 * whether used as inputs or outputs, are set by boot code after
7159 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7162 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7163 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7164 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7166 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7167 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7168 GRC_LCLCTRL_GPIO_OUTPUT3;
7170 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7171 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7173 tp->grc_local_ctrl &= ~gpio_mask;
7174 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7176 /* GPIO1 must be driven high for eeprom write protect */
7177 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7178 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7179 GRC_LCLCTRL_GPIO_OUTPUT1);
7181 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7184 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7187 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7188 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7192 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7193 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7194 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7195 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7196 WDMAC_MODE_LNGREAD_ENAB);
7198 /* If statement applies to 5705 and 5750 PCI devices only */
7199 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7200 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7201 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7202 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7203 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7204 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7206 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7207 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7208 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7209 val |= WDMAC_MODE_RX_ACCEL;
7213 /* Enable host coalescing bug fix */
7214 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7215 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7216 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7217 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
7218 val |= WDMAC_MODE_STATUS_TAG_FIX;
7220 tw32_f(WDMAC_MODE, val);
7223 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7226 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7228 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7229 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7230 pcix_cmd |= PCI_X_CMD_READ_2K;
7231 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7232 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7233 pcix_cmd |= PCI_X_CMD_READ_2K;
7235 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7239 tw32_f(RDMAC_MODE, rdmac_mode);
7242 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7243 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7244 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7246 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7248 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7250 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7252 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7253 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7254 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7255 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7256 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7257 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7258 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7259 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7261 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7262 err = tg3_load_5701_a0_firmware_fix(tp);
7267 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7268 err = tg3_load_tso_firmware(tp);
7273 tp->tx_mode = TX_MODE_ENABLE;
7274 tw32_f(MAC_TX_MODE, tp->tx_mode);
7277 tp->rx_mode = RX_MODE_ENABLE;
7278 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7279 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7280 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7282 tw32_f(MAC_RX_MODE, tp->rx_mode);
7285 tw32(MAC_LED_CTRL, tp->led_ctrl);
7287 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7288 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7289 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7292 tw32_f(MAC_RX_MODE, tp->rx_mode);
7295 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7296 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7297 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7298 /* Set drive transmission level to 1.2V */
7299 /* only if the signal pre-emphasis bit is not set */
7300 val = tr32(MAC_SERDES_CFG);
7303 tw32(MAC_SERDES_CFG, val);
7305 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7306 tw32(MAC_SERDES_CFG, 0x616000);
7309 /* Prevent chip from dropping frames when flow control
7312 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7314 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7315 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7316 /* Use hardware link auto-negotiation */
7317 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7320 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7321 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7324 tmp = tr32(SERDES_RX_CTRL);
7325 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7326 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7327 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7328 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7331 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7332 if (tp->link_config.phy_is_low_power) {
7333 tp->link_config.phy_is_low_power = 0;
7334 tp->link_config.speed = tp->link_config.orig_speed;
7335 tp->link_config.duplex = tp->link_config.orig_duplex;
7336 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7339 err = tg3_setup_phy(tp, 0);
7343 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7344 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7347 /* Clear CRC stats. */
7348 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7349 tg3_writephy(tp, MII_TG3_TEST1,
7350 tmp | MII_TG3_TEST1_CRC_EN);
7351 tg3_readphy(tp, 0x14, &tmp);
7356 __tg3_set_rx_mode(tp->dev);
7358 /* Initialize receive rules. */
7359 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7360 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7361 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7362 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7364 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7365 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7369 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7373 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7375 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7377 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7379 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7381 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7383 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7385 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7387 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7389 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7391 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7393 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7395 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7397 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7399 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7407 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7408 /* Write our heartbeat update interval to APE. */
7409 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7410 APE_HOST_HEARTBEAT_INT_DISABLE);
7412 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7417 /* Called at device open time to get the chip ready for
7418 * packet processing. Invoked with tp->lock held.
7420 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7424 /* Force the chip into D0. */
7425 err = tg3_set_power_state(tp, PCI_D0);
7429 tg3_switch_clocks(tp);
7431 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7433 err = tg3_reset_hw(tp, reset_phy);
7439 #define TG3_STAT_ADD32(PSTAT, REG) \
7440 do { u32 __val = tr32(REG); \
7441 (PSTAT)->low += __val; \
7442 if ((PSTAT)->low < __val) \
7443 (PSTAT)->high += 1; \
7446 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7448 struct tg3_hw_stats *sp = tp->hw_stats;
7450 if (!netif_carrier_ok(tp->dev))
7453 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7454 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7455 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7456 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7457 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7458 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7459 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7460 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7461 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7462 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7463 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7464 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7465 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7467 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7468 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7469 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7470 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7471 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7472 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7473 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7474 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7475 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7476 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7477 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7478 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7479 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7480 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7482 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7483 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7484 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7487 static void tg3_timer(unsigned long __opaque)
7489 struct tg3 *tp = (struct tg3 *) __opaque;
7494 spin_lock(&tp->lock);
7496 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7497 /* All of this garbage is because when using non-tagged
7498 * IRQ status the mailbox/status_block protocol the chip
7499 * uses with the cpu is race prone.
7501 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7502 tw32(GRC_LOCAL_CTRL,
7503 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7505 tw32(HOSTCC_MODE, tp->coalesce_mode |
7506 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7509 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7510 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7511 spin_unlock(&tp->lock);
7512 schedule_work(&tp->reset_task);
7517 /* This part only runs once per second. */
7518 if (!--tp->timer_counter) {
7519 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7520 tg3_periodic_fetch_stats(tp);
7522 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7526 mac_stat = tr32(MAC_STATUS);
7529 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7530 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7532 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7536 tg3_setup_phy(tp, 0);
7537 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7538 u32 mac_stat = tr32(MAC_STATUS);
7541 if (netif_carrier_ok(tp->dev) &&
7542 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7545 if (! netif_carrier_ok(tp->dev) &&
7546 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7547 MAC_STATUS_SIGNAL_DET))) {
7551 if (!tp->serdes_counter) {
7554 ~MAC_MODE_PORT_MODE_MASK));
7556 tw32_f(MAC_MODE, tp->mac_mode);
7559 tg3_setup_phy(tp, 0);
7561 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7562 tg3_serdes_parallel_detect(tp);
7564 tp->timer_counter = tp->timer_multiplier;
7567 /* Heartbeat is only sent once every 2 seconds.
7569 * The heartbeat is to tell the ASF firmware that the host
7570 * driver is still alive. In the event that the OS crashes,
7571 * ASF needs to reset the hardware to free up the FIFO space
7572 * that may be filled with rx packets destined for the host.
7573 * If the FIFO is full, ASF will no longer function properly.
7575 * Unintended resets have been reported on real time kernels
7576 * where the timer doesn't run on time. Netpoll will also have
7579 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7580 * to check the ring condition when the heartbeat is expiring
7581 * before doing the reset. This will prevent most unintended
7584 if (!--tp->asf_counter) {
7585 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7588 tg3_wait_for_event_ack(tp);
7590 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7591 FWCMD_NICDRV_ALIVE3);
7592 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7593 /* 5 seconds timeout */
7594 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7595 val = tr32(GRC_RX_CPU_EVENT);
7596 val |= GRC_RX_CPU_DRIVER_EVENT;
7597 tw32_f(GRC_RX_CPU_EVENT, val);
7599 tp->asf_counter = tp->asf_multiplier;
7602 spin_unlock(&tp->lock);
7605 tp->timer.expires = jiffies + tp->timer_offset;
7606 add_timer(&tp->timer);
7609 static int tg3_request_irq(struct tg3 *tp)
7612 unsigned long flags;
7613 struct net_device *dev = tp->dev;
7615 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7617 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7619 flags = IRQF_SAMPLE_RANDOM;
7622 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7623 fn = tg3_interrupt_tagged;
7624 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7626 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7629 static int tg3_test_interrupt(struct tg3 *tp)
7631 struct net_device *dev = tp->dev;
7632 int err, i, intr_ok = 0;
7634 if (!netif_running(dev))
7637 tg3_disable_ints(tp);
7639 free_irq(tp->pdev->irq, dev);
7641 err = request_irq(tp->pdev->irq, tg3_test_isr,
7642 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7646 tp->hw_status->status &= ~SD_STATUS_UPDATED;
7647 tg3_enable_ints(tp);
7649 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7652 for (i = 0; i < 5; i++) {
7653 u32 int_mbox, misc_host_ctrl;
7655 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7657 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7659 if ((int_mbox != 0) ||
7660 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7668 tg3_disable_ints(tp);
7670 free_irq(tp->pdev->irq, dev);
7672 err = tg3_request_irq(tp);
7683 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
7684 * successfully restored
7686 static int tg3_test_msi(struct tg3 *tp)
7688 struct net_device *dev = tp->dev;
7692 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7695 /* Turn off SERR reporting in case MSI terminates with Master
7698 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7699 pci_write_config_word(tp->pdev, PCI_COMMAND,
7700 pci_cmd & ~PCI_COMMAND_SERR);
7702 err = tg3_test_interrupt(tp);
7704 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7709 /* other failures */
7713 /* MSI test failed, go back to INTx mode */
7714 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7715 "switching to INTx mode. Please report this failure to "
7716 "the PCI maintainer and include system chipset information.\n",
7719 free_irq(tp->pdev->irq, dev);
7720 pci_disable_msi(tp->pdev);
7722 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7724 err = tg3_request_irq(tp);
7728 /* Need to reset the chip because the MSI cycle may have terminated
7729 * with Master Abort.
7731 tg3_full_lock(tp, 1);
7733 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7734 err = tg3_init_hw(tp, 1);
7736 tg3_full_unlock(tp);
7739 free_irq(tp->pdev->irq, dev);
7744 static int tg3_open(struct net_device *dev)
7746 struct tg3 *tp = netdev_priv(dev);
7749 netif_carrier_off(tp->dev);
7751 tg3_full_lock(tp, 0);
7753 err = tg3_set_power_state(tp, PCI_D0);
7755 tg3_full_unlock(tp);
7759 tg3_disable_ints(tp);
7760 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7762 tg3_full_unlock(tp);
7764 /* The placement of this call is tied
7765 * to the setup and use of Host TX descriptors.
7767 err = tg3_alloc_consistent(tp);
7771 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
7772 /* All MSI supporting chips should support tagged
7773 * status. Assert that this is the case.
7775 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7776 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7777 "Not using MSI.\n", tp->dev->name);
7778 } else if (pci_enable_msi(tp->pdev) == 0) {
7781 msi_mode = tr32(MSGINT_MODE);
7782 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7783 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7786 err = tg3_request_irq(tp);
7789 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7790 pci_disable_msi(tp->pdev);
7791 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7793 tg3_free_consistent(tp);
7797 napi_enable(&tp->napi);
7799 tg3_full_lock(tp, 0);
7801 err = tg3_init_hw(tp, 1);
7803 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7806 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7807 tp->timer_offset = HZ;
7809 tp->timer_offset = HZ / 10;
7811 BUG_ON(tp->timer_offset > HZ);
7812 tp->timer_counter = tp->timer_multiplier =
7813 (HZ / tp->timer_offset);
7814 tp->asf_counter = tp->asf_multiplier =
7815 ((HZ / tp->timer_offset) * 2);
7817 init_timer(&tp->timer);
7818 tp->timer.expires = jiffies + tp->timer_offset;
7819 tp->timer.data = (unsigned long) tp;
7820 tp->timer.function = tg3_timer;
7823 tg3_full_unlock(tp);
7826 napi_disable(&tp->napi);
7827 free_irq(tp->pdev->irq, dev);
7828 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7829 pci_disable_msi(tp->pdev);
7830 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7832 tg3_free_consistent(tp);
7836 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7837 err = tg3_test_msi(tp);
7840 tg3_full_lock(tp, 0);
7842 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7843 pci_disable_msi(tp->pdev);
7844 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7846 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7848 tg3_free_consistent(tp);
7850 tg3_full_unlock(tp);
7852 napi_disable(&tp->napi);
7857 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7858 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
7859 u32 val = tr32(PCIE_TRANSACTION_CFG);
7861 tw32(PCIE_TRANSACTION_CFG,
7862 val | PCIE_TRANS_CFG_1SHOT_MSI);
7867 tg3_full_lock(tp, 0);
7869 add_timer(&tp->timer);
7870 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7871 tg3_enable_ints(tp);
7873 tg3_full_unlock(tp);
7875 netif_start_queue(dev);
7881 /*static*/ void tg3_dump_state(struct tg3 *tp)
7883 u32 val32, val32_2, val32_3, val32_4, val32_5;
7887 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7888 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7889 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7893 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7894 tr32(MAC_MODE), tr32(MAC_STATUS));
7895 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7896 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7897 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7898 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7899 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7900 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7902 /* Send data initiator control block */
7903 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7904 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7905 printk(" SNDDATAI_STATSCTRL[%08x]\n",
7906 tr32(SNDDATAI_STATSCTRL));
7908 /* Send data completion control block */
7909 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7911 /* Send BD ring selector block */
7912 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7913 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7915 /* Send BD initiator control block */
7916 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7917 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7919 /* Send BD completion control block */
7920 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7922 /* Receive list placement control block */
7923 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7924 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7925 printk(" RCVLPC_STATSCTRL[%08x]\n",
7926 tr32(RCVLPC_STATSCTRL));
7928 /* Receive data and receive BD initiator control block */
7929 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7930 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7932 /* Receive data completion control block */
7933 printk("DEBUG: RCVDCC_MODE[%08x]\n",
7936 /* Receive BD initiator control block */
7937 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7938 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7940 /* Receive BD completion control block */
7941 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7942 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7944 /* Receive list selector control block */
7945 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7946 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7948 /* Mbuf cluster free block */
7949 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7950 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7952 /* Host coalescing control block */
7953 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7954 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7955 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7956 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7957 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7958 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7959 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7960 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7961 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7962 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7963 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7964 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7966 /* Memory arbiter control block */
7967 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7968 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7970 /* Buffer manager control block */
7971 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7972 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7973 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7974 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7975 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7976 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7977 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7978 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7980 /* Read DMA control block */
7981 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7982 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7984 /* Write DMA control block */
7985 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7986 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7988 /* DMA completion block */
7989 printk("DEBUG: DMAC_MODE[%08x]\n",
7993 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7994 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7995 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7996 tr32(GRC_LOCAL_CTRL));
7999 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8000 tr32(RCVDBDI_JUMBO_BD + 0x0),
8001 tr32(RCVDBDI_JUMBO_BD + 0x4),
8002 tr32(RCVDBDI_JUMBO_BD + 0x8),
8003 tr32(RCVDBDI_JUMBO_BD + 0xc));
8004 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8005 tr32(RCVDBDI_STD_BD + 0x0),
8006 tr32(RCVDBDI_STD_BD + 0x4),
8007 tr32(RCVDBDI_STD_BD + 0x8),
8008 tr32(RCVDBDI_STD_BD + 0xc));
8009 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8010 tr32(RCVDBDI_MINI_BD + 0x0),
8011 tr32(RCVDBDI_MINI_BD + 0x4),
8012 tr32(RCVDBDI_MINI_BD + 0x8),
8013 tr32(RCVDBDI_MINI_BD + 0xc));
8015 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8016 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8017 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8018 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8019 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8020 val32, val32_2, val32_3, val32_4);
8022 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8023 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8024 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8025 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8026 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8027 val32, val32_2, val32_3, val32_4);
8029 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8030 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8031 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8032 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8033 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8034 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8035 val32, val32_2, val32_3, val32_4, val32_5);
8037 /* SW status block */
8038 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8039 tp->hw_status->status,
8040 tp->hw_status->status_tag,
8041 tp->hw_status->rx_jumbo_consumer,
8042 tp->hw_status->rx_consumer,
8043 tp->hw_status->rx_mini_consumer,
8044 tp->hw_status->idx[0].rx_producer,
8045 tp->hw_status->idx[0].tx_consumer);
8047 /* SW statistics block */
8048 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8049 ((u32 *)tp->hw_stats)[0],
8050 ((u32 *)tp->hw_stats)[1],
8051 ((u32 *)tp->hw_stats)[2],
8052 ((u32 *)tp->hw_stats)[3]);
8055 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
8056 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8057 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8058 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8059 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
8061 /* NIC side send descriptors. */
8062 for (i = 0; i < 6; i++) {
8065 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8066 + (i * sizeof(struct tg3_tx_buffer_desc));
8067 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8069 readl(txd + 0x0), readl(txd + 0x4),
8070 readl(txd + 0x8), readl(txd + 0xc));
8073 /* NIC side RX descriptors. */
8074 for (i = 0; i < 6; i++) {
8077 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8078 + (i * sizeof(struct tg3_rx_buffer_desc));
8079 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8081 readl(rxd + 0x0), readl(rxd + 0x4),
8082 readl(rxd + 0x8), readl(rxd + 0xc));
8083 rxd += (4 * sizeof(u32));
8084 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8086 readl(rxd + 0x0), readl(rxd + 0x4),
8087 readl(rxd + 0x8), readl(rxd + 0xc));
8090 for (i = 0; i < 6; i++) {
8093 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8094 + (i * sizeof(struct tg3_rx_buffer_desc));
8095 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8097 readl(rxd + 0x0), readl(rxd + 0x4),
8098 readl(rxd + 0x8), readl(rxd + 0xc));
8099 rxd += (4 * sizeof(u32));
8100 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8102 readl(rxd + 0x0), readl(rxd + 0x4),
8103 readl(rxd + 0x8), readl(rxd + 0xc));
8108 static struct net_device_stats *tg3_get_stats(struct net_device *);
8109 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8111 static int tg3_close(struct net_device *dev)
8113 struct tg3 *tp = netdev_priv(dev);
8115 napi_disable(&tp->napi);
8116 cancel_work_sync(&tp->reset_task);
8118 netif_stop_queue(dev);
8120 del_timer_sync(&tp->timer);
8122 tg3_full_lock(tp, 1);
8127 tg3_disable_ints(tp);
8129 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8131 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8133 tg3_full_unlock(tp);
8135 free_irq(tp->pdev->irq, dev);
8136 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8137 pci_disable_msi(tp->pdev);
8138 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8141 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8142 sizeof(tp->net_stats_prev));
8143 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8144 sizeof(tp->estats_prev));
8146 tg3_free_consistent(tp);
8148 tg3_set_power_state(tp, PCI_D3hot);
8150 netif_carrier_off(tp->dev);
8155 static inline unsigned long get_stat64(tg3_stat64_t *val)
8159 #if (BITS_PER_LONG == 32)
8162 ret = ((u64)val->high << 32) | ((u64)val->low);
8167 static unsigned long calc_crc_errors(struct tg3 *tp)
8169 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8171 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8172 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
8176 spin_lock_bh(&tp->lock);
8177 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8178 tg3_writephy(tp, MII_TG3_TEST1,
8179 val | MII_TG3_TEST1_CRC_EN);
8180 tg3_readphy(tp, 0x14, &val);
8183 spin_unlock_bh(&tp->lock);
8185 tp->phy_crc_errors += val;
8187 return tp->phy_crc_errors;
8190 return get_stat64(&hw_stats->rx_fcs_errors);
8193 #define ESTAT_ADD(member) \
8194 estats->member = old_estats->member + \
8195 get_stat64(&hw_stats->member)
8197 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8199 struct tg3_ethtool_stats *estats = &tp->estats;
8200 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8201 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8206 ESTAT_ADD(rx_octets);
8207 ESTAT_ADD(rx_fragments);
8208 ESTAT_ADD(rx_ucast_packets);
8209 ESTAT_ADD(rx_mcast_packets);
8210 ESTAT_ADD(rx_bcast_packets);
8211 ESTAT_ADD(rx_fcs_errors);
8212 ESTAT_ADD(rx_align_errors);
8213 ESTAT_ADD(rx_xon_pause_rcvd);
8214 ESTAT_ADD(rx_xoff_pause_rcvd);
8215 ESTAT_ADD(rx_mac_ctrl_rcvd);
8216 ESTAT_ADD(rx_xoff_entered);
8217 ESTAT_ADD(rx_frame_too_long_errors);
8218 ESTAT_ADD(rx_jabbers);
8219 ESTAT_ADD(rx_undersize_packets);
8220 ESTAT_ADD(rx_in_length_errors);
8221 ESTAT_ADD(rx_out_length_errors);
8222 ESTAT_ADD(rx_64_or_less_octet_packets);
8223 ESTAT_ADD(rx_65_to_127_octet_packets);
8224 ESTAT_ADD(rx_128_to_255_octet_packets);
8225 ESTAT_ADD(rx_256_to_511_octet_packets);
8226 ESTAT_ADD(rx_512_to_1023_octet_packets);
8227 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8228 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8229 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8230 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8231 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8233 ESTAT_ADD(tx_octets);
8234 ESTAT_ADD(tx_collisions);
8235 ESTAT_ADD(tx_xon_sent);
8236 ESTAT_ADD(tx_xoff_sent);
8237 ESTAT_ADD(tx_flow_control);
8238 ESTAT_ADD(tx_mac_errors);
8239 ESTAT_ADD(tx_single_collisions);
8240 ESTAT_ADD(tx_mult_collisions);
8241 ESTAT_ADD(tx_deferred);
8242 ESTAT_ADD(tx_excessive_collisions);
8243 ESTAT_ADD(tx_late_collisions);
8244 ESTAT_ADD(tx_collide_2times);
8245 ESTAT_ADD(tx_collide_3times);
8246 ESTAT_ADD(tx_collide_4times);
8247 ESTAT_ADD(tx_collide_5times);
8248 ESTAT_ADD(tx_collide_6times);
8249 ESTAT_ADD(tx_collide_7times);
8250 ESTAT_ADD(tx_collide_8times);
8251 ESTAT_ADD(tx_collide_9times);
8252 ESTAT_ADD(tx_collide_10times);
8253 ESTAT_ADD(tx_collide_11times);
8254 ESTAT_ADD(tx_collide_12times);
8255 ESTAT_ADD(tx_collide_13times);
8256 ESTAT_ADD(tx_collide_14times);
8257 ESTAT_ADD(tx_collide_15times);
8258 ESTAT_ADD(tx_ucast_packets);
8259 ESTAT_ADD(tx_mcast_packets);
8260 ESTAT_ADD(tx_bcast_packets);
8261 ESTAT_ADD(tx_carrier_sense_errors);
8262 ESTAT_ADD(tx_discards);
8263 ESTAT_ADD(tx_errors);
8265 ESTAT_ADD(dma_writeq_full);
8266 ESTAT_ADD(dma_write_prioq_full);
8267 ESTAT_ADD(rxbds_empty);
8268 ESTAT_ADD(rx_discards);
8269 ESTAT_ADD(rx_errors);
8270 ESTAT_ADD(rx_threshold_hit);
8272 ESTAT_ADD(dma_readq_full);
8273 ESTAT_ADD(dma_read_prioq_full);
8274 ESTAT_ADD(tx_comp_queue_full);
8276 ESTAT_ADD(ring_set_send_prod_index);
8277 ESTAT_ADD(ring_status_update);
8278 ESTAT_ADD(nic_irqs);
8279 ESTAT_ADD(nic_avoided_irqs);
8280 ESTAT_ADD(nic_tx_threshold_hit);
8285 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8287 struct tg3 *tp = netdev_priv(dev);
8288 struct net_device_stats *stats = &tp->net_stats;
8289 struct net_device_stats *old_stats = &tp->net_stats_prev;
8290 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8295 stats->rx_packets = old_stats->rx_packets +
8296 get_stat64(&hw_stats->rx_ucast_packets) +
8297 get_stat64(&hw_stats->rx_mcast_packets) +
8298 get_stat64(&hw_stats->rx_bcast_packets);
8300 stats->tx_packets = old_stats->tx_packets +
8301 get_stat64(&hw_stats->tx_ucast_packets) +
8302 get_stat64(&hw_stats->tx_mcast_packets) +
8303 get_stat64(&hw_stats->tx_bcast_packets);
8305 stats->rx_bytes = old_stats->rx_bytes +
8306 get_stat64(&hw_stats->rx_octets);
8307 stats->tx_bytes = old_stats->tx_bytes +
8308 get_stat64(&hw_stats->tx_octets);
8310 stats->rx_errors = old_stats->rx_errors +
8311 get_stat64(&hw_stats->rx_errors);
8312 stats->tx_errors = old_stats->tx_errors +
8313 get_stat64(&hw_stats->tx_errors) +
8314 get_stat64(&hw_stats->tx_mac_errors) +
8315 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8316 get_stat64(&hw_stats->tx_discards);
8318 stats->multicast = old_stats->multicast +
8319 get_stat64(&hw_stats->rx_mcast_packets);
8320 stats->collisions = old_stats->collisions +
8321 get_stat64(&hw_stats->tx_collisions);
8323 stats->rx_length_errors = old_stats->rx_length_errors +
8324 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8325 get_stat64(&hw_stats->rx_undersize_packets);
8327 stats->rx_over_errors = old_stats->rx_over_errors +
8328 get_stat64(&hw_stats->rxbds_empty);
8329 stats->rx_frame_errors = old_stats->rx_frame_errors +
8330 get_stat64(&hw_stats->rx_align_errors);
8331 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8332 get_stat64(&hw_stats->tx_discards);
8333 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8334 get_stat64(&hw_stats->tx_carrier_sense_errors);
8336 stats->rx_crc_errors = old_stats->rx_crc_errors +
8337 calc_crc_errors(tp);
8339 stats->rx_missed_errors = old_stats->rx_missed_errors +
8340 get_stat64(&hw_stats->rx_discards);
8345 static inline u32 calc_crc(unsigned char *buf, int len)
8353 for (j = 0; j < len; j++) {
8356 for (k = 0; k < 8; k++) {
8370 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8372 /* accept or reject all multicast frames */
8373 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8374 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8375 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8376 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8379 static void __tg3_set_rx_mode(struct net_device *dev)
8381 struct tg3 *tp = netdev_priv(dev);
8384 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8385 RX_MODE_KEEP_VLAN_TAG);
8387 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8390 #if TG3_VLAN_TAG_USED
8392 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8393 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8395 /* By definition, VLAN is disabled always in this
8398 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8399 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8402 if (dev->flags & IFF_PROMISC) {
8403 /* Promiscuous mode. */
8404 rx_mode |= RX_MODE_PROMISC;
8405 } else if (dev->flags & IFF_ALLMULTI) {
8406 /* Accept all multicast. */
8407 tg3_set_multi (tp, 1);
8408 } else if (dev->mc_count < 1) {
8409 /* Reject all multicast. */
8410 tg3_set_multi (tp, 0);
8412 /* Accept one or more multicast(s). */
8413 struct dev_mc_list *mclist;
8415 u32 mc_filter[4] = { 0, };
8420 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8421 i++, mclist = mclist->next) {
8423 crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8425 regidx = (bit & 0x60) >> 5;
8427 mc_filter[regidx] |= (1 << bit);
8430 tw32(MAC_HASH_REG_0, mc_filter[0]);
8431 tw32(MAC_HASH_REG_1, mc_filter[1]);
8432 tw32(MAC_HASH_REG_2, mc_filter[2]);
8433 tw32(MAC_HASH_REG_3, mc_filter[3]);
8436 if (rx_mode != tp->rx_mode) {
8437 tp->rx_mode = rx_mode;
8438 tw32_f(MAC_RX_MODE, rx_mode);
8443 static void tg3_set_rx_mode(struct net_device *dev)
8445 struct tg3 *tp = netdev_priv(dev);
8447 if (!netif_running(dev))
8450 tg3_full_lock(tp, 0);
8451 __tg3_set_rx_mode(dev);
8452 tg3_full_unlock(tp);
8455 #define TG3_REGDUMP_LEN (32 * 1024)
8457 static int tg3_get_regs_len(struct net_device *dev)
8459 return TG3_REGDUMP_LEN;
8462 static void tg3_get_regs(struct net_device *dev,
8463 struct ethtool_regs *regs, void *_p)
8466 struct tg3 *tp = netdev_priv(dev);
8472 memset(p, 0, TG3_REGDUMP_LEN);
8474 if (tp->link_config.phy_is_low_power)
8477 tg3_full_lock(tp, 0);
8479 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
8480 #define GET_REG32_LOOP(base,len) \
8481 do { p = (u32 *)(orig_p + (base)); \
8482 for (i = 0; i < len; i += 4) \
8483 __GET_REG32((base) + i); \
8485 #define GET_REG32_1(reg) \
8486 do { p = (u32 *)(orig_p + (reg)); \
8487 __GET_REG32((reg)); \
8490 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8491 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8492 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8493 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8494 GET_REG32_1(SNDDATAC_MODE);
8495 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8496 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8497 GET_REG32_1(SNDBDC_MODE);
8498 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8499 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8500 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8501 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8502 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8503 GET_REG32_1(RCVDCC_MODE);
8504 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8505 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8506 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8507 GET_REG32_1(MBFREE_MODE);
8508 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8509 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8510 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8511 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8512 GET_REG32_LOOP(WDMAC_MODE, 0x08);
8513 GET_REG32_1(RX_CPU_MODE);
8514 GET_REG32_1(RX_CPU_STATE);
8515 GET_REG32_1(RX_CPU_PGMCTR);
8516 GET_REG32_1(RX_CPU_HWBKPT);
8517 GET_REG32_1(TX_CPU_MODE);
8518 GET_REG32_1(TX_CPU_STATE);
8519 GET_REG32_1(TX_CPU_PGMCTR);
8520 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8521 GET_REG32_LOOP(FTQ_RESET, 0x120);
8522 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8523 GET_REG32_1(DMAC_MODE);
8524 GET_REG32_LOOP(GRC_MODE, 0x4c);
8525 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8526 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8529 #undef GET_REG32_LOOP
8532 tg3_full_unlock(tp);
8535 static int tg3_get_eeprom_len(struct net_device *dev)
8537 struct tg3 *tp = netdev_priv(dev);
8539 return tp->nvram_size;
8542 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8543 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8544 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8546 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8548 struct tg3 *tp = netdev_priv(dev);
8551 u32 i, offset, len, b_offset, b_count;
8554 if (tp->link_config.phy_is_low_power)
8557 offset = eeprom->offset;
8561 eeprom->magic = TG3_EEPROM_MAGIC;
8564 /* adjustments to start on required 4 byte boundary */
8565 b_offset = offset & 3;
8566 b_count = 4 - b_offset;
8567 if (b_count > len) {
8568 /* i.e. offset=1 len=2 */
8571 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
8574 memcpy(data, ((char*)&val) + b_offset, b_count);
8577 eeprom->len += b_count;
8580 /* read bytes upto the last 4 byte boundary */
8581 pd = &data[eeprom->len];
8582 for (i = 0; i < (len - (len & 3)); i += 4) {
8583 ret = tg3_nvram_read_le(tp, offset + i, &val);
8588 memcpy(pd + i, &val, 4);
8593 /* read last bytes not ending on 4 byte boundary */
8594 pd = &data[eeprom->len];
8596 b_offset = offset + len - b_count;
8597 ret = tg3_nvram_read_le(tp, b_offset, &val);
8600 memcpy(pd, &val, b_count);
8601 eeprom->len += b_count;
8606 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8608 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8610 struct tg3 *tp = netdev_priv(dev);
8612 u32 offset, len, b_offset, odd_len;
8616 if (tp->link_config.phy_is_low_power)
8619 if (eeprom->magic != TG3_EEPROM_MAGIC)
8622 offset = eeprom->offset;
8625 if ((b_offset = (offset & 3))) {
8626 /* adjustments to start on required 4 byte boundary */
8627 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
8638 /* adjustments to end on required 4 byte boundary */
8640 len = (len + 3) & ~3;
8641 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
8647 if (b_offset || odd_len) {
8648 buf = kmalloc(len, GFP_KERNEL);
8652 memcpy(buf, &start, 4);
8654 memcpy(buf+len-4, &end, 4);
8655 memcpy(buf + b_offset, data, eeprom->len);
8658 ret = tg3_nvram_write_block(tp, offset, len, buf);
8666 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8668 struct tg3 *tp = netdev_priv(dev);
8670 cmd->supported = (SUPPORTED_Autoneg);
8672 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8673 cmd->supported |= (SUPPORTED_1000baseT_Half |
8674 SUPPORTED_1000baseT_Full);
8676 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8677 cmd->supported |= (SUPPORTED_100baseT_Half |
8678 SUPPORTED_100baseT_Full |
8679 SUPPORTED_10baseT_Half |
8680 SUPPORTED_10baseT_Full |
8682 cmd->port = PORT_TP;
8684 cmd->supported |= SUPPORTED_FIBRE;
8685 cmd->port = PORT_FIBRE;
8688 cmd->advertising = tp->link_config.advertising;
8689 if (netif_running(dev)) {
8690 cmd->speed = tp->link_config.active_speed;
8691 cmd->duplex = tp->link_config.active_duplex;
8693 cmd->phy_address = PHY_ADDR;
8694 cmd->transceiver = 0;
8695 cmd->autoneg = tp->link_config.autoneg;
8701 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8703 struct tg3 *tp = netdev_priv(dev);
8705 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8706 /* These are the only valid advertisement bits allowed. */
8707 if (cmd->autoneg == AUTONEG_ENABLE &&
8708 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8709 ADVERTISED_1000baseT_Full |
8710 ADVERTISED_Autoneg |
8713 /* Fiber can only do SPEED_1000. */
8714 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8715 (cmd->speed != SPEED_1000))
8717 /* Copper cannot force SPEED_1000. */
8718 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8719 (cmd->speed == SPEED_1000))
8721 else if ((cmd->speed == SPEED_1000) &&
8722 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8725 tg3_full_lock(tp, 0);
8727 tp->link_config.autoneg = cmd->autoneg;
8728 if (cmd->autoneg == AUTONEG_ENABLE) {
8729 tp->link_config.advertising = (cmd->advertising |
8730 ADVERTISED_Autoneg);
8731 tp->link_config.speed = SPEED_INVALID;
8732 tp->link_config.duplex = DUPLEX_INVALID;
8734 tp->link_config.advertising = 0;
8735 tp->link_config.speed = cmd->speed;
8736 tp->link_config.duplex = cmd->duplex;
8739 tp->link_config.orig_speed = tp->link_config.speed;
8740 tp->link_config.orig_duplex = tp->link_config.duplex;
8741 tp->link_config.orig_autoneg = tp->link_config.autoneg;
8743 if (netif_running(dev))
8744 tg3_setup_phy(tp, 1);
8746 tg3_full_unlock(tp);
8751 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8753 struct tg3 *tp = netdev_priv(dev);
8755 strcpy(info->driver, DRV_MODULE_NAME);
8756 strcpy(info->version, DRV_MODULE_VERSION);
8757 strcpy(info->fw_version, tp->fw_ver);
8758 strcpy(info->bus_info, pci_name(tp->pdev));
8761 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8763 struct tg3 *tp = netdev_priv(dev);
8765 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8766 wol->supported = WAKE_MAGIC;
8770 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8771 wol->wolopts = WAKE_MAGIC;
8772 memset(&wol->sopass, 0, sizeof(wol->sopass));
8775 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8777 struct tg3 *tp = netdev_priv(dev);
8779 if (wol->wolopts & ~WAKE_MAGIC)
8781 if ((wol->wolopts & WAKE_MAGIC) &&
8782 !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8785 spin_lock_bh(&tp->lock);
8786 if (wol->wolopts & WAKE_MAGIC)
8787 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8789 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8790 spin_unlock_bh(&tp->lock);
8795 static u32 tg3_get_msglevel(struct net_device *dev)
8797 struct tg3 *tp = netdev_priv(dev);
8798 return tp->msg_enable;
8801 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8803 struct tg3 *tp = netdev_priv(dev);
8804 tp->msg_enable = value;
8807 static int tg3_set_tso(struct net_device *dev, u32 value)
8809 struct tg3 *tp = netdev_priv(dev);
8811 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8816 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8817 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8819 dev->features |= NETIF_F_TSO6;
8820 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8821 dev->features |= NETIF_F_TSO_ECN;
8823 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
8825 return ethtool_op_set_tso(dev, value);
8828 static int tg3_nway_reset(struct net_device *dev)
8830 struct tg3 *tp = netdev_priv(dev);
8834 if (!netif_running(dev))
8837 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8840 spin_lock_bh(&tp->lock);
8842 tg3_readphy(tp, MII_BMCR, &bmcr);
8843 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
8844 ((bmcr & BMCR_ANENABLE) ||
8845 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
8846 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
8850 spin_unlock_bh(&tp->lock);
8855 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8857 struct tg3 *tp = netdev_priv(dev);
8859 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8860 ering->rx_mini_max_pending = 0;
8861 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8862 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8864 ering->rx_jumbo_max_pending = 0;
8866 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8868 ering->rx_pending = tp->rx_pending;
8869 ering->rx_mini_pending = 0;
8870 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8871 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8873 ering->rx_jumbo_pending = 0;
8875 ering->tx_pending = tp->tx_pending;
8878 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8880 struct tg3 *tp = netdev_priv(dev);
8881 int irq_sync = 0, err = 0;
8883 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8884 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8885 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
8886 (ering->tx_pending <= MAX_SKB_FRAGS) ||
8887 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
8888 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
8891 if (netif_running(dev)) {
8896 tg3_full_lock(tp, irq_sync);
8898 tp->rx_pending = ering->rx_pending;
8900 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8901 tp->rx_pending > 63)
8902 tp->rx_pending = 63;
8903 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8904 tp->tx_pending = ering->tx_pending;
8906 if (netif_running(dev)) {
8907 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8908 err = tg3_restart_hw(tp, 1);
8910 tg3_netif_start(tp);
8913 tg3_full_unlock(tp);
8918 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8920 struct tg3 *tp = netdev_priv(dev);
8922 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8924 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
8925 epause->rx_pause = 1;
8927 epause->rx_pause = 0;
8929 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
8930 epause->tx_pause = 1;
8932 epause->tx_pause = 0;
8935 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8937 struct tg3 *tp = netdev_priv(dev);
8938 int irq_sync = 0, err = 0;
8940 if (netif_running(dev)) {
8945 tg3_full_lock(tp, irq_sync);
8947 if (epause->autoneg)
8948 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8950 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8951 if (epause->rx_pause)
8952 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
8954 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
8955 if (epause->tx_pause)
8956 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
8958 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
8960 if (netif_running(dev)) {
8961 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8962 err = tg3_restart_hw(tp, 1);
8964 tg3_netif_start(tp);
8967 tg3_full_unlock(tp);
8972 static u32 tg3_get_rx_csum(struct net_device *dev)
8974 struct tg3 *tp = netdev_priv(dev);
8975 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8978 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8980 struct tg3 *tp = netdev_priv(dev);
8982 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8988 spin_lock_bh(&tp->lock);
8990 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8992 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8993 spin_unlock_bh(&tp->lock);
8998 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9000 struct tg3 *tp = netdev_priv(dev);
9002 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9008 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9009 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9010 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9011 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9012 ethtool_op_set_tx_ipv6_csum(dev, data);
9014 ethtool_op_set_tx_csum(dev, data);
9019 static int tg3_get_sset_count (struct net_device *dev, int sset)
9023 return TG3_NUM_TEST;
9025 return TG3_NUM_STATS;
9031 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9033 switch (stringset) {
9035 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
9038 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
9041 WARN_ON(1); /* we need a WARN() */
9046 static int tg3_phys_id(struct net_device *dev, u32 data)
9048 struct tg3 *tp = netdev_priv(dev);
9051 if (!netif_running(tp->dev))
9055 data = UINT_MAX / 2;
9057 for (i = 0; i < (data * 2); i++) {
9059 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9060 LED_CTRL_1000MBPS_ON |
9061 LED_CTRL_100MBPS_ON |
9062 LED_CTRL_10MBPS_ON |
9063 LED_CTRL_TRAFFIC_OVERRIDE |
9064 LED_CTRL_TRAFFIC_BLINK |
9065 LED_CTRL_TRAFFIC_LED);
9068 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9069 LED_CTRL_TRAFFIC_OVERRIDE);
9071 if (msleep_interruptible(500))
9074 tw32(MAC_LED_CTRL, tp->led_ctrl);
9078 static void tg3_get_ethtool_stats (struct net_device *dev,
9079 struct ethtool_stats *estats, u64 *tmp_stats)
9081 struct tg3 *tp = netdev_priv(dev);
9082 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9085 #define NVRAM_TEST_SIZE 0x100
9086 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9087 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9088 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
9089 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9090 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9092 static int tg3_test_nvram(struct tg3 *tp)
9096 int i, j, k, err = 0, size;
9098 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9101 if (magic == TG3_EEPROM_MAGIC)
9102 size = NVRAM_TEST_SIZE;
9103 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9104 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9105 TG3_EEPROM_SB_FORMAT_1) {
9106 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9107 case TG3_EEPROM_SB_REVISION_0:
9108 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9110 case TG3_EEPROM_SB_REVISION_2:
9111 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9113 case TG3_EEPROM_SB_REVISION_3:
9114 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9121 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9122 size = NVRAM_SELFBOOT_HW_SIZE;
9126 buf = kmalloc(size, GFP_KERNEL);
9131 for (i = 0, j = 0; i < size; i += 4, j++) {
9132 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
9138 /* Selfboot format */
9139 magic = swab32(le32_to_cpu(buf[0]));
9140 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9141 TG3_EEPROM_MAGIC_FW) {
9142 u8 *buf8 = (u8 *) buf, csum8 = 0;
9144 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9145 TG3_EEPROM_SB_REVISION_2) {
9146 /* For rev 2, the csum doesn't include the MBA. */
9147 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9149 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9152 for (i = 0; i < size; i++)
9165 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9166 TG3_EEPROM_MAGIC_HW) {
9167 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9168 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9169 u8 *buf8 = (u8 *) buf;
9171 /* Separate the parity bits and the data bytes. */
9172 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9173 if ((i == 0) || (i == 8)) {
9177 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9178 parity[k++] = buf8[i] & msk;
9185 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9186 parity[k++] = buf8[i] & msk;
9189 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9190 parity[k++] = buf8[i] & msk;
9193 data[j++] = buf8[i];
9197 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9198 u8 hw8 = hweight8(data[i]);
9200 if ((hw8 & 0x1) && parity[i])
9202 else if (!(hw8 & 0x1) && !parity[i])
9209 /* Bootstrap checksum at offset 0x10 */
9210 csum = calc_crc((unsigned char *) buf, 0x10);
9211 if(csum != le32_to_cpu(buf[0x10/4]))
9214 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9215 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9216 if (csum != le32_to_cpu(buf[0xfc/4]))
9226 #define TG3_SERDES_TIMEOUT_SEC 2
9227 #define TG3_COPPER_TIMEOUT_SEC 6
9229 static int tg3_test_link(struct tg3 *tp)
9233 if (!netif_running(tp->dev))
9236 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9237 max = TG3_SERDES_TIMEOUT_SEC;
9239 max = TG3_COPPER_TIMEOUT_SEC;
9241 for (i = 0; i < max; i++) {
9242 if (netif_carrier_ok(tp->dev))
9245 if (msleep_interruptible(1000))
9252 /* Only test the commonly used registers */
9253 static int tg3_test_registers(struct tg3 *tp)
9255 int i, is_5705, is_5750;
9256 u32 offset, read_mask, write_mask, val, save_val, read_val;
9260 #define TG3_FL_5705 0x1
9261 #define TG3_FL_NOT_5705 0x2
9262 #define TG3_FL_NOT_5788 0x4
9263 #define TG3_FL_NOT_5750 0x8
9267 /* MAC Control Registers */
9268 { MAC_MODE, TG3_FL_NOT_5705,
9269 0x00000000, 0x00ef6f8c },
9270 { MAC_MODE, TG3_FL_5705,
9271 0x00000000, 0x01ef6b8c },
9272 { MAC_STATUS, TG3_FL_NOT_5705,
9273 0x03800107, 0x00000000 },
9274 { MAC_STATUS, TG3_FL_5705,
9275 0x03800100, 0x00000000 },
9276 { MAC_ADDR_0_HIGH, 0x0000,
9277 0x00000000, 0x0000ffff },
9278 { MAC_ADDR_0_LOW, 0x0000,
9279 0x00000000, 0xffffffff },
9280 { MAC_RX_MTU_SIZE, 0x0000,
9281 0x00000000, 0x0000ffff },
9282 { MAC_TX_MODE, 0x0000,
9283 0x00000000, 0x00000070 },
9284 { MAC_TX_LENGTHS, 0x0000,
9285 0x00000000, 0x00003fff },
9286 { MAC_RX_MODE, TG3_FL_NOT_5705,
9287 0x00000000, 0x000007fc },
9288 { MAC_RX_MODE, TG3_FL_5705,
9289 0x00000000, 0x000007dc },
9290 { MAC_HASH_REG_0, 0x0000,
9291 0x00000000, 0xffffffff },
9292 { MAC_HASH_REG_1, 0x0000,
9293 0x00000000, 0xffffffff },
9294 { MAC_HASH_REG_2, 0x0000,
9295 0x00000000, 0xffffffff },
9296 { MAC_HASH_REG_3, 0x0000,
9297 0x00000000, 0xffffffff },
9299 /* Receive Data and Receive BD Initiator Control Registers. */
9300 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9301 0x00000000, 0xffffffff },
9302 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9303 0x00000000, 0xffffffff },
9304 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9305 0x00000000, 0x00000003 },
9306 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9307 0x00000000, 0xffffffff },
9308 { RCVDBDI_STD_BD+0, 0x0000,
9309 0x00000000, 0xffffffff },
9310 { RCVDBDI_STD_BD+4, 0x0000,
9311 0x00000000, 0xffffffff },
9312 { RCVDBDI_STD_BD+8, 0x0000,
9313 0x00000000, 0xffff0002 },
9314 { RCVDBDI_STD_BD+0xc, 0x0000,
9315 0x00000000, 0xffffffff },
9317 /* Receive BD Initiator Control Registers. */
9318 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9319 0x00000000, 0xffffffff },
9320 { RCVBDI_STD_THRESH, TG3_FL_5705,
9321 0x00000000, 0x000003ff },
9322 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9323 0x00000000, 0xffffffff },
9325 /* Host Coalescing Control Registers. */
9326 { HOSTCC_MODE, TG3_FL_NOT_5705,
9327 0x00000000, 0x00000004 },
9328 { HOSTCC_MODE, TG3_FL_5705,
9329 0x00000000, 0x000000f6 },
9330 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9331 0x00000000, 0xffffffff },
9332 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9333 0x00000000, 0x000003ff },
9334 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9335 0x00000000, 0xffffffff },
9336 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9337 0x00000000, 0x000003ff },
9338 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9339 0x00000000, 0xffffffff },
9340 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9341 0x00000000, 0x000000ff },
9342 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9343 0x00000000, 0xffffffff },
9344 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9345 0x00000000, 0x000000ff },
9346 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9347 0x00000000, 0xffffffff },
9348 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9349 0x00000000, 0xffffffff },
9350 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9351 0x00000000, 0xffffffff },
9352 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9353 0x00000000, 0x000000ff },
9354 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9355 0x00000000, 0xffffffff },
9356 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9357 0x00000000, 0x000000ff },
9358 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9359 0x00000000, 0xffffffff },
9360 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9361 0x00000000, 0xffffffff },
9362 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9363 0x00000000, 0xffffffff },
9364 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9365 0x00000000, 0xffffffff },
9366 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9367 0x00000000, 0xffffffff },
9368 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9369 0xffffffff, 0x00000000 },
9370 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9371 0xffffffff, 0x00000000 },
9373 /* Buffer Manager Control Registers. */
9374 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9375 0x00000000, 0x007fff80 },
9376 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9377 0x00000000, 0x007fffff },
9378 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9379 0x00000000, 0x0000003f },
9380 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9381 0x00000000, 0x000001ff },
9382 { BUFMGR_MB_HIGH_WATER, 0x0000,
9383 0x00000000, 0x000001ff },
9384 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9385 0xffffffff, 0x00000000 },
9386 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9387 0xffffffff, 0x00000000 },
9389 /* Mailbox Registers */
9390 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9391 0x00000000, 0x000001ff },
9392 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9393 0x00000000, 0x000001ff },
9394 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9395 0x00000000, 0x000007ff },
9396 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9397 0x00000000, 0x000001ff },
9399 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9402 is_5705 = is_5750 = 0;
9403 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9405 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9409 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9410 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9413 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9416 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9417 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9420 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9423 offset = (u32) reg_tbl[i].offset;
9424 read_mask = reg_tbl[i].read_mask;
9425 write_mask = reg_tbl[i].write_mask;
9427 /* Save the original register content */
9428 save_val = tr32(offset);
9430 /* Determine the read-only value. */
9431 read_val = save_val & read_mask;
9433 /* Write zero to the register, then make sure the read-only bits
9434 * are not changed and the read/write bits are all zeros.
9440 /* Test the read-only and read/write bits. */
9441 if (((val & read_mask) != read_val) || (val & write_mask))
9444 /* Write ones to all the bits defined by RdMask and WrMask, then
9445 * make sure the read-only bits are not changed and the
9446 * read/write bits are all ones.
9448 tw32(offset, read_mask | write_mask);
9452 /* Test the read-only bits. */
9453 if ((val & read_mask) != read_val)
9456 /* Test the read/write bits. */
9457 if ((val & write_mask) != write_mask)
9460 tw32(offset, save_val);
9466 if (netif_msg_hw(tp))
9467 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9469 tw32(offset, save_val);
9473 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9475 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9479 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9480 for (j = 0; j < len; j += 4) {
9483 tg3_write_mem(tp, offset + j, test_pattern[i]);
9484 tg3_read_mem(tp, offset + j, &val);
9485 if (val != test_pattern[i])
9492 static int tg3_test_memory(struct tg3 *tp)
9494 static struct mem_entry {
9497 } mem_tbl_570x[] = {
9498 { 0x00000000, 0x00b50},
9499 { 0x00002000, 0x1c000},
9500 { 0xffffffff, 0x00000}
9501 }, mem_tbl_5705[] = {
9502 { 0x00000100, 0x0000c},
9503 { 0x00000200, 0x00008},
9504 { 0x00004000, 0x00800},
9505 { 0x00006000, 0x01000},
9506 { 0x00008000, 0x02000},
9507 { 0x00010000, 0x0e000},
9508 { 0xffffffff, 0x00000}
9509 }, mem_tbl_5755[] = {
9510 { 0x00000200, 0x00008},
9511 { 0x00004000, 0x00800},
9512 { 0x00006000, 0x00800},
9513 { 0x00008000, 0x02000},
9514 { 0x00010000, 0x0c000},
9515 { 0xffffffff, 0x00000}
9516 }, mem_tbl_5906[] = {
9517 { 0x00000200, 0x00008},
9518 { 0x00004000, 0x00400},
9519 { 0x00006000, 0x00400},
9520 { 0x00008000, 0x01000},
9521 { 0x00010000, 0x01000},
9522 { 0xffffffff, 0x00000}
9524 struct mem_entry *mem_tbl;
9528 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9529 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9530 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9531 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9532 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9533 mem_tbl = mem_tbl_5755;
9534 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9535 mem_tbl = mem_tbl_5906;
9537 mem_tbl = mem_tbl_5705;
9539 mem_tbl = mem_tbl_570x;
9541 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9542 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9543 mem_tbl[i].len)) != 0)
9550 #define TG3_MAC_LOOPBACK 0
9551 #define TG3_PHY_LOOPBACK 1
9553 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9555 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
9557 struct sk_buff *skb, *rx_skb;
9560 int num_pkts, tx_len, rx_len, i, err;
9561 struct tg3_rx_buffer_desc *desc;
9563 if (loopback_mode == TG3_MAC_LOOPBACK) {
9564 /* HW errata - mac loopback fails in some cases on 5780.
9565 * Normal traffic and PHY loopback are not affected by
9568 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9571 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
9572 MAC_MODE_PORT_INT_LPBACK;
9573 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9574 mac_mode |= MAC_MODE_LINK_POLARITY;
9575 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9576 mac_mode |= MAC_MODE_PORT_MODE_MII;
9578 mac_mode |= MAC_MODE_PORT_MODE_GMII;
9579 tw32(MAC_MODE, mac_mode);
9580 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
9583 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9586 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9589 tg3_writephy(tp, MII_TG3_EPHY_TEST,
9590 phytest | MII_TG3_EPHY_SHADOW_EN);
9591 if (!tg3_readphy(tp, 0x1b, &phy))
9592 tg3_writephy(tp, 0x1b, phy & ~0x20);
9593 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9595 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9597 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
9599 tg3_phy_toggle_automdix(tp, 0);
9601 tg3_writephy(tp, MII_BMCR, val);
9604 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
9605 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9606 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
9607 mac_mode |= MAC_MODE_PORT_MODE_MII;
9609 mac_mode |= MAC_MODE_PORT_MODE_GMII;
9611 /* reset to prevent losing 1st rx packet intermittently */
9612 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
9613 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9615 tw32_f(MAC_RX_MODE, tp->rx_mode);
9617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
9618 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
9619 mac_mode &= ~MAC_MODE_LINK_POLARITY;
9620 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
9621 mac_mode |= MAC_MODE_LINK_POLARITY;
9622 tg3_writephy(tp, MII_TG3_EXT_CTRL,
9623 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
9625 tw32(MAC_MODE, mac_mode);
9633 skb = netdev_alloc_skb(tp->dev, tx_len);
9637 tx_data = skb_put(skb, tx_len);
9638 memcpy(tx_data, tp->dev->dev_addr, 6);
9639 memset(tx_data + 6, 0x0, 8);
9641 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
9643 for (i = 14; i < tx_len; i++)
9644 tx_data[i] = (u8) (i & 0xff);
9646 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
9648 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9653 rx_start_idx = tp->hw_status->idx[0].rx_producer;
9657 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
9662 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
9664 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
9668 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
9669 for (i = 0; i < 25; i++) {
9670 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9675 tx_idx = tp->hw_status->idx[0].tx_consumer;
9676 rx_idx = tp->hw_status->idx[0].rx_producer;
9677 if ((tx_idx == tp->tx_prod) &&
9678 (rx_idx == (rx_start_idx + num_pkts)))
9682 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
9685 if (tx_idx != tp->tx_prod)
9688 if (rx_idx != rx_start_idx + num_pkts)
9691 desc = &tp->rx_rcb[rx_start_idx];
9692 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
9693 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
9694 if (opaque_key != RXD_OPAQUE_RING_STD)
9697 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
9698 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
9701 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
9702 if (rx_len != tx_len)
9705 rx_skb = tp->rx_std_buffers[desc_idx].skb;
9707 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
9708 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
9710 for (i = 14; i < tx_len; i++) {
9711 if (*(rx_skb->data + i) != (u8) (i & 0xff))
9716 /* tg3_free_rings will unmap and free the rx_skb */
9721 #define TG3_MAC_LOOPBACK_FAILED 1
9722 #define TG3_PHY_LOOPBACK_FAILED 2
9723 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
9724 TG3_PHY_LOOPBACK_FAILED)
9726 static int tg3_test_loopback(struct tg3 *tp)
9731 if (!netif_running(tp->dev))
9732 return TG3_LOOPBACK_FAILED;
9734 err = tg3_reset_hw(tp, 1);
9736 return TG3_LOOPBACK_FAILED;
9738 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9739 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
9743 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
9745 /* Wait for up to 40 microseconds to acquire lock. */
9746 for (i = 0; i < 4; i++) {
9747 status = tr32(TG3_CPMU_MUTEX_GNT);
9748 if (status == CPMU_MUTEX_GNT_DRIVER)
9753 if (status != CPMU_MUTEX_GNT_DRIVER)
9754 return TG3_LOOPBACK_FAILED;
9756 /* Turn off link-based power management. */
9757 cpmuctrl = tr32(TG3_CPMU_CTRL);
9759 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
9760 CPMU_CTRL_LINK_AWARE_MODE));
9763 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
9764 err |= TG3_MAC_LOOPBACK_FAILED;
9766 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9767 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
9768 tw32(TG3_CPMU_CTRL, cpmuctrl);
9770 /* Release the mutex */
9771 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
9774 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
9775 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
9776 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9777 err |= TG3_PHY_LOOPBACK_FAILED;
9783 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
9786 struct tg3 *tp = netdev_priv(dev);
9788 if (tp->link_config.phy_is_low_power)
9789 tg3_set_power_state(tp, PCI_D0);
9791 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
9793 if (tg3_test_nvram(tp) != 0) {
9794 etest->flags |= ETH_TEST_FL_FAILED;
9797 if (tg3_test_link(tp) != 0) {
9798 etest->flags |= ETH_TEST_FL_FAILED;
9801 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9802 int err, irq_sync = 0;
9804 if (netif_running(dev)) {
9809 tg3_full_lock(tp, irq_sync);
9811 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
9812 err = tg3_nvram_lock(tp);
9813 tg3_halt_cpu(tp, RX_CPU_BASE);
9814 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9815 tg3_halt_cpu(tp, TX_CPU_BASE);
9817 tg3_nvram_unlock(tp);
9819 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
9822 if (tg3_test_registers(tp) != 0) {
9823 etest->flags |= ETH_TEST_FL_FAILED;
9826 if (tg3_test_memory(tp) != 0) {
9827 etest->flags |= ETH_TEST_FL_FAILED;
9830 if ((data[4] = tg3_test_loopback(tp)) != 0)
9831 etest->flags |= ETH_TEST_FL_FAILED;
9833 tg3_full_unlock(tp);
9835 if (tg3_test_interrupt(tp) != 0) {
9836 etest->flags |= ETH_TEST_FL_FAILED;
9840 tg3_full_lock(tp, 0);
9842 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9843 if (netif_running(dev)) {
9844 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9845 if (!tg3_restart_hw(tp, 1))
9846 tg3_netif_start(tp);
9849 tg3_full_unlock(tp);
9851 if (tp->link_config.phy_is_low_power)
9852 tg3_set_power_state(tp, PCI_D3hot);
9856 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9858 struct mii_ioctl_data *data = if_mii(ifr);
9859 struct tg3 *tp = netdev_priv(dev);
9864 data->phy_id = PHY_ADDR;
9870 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9871 break; /* We have no PHY */
9873 if (tp->link_config.phy_is_low_power)
9876 spin_lock_bh(&tp->lock);
9877 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9878 spin_unlock_bh(&tp->lock);
9880 data->val_out = mii_regval;
9886 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9887 break; /* We have no PHY */
9889 if (!capable(CAP_NET_ADMIN))
9892 if (tp->link_config.phy_is_low_power)
9895 spin_lock_bh(&tp->lock);
9896 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9897 spin_unlock_bh(&tp->lock);
9908 #if TG3_VLAN_TAG_USED
9909 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9911 struct tg3 *tp = netdev_priv(dev);
9913 if (netif_running(dev))
9916 tg3_full_lock(tp, 0);
9920 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9921 __tg3_set_rx_mode(dev);
9923 if (netif_running(dev))
9924 tg3_netif_start(tp);
9926 tg3_full_unlock(tp);
9930 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9932 struct tg3 *tp = netdev_priv(dev);
9934 memcpy(ec, &tp->coal, sizeof(*ec));
9938 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9940 struct tg3 *tp = netdev_priv(dev);
9941 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9942 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9944 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9945 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9946 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9947 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9948 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9951 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9952 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9953 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9954 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9955 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9956 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9957 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9958 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9959 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9960 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9963 /* No rx interrupts will be generated if both are zero */
9964 if ((ec->rx_coalesce_usecs == 0) &&
9965 (ec->rx_max_coalesced_frames == 0))
9968 /* No tx interrupts will be generated if both are zero */
9969 if ((ec->tx_coalesce_usecs == 0) &&
9970 (ec->tx_max_coalesced_frames == 0))
9973 /* Only copy relevant parameters, ignore all others. */
9974 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9975 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9976 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9977 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9978 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9979 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9980 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9981 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9982 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9984 if (netif_running(dev)) {
9985 tg3_full_lock(tp, 0);
9986 __tg3_set_coalesce(tp, &tp->coal);
9987 tg3_full_unlock(tp);
9992 static const struct ethtool_ops tg3_ethtool_ops = {
9993 .get_settings = tg3_get_settings,
9994 .set_settings = tg3_set_settings,
9995 .get_drvinfo = tg3_get_drvinfo,
9996 .get_regs_len = tg3_get_regs_len,
9997 .get_regs = tg3_get_regs,
9998 .get_wol = tg3_get_wol,
9999 .set_wol = tg3_set_wol,
10000 .get_msglevel = tg3_get_msglevel,
10001 .set_msglevel = tg3_set_msglevel,
10002 .nway_reset = tg3_nway_reset,
10003 .get_link = ethtool_op_get_link,
10004 .get_eeprom_len = tg3_get_eeprom_len,
10005 .get_eeprom = tg3_get_eeprom,
10006 .set_eeprom = tg3_set_eeprom,
10007 .get_ringparam = tg3_get_ringparam,
10008 .set_ringparam = tg3_set_ringparam,
10009 .get_pauseparam = tg3_get_pauseparam,
10010 .set_pauseparam = tg3_set_pauseparam,
10011 .get_rx_csum = tg3_get_rx_csum,
10012 .set_rx_csum = tg3_set_rx_csum,
10013 .set_tx_csum = tg3_set_tx_csum,
10014 .set_sg = ethtool_op_set_sg,
10015 .set_tso = tg3_set_tso,
10016 .self_test = tg3_self_test,
10017 .get_strings = tg3_get_strings,
10018 .phys_id = tg3_phys_id,
10019 .get_ethtool_stats = tg3_get_ethtool_stats,
10020 .get_coalesce = tg3_get_coalesce,
10021 .set_coalesce = tg3_set_coalesce,
10022 .get_sset_count = tg3_get_sset_count,
10025 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10027 u32 cursize, val, magic;
10029 tp->nvram_size = EEPROM_CHIP_SIZE;
10031 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
10034 if ((magic != TG3_EEPROM_MAGIC) &&
10035 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10036 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10040 * Size the chip by reading offsets at increasing powers of two.
10041 * When we encounter our validation signature, we know the addressing
10042 * has wrapped around, and thus have our chip size.
10046 while (cursize < tp->nvram_size) {
10047 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
10056 tp->nvram_size = cursize;
10059 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10063 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
10066 /* Selfboot format */
10067 if (val != TG3_EEPROM_MAGIC) {
10068 tg3_get_eeprom_size(tp);
10072 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10074 tp->nvram_size = (val >> 16) * 1024;
10078 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10081 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10085 nvcfg1 = tr32(NVRAM_CFG1);
10086 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10087 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10090 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10091 tw32(NVRAM_CFG1, nvcfg1);
10094 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10095 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10096 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10097 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10098 tp->nvram_jedecnum = JEDEC_ATMEL;
10099 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10100 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10102 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10103 tp->nvram_jedecnum = JEDEC_ATMEL;
10104 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10106 case FLASH_VENDOR_ATMEL_EEPROM:
10107 tp->nvram_jedecnum = JEDEC_ATMEL;
10108 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10109 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10111 case FLASH_VENDOR_ST:
10112 tp->nvram_jedecnum = JEDEC_ST;
10113 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10114 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10116 case FLASH_VENDOR_SAIFUN:
10117 tp->nvram_jedecnum = JEDEC_SAIFUN;
10118 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10120 case FLASH_VENDOR_SST_SMALL:
10121 case FLASH_VENDOR_SST_LARGE:
10122 tp->nvram_jedecnum = JEDEC_SST;
10123 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10128 tp->nvram_jedecnum = JEDEC_ATMEL;
10129 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10130 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10134 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10138 nvcfg1 = tr32(NVRAM_CFG1);
10140 /* NVRAM protection for TPM */
10141 if (nvcfg1 & (1 << 27))
10142 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10144 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10145 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10146 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10147 tp->nvram_jedecnum = JEDEC_ATMEL;
10148 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10150 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10151 tp->nvram_jedecnum = JEDEC_ATMEL;
10152 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10153 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10155 case FLASH_5752VENDOR_ST_M45PE10:
10156 case FLASH_5752VENDOR_ST_M45PE20:
10157 case FLASH_5752VENDOR_ST_M45PE40:
10158 tp->nvram_jedecnum = JEDEC_ST;
10159 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10160 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10164 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10165 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10166 case FLASH_5752PAGE_SIZE_256:
10167 tp->nvram_pagesize = 256;
10169 case FLASH_5752PAGE_SIZE_512:
10170 tp->nvram_pagesize = 512;
10172 case FLASH_5752PAGE_SIZE_1K:
10173 tp->nvram_pagesize = 1024;
10175 case FLASH_5752PAGE_SIZE_2K:
10176 tp->nvram_pagesize = 2048;
10178 case FLASH_5752PAGE_SIZE_4K:
10179 tp->nvram_pagesize = 4096;
10181 case FLASH_5752PAGE_SIZE_264:
10182 tp->nvram_pagesize = 264;
10187 /* For eeprom, set pagesize to maximum eeprom size */
10188 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10190 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10191 tw32(NVRAM_CFG1, nvcfg1);
10195 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10197 u32 nvcfg1, protect = 0;
10199 nvcfg1 = tr32(NVRAM_CFG1);
10201 /* NVRAM protection for TPM */
10202 if (nvcfg1 & (1 << 27)) {
10203 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10207 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10209 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10210 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10211 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10212 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10213 tp->nvram_jedecnum = JEDEC_ATMEL;
10214 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10215 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10216 tp->nvram_pagesize = 264;
10217 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10218 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10219 tp->nvram_size = (protect ? 0x3e200 :
10220 TG3_NVRAM_SIZE_512KB);
10221 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10222 tp->nvram_size = (protect ? 0x1f200 :
10223 TG3_NVRAM_SIZE_256KB);
10225 tp->nvram_size = (protect ? 0x1f200 :
10226 TG3_NVRAM_SIZE_128KB);
10228 case FLASH_5752VENDOR_ST_M45PE10:
10229 case FLASH_5752VENDOR_ST_M45PE20:
10230 case FLASH_5752VENDOR_ST_M45PE40:
10231 tp->nvram_jedecnum = JEDEC_ST;
10232 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10233 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10234 tp->nvram_pagesize = 256;
10235 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10236 tp->nvram_size = (protect ?
10237 TG3_NVRAM_SIZE_64KB :
10238 TG3_NVRAM_SIZE_128KB);
10239 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10240 tp->nvram_size = (protect ?
10241 TG3_NVRAM_SIZE_64KB :
10242 TG3_NVRAM_SIZE_256KB);
10244 tp->nvram_size = (protect ?
10245 TG3_NVRAM_SIZE_128KB :
10246 TG3_NVRAM_SIZE_512KB);
10251 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10255 nvcfg1 = tr32(NVRAM_CFG1);
10257 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10258 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10259 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10260 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10261 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10262 tp->nvram_jedecnum = JEDEC_ATMEL;
10263 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10264 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10266 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10267 tw32(NVRAM_CFG1, nvcfg1);
10269 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10270 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10271 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10272 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10273 tp->nvram_jedecnum = JEDEC_ATMEL;
10274 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10275 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10276 tp->nvram_pagesize = 264;
10278 case FLASH_5752VENDOR_ST_M45PE10:
10279 case FLASH_5752VENDOR_ST_M45PE20:
10280 case FLASH_5752VENDOR_ST_M45PE40:
10281 tp->nvram_jedecnum = JEDEC_ST;
10282 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10283 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10284 tp->nvram_pagesize = 256;
10289 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10291 u32 nvcfg1, protect = 0;
10293 nvcfg1 = tr32(NVRAM_CFG1);
10295 /* NVRAM protection for TPM */
10296 if (nvcfg1 & (1 << 27)) {
10297 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10301 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10303 case FLASH_5761VENDOR_ATMEL_ADB021D:
10304 case FLASH_5761VENDOR_ATMEL_ADB041D:
10305 case FLASH_5761VENDOR_ATMEL_ADB081D:
10306 case FLASH_5761VENDOR_ATMEL_ADB161D:
10307 case FLASH_5761VENDOR_ATMEL_MDB021D:
10308 case FLASH_5761VENDOR_ATMEL_MDB041D:
10309 case FLASH_5761VENDOR_ATMEL_MDB081D:
10310 case FLASH_5761VENDOR_ATMEL_MDB161D:
10311 tp->nvram_jedecnum = JEDEC_ATMEL;
10312 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10313 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10314 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10315 tp->nvram_pagesize = 256;
10317 case FLASH_5761VENDOR_ST_A_M45PE20:
10318 case FLASH_5761VENDOR_ST_A_M45PE40:
10319 case FLASH_5761VENDOR_ST_A_M45PE80:
10320 case FLASH_5761VENDOR_ST_A_M45PE16:
10321 case FLASH_5761VENDOR_ST_M_M45PE20:
10322 case FLASH_5761VENDOR_ST_M_M45PE40:
10323 case FLASH_5761VENDOR_ST_M_M45PE80:
10324 case FLASH_5761VENDOR_ST_M_M45PE16:
10325 tp->nvram_jedecnum = JEDEC_ST;
10326 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10327 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10328 tp->nvram_pagesize = 256;
10333 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10336 case FLASH_5761VENDOR_ATMEL_ADB161D:
10337 case FLASH_5761VENDOR_ATMEL_MDB161D:
10338 case FLASH_5761VENDOR_ST_A_M45PE16:
10339 case FLASH_5761VENDOR_ST_M_M45PE16:
10340 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10342 case FLASH_5761VENDOR_ATMEL_ADB081D:
10343 case FLASH_5761VENDOR_ATMEL_MDB081D:
10344 case FLASH_5761VENDOR_ST_A_M45PE80:
10345 case FLASH_5761VENDOR_ST_M_M45PE80:
10346 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10348 case FLASH_5761VENDOR_ATMEL_ADB041D:
10349 case FLASH_5761VENDOR_ATMEL_MDB041D:
10350 case FLASH_5761VENDOR_ST_A_M45PE40:
10351 case FLASH_5761VENDOR_ST_M_M45PE40:
10352 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10354 case FLASH_5761VENDOR_ATMEL_ADB021D:
10355 case FLASH_5761VENDOR_ATMEL_MDB021D:
10356 case FLASH_5761VENDOR_ST_A_M45PE20:
10357 case FLASH_5761VENDOR_ST_M_M45PE20:
10358 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10364 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10366 tp->nvram_jedecnum = JEDEC_ATMEL;
10367 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10368 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10371 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10372 static void __devinit tg3_nvram_init(struct tg3 *tp)
10374 tw32_f(GRC_EEPROM_ADDR,
10375 (EEPROM_ADDR_FSM_RESET |
10376 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10377 EEPROM_ADDR_CLKPERD_SHIFT)));
10381 /* Enable seeprom accesses. */
10382 tw32_f(GRC_LOCAL_CTRL,
10383 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10386 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10387 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10388 tp->tg3_flags |= TG3_FLAG_NVRAM;
10390 if (tg3_nvram_lock(tp)) {
10391 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10392 "tg3_nvram_init failed.\n", tp->dev->name);
10395 tg3_enable_nvram_access(tp);
10397 tp->nvram_size = 0;
10399 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10400 tg3_get_5752_nvram_info(tp);
10401 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10402 tg3_get_5755_nvram_info(tp);
10403 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10404 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
10405 tg3_get_5787_nvram_info(tp);
10406 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10407 tg3_get_5761_nvram_info(tp);
10408 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10409 tg3_get_5906_nvram_info(tp);
10411 tg3_get_nvram_info(tp);
10413 if (tp->nvram_size == 0)
10414 tg3_get_nvram_size(tp);
10416 tg3_disable_nvram_access(tp);
10417 tg3_nvram_unlock(tp);
10420 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10422 tg3_get_eeprom_size(tp);
10426 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10427 u32 offset, u32 *val)
10432 if (offset > EEPROM_ADDR_ADDR_MASK ||
10436 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10437 EEPROM_ADDR_DEVID_MASK |
10439 tw32(GRC_EEPROM_ADDR,
10441 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10442 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10443 EEPROM_ADDR_ADDR_MASK) |
10444 EEPROM_ADDR_READ | EEPROM_ADDR_START);
10446 for (i = 0; i < 1000; i++) {
10447 tmp = tr32(GRC_EEPROM_ADDR);
10449 if (tmp & EEPROM_ADDR_COMPLETE)
10453 if (!(tmp & EEPROM_ADDR_COMPLETE))
10456 *val = tr32(GRC_EEPROM_DATA);
10460 #define NVRAM_CMD_TIMEOUT 10000
10462 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10466 tw32(NVRAM_CMD, nvram_cmd);
10467 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10469 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10474 if (i == NVRAM_CMD_TIMEOUT) {
10480 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10482 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10483 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10484 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10485 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10486 (tp->nvram_jedecnum == JEDEC_ATMEL))
10488 addr = ((addr / tp->nvram_pagesize) <<
10489 ATMEL_AT45DB0X1B_PAGE_POS) +
10490 (addr % tp->nvram_pagesize);
10495 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10497 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10498 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10499 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10500 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10501 (tp->nvram_jedecnum == JEDEC_ATMEL))
10503 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10504 tp->nvram_pagesize) +
10505 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10510 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10514 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10515 return tg3_nvram_read_using_eeprom(tp, offset, val);
10517 offset = tg3_nvram_phys_addr(tp, offset);
10519 if (offset > NVRAM_ADDR_MSK)
10522 ret = tg3_nvram_lock(tp);
10526 tg3_enable_nvram_access(tp);
10528 tw32(NVRAM_ADDR, offset);
10529 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10530 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10533 *val = swab32(tr32(NVRAM_RDDATA));
10535 tg3_disable_nvram_access(tp);
10537 tg3_nvram_unlock(tp);
10542 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10545 int res = tg3_nvram_read(tp, offset, &v);
10547 *val = cpu_to_le32(v);
10551 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10556 err = tg3_nvram_read(tp, offset, &tmp);
10557 *val = swab32(tmp);
10561 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10562 u32 offset, u32 len, u8 *buf)
10567 for (i = 0; i < len; i += 4) {
10573 memcpy(&data, buf + i, 4);
10575 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
10577 val = tr32(GRC_EEPROM_ADDR);
10578 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10580 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10582 tw32(GRC_EEPROM_ADDR, val |
10583 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10584 (addr & EEPROM_ADDR_ADDR_MASK) |
10585 EEPROM_ADDR_START |
10586 EEPROM_ADDR_WRITE);
10588 for (j = 0; j < 1000; j++) {
10589 val = tr32(GRC_EEPROM_ADDR);
10591 if (val & EEPROM_ADDR_COMPLETE)
10595 if (!(val & EEPROM_ADDR_COMPLETE)) {
10604 /* offset and length are dword aligned */
10605 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10609 u32 pagesize = tp->nvram_pagesize;
10610 u32 pagemask = pagesize - 1;
10614 tmp = kmalloc(pagesize, GFP_KERNEL);
10620 u32 phy_addr, page_off, size;
10622 phy_addr = offset & ~pagemask;
10624 for (j = 0; j < pagesize; j += 4) {
10625 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
10626 (__le32 *) (tmp + j))))
10632 page_off = offset & pagemask;
10639 memcpy(tmp + page_off, buf, size);
10641 offset = offset + (pagesize - page_off);
10643 tg3_enable_nvram_access(tp);
10646 * Before we can erase the flash page, we need
10647 * to issue a special "write enable" command.
10649 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10651 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10654 /* Erase the target page */
10655 tw32(NVRAM_ADDR, phy_addr);
10657 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
10658 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
10660 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10663 /* Issue another write enable to start the write. */
10664 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10666 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10669 for (j = 0; j < pagesize; j += 4) {
10672 data = *((__be32 *) (tmp + j));
10673 /* swab32(le32_to_cpu(data)), actually */
10674 tw32(NVRAM_WRDATA, be32_to_cpu(data));
10676 tw32(NVRAM_ADDR, phy_addr + j);
10678 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
10682 nvram_cmd |= NVRAM_CMD_FIRST;
10683 else if (j == (pagesize - 4))
10684 nvram_cmd |= NVRAM_CMD_LAST;
10686 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10693 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10694 tg3_nvram_exec_cmd(tp, nvram_cmd);
10701 /* offset and length are dword aligned */
10702 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10707 for (i = 0; i < len; i += 4, offset += 4) {
10708 u32 page_off, phy_addr, nvram_cmd;
10711 memcpy(&data, buf + i, 4);
10712 tw32(NVRAM_WRDATA, be32_to_cpu(data));
10714 page_off = offset % tp->nvram_pagesize;
10716 phy_addr = tg3_nvram_phys_addr(tp, offset);
10718 tw32(NVRAM_ADDR, phy_addr);
10720 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
10722 if ((page_off == 0) || (i == 0))
10723 nvram_cmd |= NVRAM_CMD_FIRST;
10724 if (page_off == (tp->nvram_pagesize - 4))
10725 nvram_cmd |= NVRAM_CMD_LAST;
10727 if (i == (len - 4))
10728 nvram_cmd |= NVRAM_CMD_LAST;
10730 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
10731 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
10732 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
10733 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
10734 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
10735 (tp->nvram_jedecnum == JEDEC_ST) &&
10736 (nvram_cmd & NVRAM_CMD_FIRST)) {
10738 if ((ret = tg3_nvram_exec_cmd(tp,
10739 NVRAM_CMD_WREN | NVRAM_CMD_GO |
10744 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10745 /* We always do complete word writes to eeprom. */
10746 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
10749 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10755 /* offset and length are dword aligned */
10756 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10760 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10761 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10762 ~GRC_LCLCTRL_GPIO_OUTPUT1);
10766 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10767 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10772 ret = tg3_nvram_lock(tp);
10776 tg3_enable_nvram_access(tp);
10777 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10778 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10779 tw32(NVRAM_WRITE1, 0x406);
10781 grc_mode = tr32(GRC_MODE);
10782 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10784 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10785 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10787 ret = tg3_nvram_write_block_buffered(tp, offset, len,
10791 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10795 grc_mode = tr32(GRC_MODE);
10796 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10798 tg3_disable_nvram_access(tp);
10799 tg3_nvram_unlock(tp);
10802 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10803 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10810 struct subsys_tbl_ent {
10811 u16 subsys_vendor, subsys_devid;
10815 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
10816 /* Broadcom boards. */
10817 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
10818 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
10819 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
10820 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
10821 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
10822 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
10823 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
10824 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
10825 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
10826 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
10827 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
10830 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
10831 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
10832 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
10833 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
10834 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
10837 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
10838 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
10839 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
10840 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
10842 /* Compaq boards. */
10843 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
10844 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
10845 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
10846 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
10847 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
10850 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
10853 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10857 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10858 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10859 tp->pdev->subsystem_vendor) &&
10860 (subsys_id_to_phy_id[i].subsys_devid ==
10861 tp->pdev->subsystem_device))
10862 return &subsys_id_to_phy_id[i];
10867 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10872 /* On some early chips the SRAM cannot be accessed in D3hot state,
10873 * so need make sure we're in D0.
10875 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10876 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10877 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10880 /* Make sure register accesses (indirect or otherwise)
10881 * will function correctly.
10883 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10884 tp->misc_host_ctrl);
10886 /* The memory arbiter has to be enabled in order for SRAM accesses
10887 * to succeed. Normally on powerup the tg3 chip firmware will make
10888 * sure it is enabled, but other entities such as system netboot
10889 * code might disable it.
10891 val = tr32(MEMARB_MODE);
10892 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10894 tp->phy_id = PHY_ID_INVALID;
10895 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10897 /* Assume an onboard device and WOL capable by default. */
10898 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10900 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10901 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10902 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10903 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10905 val = tr32(VCPU_CFGSHDW);
10906 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10907 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10908 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10909 (val & VCPU_CFGSHDW_WOL_MAGPKT))
10910 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10914 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10915 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10916 u32 nic_cfg, led_cfg;
10917 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10918 int eeprom_phy_serdes = 0;
10920 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10921 tp->nic_sram_data_cfg = nic_cfg;
10923 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10924 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10925 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10926 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10927 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10928 (ver > 0) && (ver < 0x100))
10929 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10931 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10932 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10933 eeprom_phy_serdes = 1;
10935 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10936 if (nic_phy_id != 0) {
10937 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10938 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10940 eeprom_phy_id = (id1 >> 16) << 10;
10941 eeprom_phy_id |= (id2 & 0xfc00) << 16;
10942 eeprom_phy_id |= (id2 & 0x03ff) << 0;
10946 tp->phy_id = eeprom_phy_id;
10947 if (eeprom_phy_serdes) {
10948 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10949 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10951 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10954 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10955 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10956 SHASTA_EXT_LED_MODE_MASK);
10958 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10962 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10963 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10966 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10967 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10970 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10971 tp->led_ctrl = LED_CTRL_MODE_MAC;
10973 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10974 * read on some older 5700/5701 bootcode.
10976 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10978 GET_ASIC_REV(tp->pci_chip_rev_id) ==
10980 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10984 case SHASTA_EXT_LED_SHARED:
10985 tp->led_ctrl = LED_CTRL_MODE_SHARED;
10986 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10987 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10988 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10989 LED_CTRL_MODE_PHY_2);
10992 case SHASTA_EXT_LED_MAC:
10993 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10996 case SHASTA_EXT_LED_COMBO:
10997 tp->led_ctrl = LED_CTRL_MODE_COMBO;
10998 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10999 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11000 LED_CTRL_MODE_PHY_2);
11005 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11006 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11007 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11008 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11010 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11011 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11013 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
11014 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
11015 if ((tp->pdev->subsystem_vendor ==
11016 PCI_VENDOR_ID_ARIMA) &&
11017 (tp->pdev->subsystem_device == 0x205a ||
11018 tp->pdev->subsystem_device == 0x2063))
11019 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11021 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11022 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11025 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11026 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
11027 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11028 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11030 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
11031 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
11032 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11033 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11034 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11036 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
11037 nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
11038 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11040 if (cfg2 & (1 << 17))
11041 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11043 /* serdes signal pre-emphasis in register 0x590 set by */
11044 /* bootcode if bit 18 is set */
11045 if (cfg2 & (1 << 18))
11046 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
11048 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11051 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11052 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11053 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11058 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11063 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11064 tw32(OTP_CTRL, cmd);
11066 /* Wait for up to 1 ms for command to execute. */
11067 for (i = 0; i < 100; i++) {
11068 val = tr32(OTP_STATUS);
11069 if (val & OTP_STATUS_CMD_DONE)
11074 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11077 /* Read the gphy configuration from the OTP region of the chip. The gphy
11078 * configuration is a 32-bit value that straddles the alignment boundary.
11079 * We do two 32-bit reads and then shift and merge the results.
11081 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11083 u32 bhalf_otp, thalf_otp;
11085 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11087 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11090 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11092 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11095 thalf_otp = tr32(OTP_READ_DATA);
11097 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11099 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11102 bhalf_otp = tr32(OTP_READ_DATA);
11104 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11107 static int __devinit tg3_phy_probe(struct tg3 *tp)
11109 u32 hw_phy_id_1, hw_phy_id_2;
11110 u32 hw_phy_id, hw_phy_id_masked;
11113 /* Reading the PHY ID register can conflict with ASF
11114 * firwmare access to the PHY hardware.
11117 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11118 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
11119 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11121 /* Now read the physical PHY_ID from the chip and verify
11122 * that it is sane. If it doesn't look good, we fall back
11123 * to either the hard-coded table based PHY_ID and failing
11124 * that the value found in the eeprom area.
11126 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11127 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11129 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11130 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11131 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11133 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11136 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11137 tp->phy_id = hw_phy_id;
11138 if (hw_phy_id_masked == PHY_ID_BCM8002)
11139 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11141 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
11143 if (tp->phy_id != PHY_ID_INVALID) {
11144 /* Do nothing, phy ID already set up in
11145 * tg3_get_eeprom_hw_cfg().
11148 struct subsys_tbl_ent *p;
11150 /* No eeprom signature? Try the hardcoded
11151 * subsys device table.
11153 p = lookup_by_subsys(tp);
11157 tp->phy_id = p->phy_id;
11159 tp->phy_id == PHY_ID_BCM8002)
11160 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11164 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
11165 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
11166 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
11167 u32 bmsr, adv_reg, tg3_ctrl, mask;
11169 tg3_readphy(tp, MII_BMSR, &bmsr);
11170 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11171 (bmsr & BMSR_LSTATUS))
11172 goto skip_phy_reset;
11174 err = tg3_phy_reset(tp);
11178 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11179 ADVERTISE_100HALF | ADVERTISE_100FULL |
11180 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11182 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11183 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11184 MII_TG3_CTRL_ADV_1000_FULL);
11185 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11186 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11187 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11188 MII_TG3_CTRL_ENABLE_AS_MASTER);
11191 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11192 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11193 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11194 if (!tg3_copper_is_advertising_all(tp, mask)) {
11195 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11197 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11198 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11200 tg3_writephy(tp, MII_BMCR,
11201 BMCR_ANENABLE | BMCR_ANRESTART);
11203 tg3_phy_set_wirespeed(tp);
11205 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11206 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11207 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11211 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11212 err = tg3_init_5401phy_dsp(tp);
11217 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11218 err = tg3_init_5401phy_dsp(tp);
11221 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11222 tp->link_config.advertising =
11223 (ADVERTISED_1000baseT_Half |
11224 ADVERTISED_1000baseT_Full |
11225 ADVERTISED_Autoneg |
11227 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11228 tp->link_config.advertising &=
11229 ~(ADVERTISED_1000baseT_Half |
11230 ADVERTISED_1000baseT_Full);
11235 static void __devinit tg3_read_partno(struct tg3 *tp)
11237 unsigned char vpd_data[256];
11241 if (tg3_nvram_read_swab(tp, 0x0, &magic))
11242 goto out_not_found;
11244 if (magic == TG3_EEPROM_MAGIC) {
11245 for (i = 0; i < 256; i += 4) {
11248 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11249 goto out_not_found;
11251 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
11252 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
11253 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11254 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11259 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11260 for (i = 0; i < 256; i += 4) {
11265 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11267 while (j++ < 100) {
11268 pci_read_config_word(tp->pdev, vpd_cap +
11269 PCI_VPD_ADDR, &tmp16);
11270 if (tmp16 & 0x8000)
11274 if (!(tmp16 & 0x8000))
11275 goto out_not_found;
11277 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11279 v = cpu_to_le32(tmp);
11280 memcpy(&vpd_data[i], &v, 4);
11284 /* Now parse and find the part number. */
11285 for (i = 0; i < 254; ) {
11286 unsigned char val = vpd_data[i];
11287 unsigned int block_end;
11289 if (val == 0x82 || val == 0x91) {
11292 (vpd_data[i + 2] << 8)));
11297 goto out_not_found;
11299 block_end = (i + 3 +
11301 (vpd_data[i + 2] << 8)));
11304 if (block_end > 256)
11305 goto out_not_found;
11307 while (i < (block_end - 2)) {
11308 if (vpd_data[i + 0] == 'P' &&
11309 vpd_data[i + 1] == 'N') {
11310 int partno_len = vpd_data[i + 2];
11313 if (partno_len > 24 || (partno_len + i) > 256)
11314 goto out_not_found;
11316 memcpy(tp->board_part_number,
11317 &vpd_data[i], partno_len);
11322 i += 3 + vpd_data[i + 2];
11325 /* Part number not found. */
11326 goto out_not_found;
11330 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11331 strcpy(tp->board_part_number, "BCM95906");
11333 strcpy(tp->board_part_number, "none");
11336 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11340 if (tg3_nvram_read_swab(tp, offset, &val) ||
11341 (val & 0xfc000000) != 0x0c000000 ||
11342 tg3_nvram_read_swab(tp, offset + 4, &val) ||
11349 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11351 u32 val, offset, start;
11355 if (tg3_nvram_read_swab(tp, 0, &val))
11358 if (val != TG3_EEPROM_MAGIC)
11361 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11362 tg3_nvram_read_swab(tp, 0x4, &start))
11365 offset = tg3_nvram_logical_addr(tp, offset);
11367 if (!tg3_fw_img_is_valid(tp, offset) ||
11368 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
11371 offset = offset + ver_offset - start;
11372 for (i = 0; i < 16; i += 4) {
11374 if (tg3_nvram_read_le(tp, offset + i, &v))
11377 memcpy(tp->fw_ver + i, &v, 4);
11380 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11381 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
11384 for (offset = TG3_NVM_DIR_START;
11385 offset < TG3_NVM_DIR_END;
11386 offset += TG3_NVM_DIRENT_SIZE) {
11387 if (tg3_nvram_read_swab(tp, offset, &val))
11390 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11394 if (offset == TG3_NVM_DIR_END)
11397 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11398 start = 0x08000000;
11399 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11402 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11403 !tg3_fw_img_is_valid(tp, offset) ||
11404 tg3_nvram_read_swab(tp, offset + 8, &val))
11407 offset += val - start;
11409 bcnt = strlen(tp->fw_ver);
11411 tp->fw_ver[bcnt++] = ',';
11412 tp->fw_ver[bcnt++] = ' ';
11414 for (i = 0; i < 4; i++) {
11416 if (tg3_nvram_read_le(tp, offset, &v))
11419 offset += sizeof(v);
11421 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11422 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
11426 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11430 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
11433 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11435 static int __devinit tg3_get_invariants(struct tg3 *tp)
11437 static struct pci_device_id write_reorder_chipsets[] = {
11438 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11439 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11440 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11441 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11442 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11443 PCI_DEVICE_ID_VIA_8385_0) },
11447 u32 cacheline_sz_reg;
11448 u32 pci_state_reg, grc_misc_cfg;
11453 /* Force memory write invalidate off. If we leave it on,
11454 * then on 5700_BX chips we have to enable a workaround.
11455 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11456 * to match the cacheline size. The Broadcom driver have this
11457 * workaround but turns MWI off all the times so never uses
11458 * it. This seems to suggest that the workaround is insufficient.
11460 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11461 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11462 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11464 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11465 * has the register indirect write enable bit set before
11466 * we try to access any of the MMIO registers. It is also
11467 * critical that the PCI-X hw workaround situation is decided
11468 * before that as well.
11470 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11473 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11474 MISC_HOST_CTRL_CHIPREV_SHIFT);
11475 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11476 u32 prod_id_asic_rev;
11478 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11479 &prod_id_asic_rev);
11480 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11483 /* Wrong chip ID in 5752 A0. This code can be removed later
11484 * as A0 is not in production.
11486 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11487 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11489 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11490 * we need to disable memory and use config. cycles
11491 * only to access all registers. The 5702/03 chips
11492 * can mistakenly decode the special cycles from the
11493 * ICH chipsets as memory write cycles, causing corruption
11494 * of register and memory space. Only certain ICH bridges
11495 * will drive special cycles with non-zero data during the
11496 * address phase which can fall within the 5703's address
11497 * range. This is not an ICH bug as the PCI spec allows
11498 * non-zero address during special cycles. However, only
11499 * these ICH bridges are known to drive non-zero addresses
11500 * during special cycles.
11502 * Since special cycles do not cross PCI bridges, we only
11503 * enable this workaround if the 5703 is on the secondary
11504 * bus of these ICH bridges.
11506 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11507 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11508 static struct tg3_dev_id {
11512 } ich_chipsets[] = {
11513 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11515 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11517 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11519 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11523 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11524 struct pci_dev *bridge = NULL;
11526 while (pci_id->vendor != 0) {
11527 bridge = pci_get_device(pci_id->vendor, pci_id->device,
11533 if (pci_id->rev != PCI_ANY_ID) {
11534 if (bridge->revision > pci_id->rev)
11537 if (bridge->subordinate &&
11538 (bridge->subordinate->number ==
11539 tp->pdev->bus->number)) {
11541 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11542 pci_dev_put(bridge);
11548 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11549 static struct tg3_dev_id {
11552 } bridge_chipsets[] = {
11553 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11554 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11557 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11558 struct pci_dev *bridge = NULL;
11560 while (pci_id->vendor != 0) {
11561 bridge = pci_get_device(pci_id->vendor,
11568 if (bridge->subordinate &&
11569 (bridge->subordinate->number <=
11570 tp->pdev->bus->number) &&
11571 (bridge->subordinate->subordinate >=
11572 tp->pdev->bus->number)) {
11573 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11574 pci_dev_put(bridge);
11580 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11581 * DMA addresses > 40-bit. This bridge may have other additional
11582 * 57xx devices behind it in some 4-port NIC designs for example.
11583 * Any tg3 device found behind the bridge will also need the 40-bit
11586 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11587 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11588 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11589 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11590 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11593 struct pci_dev *bridge = NULL;
11596 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11597 PCI_DEVICE_ID_SERVERWORKS_EPB,
11599 if (bridge && bridge->subordinate &&
11600 (bridge->subordinate->number <=
11601 tp->pdev->bus->number) &&
11602 (bridge->subordinate->subordinate >=
11603 tp->pdev->bus->number)) {
11604 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11605 pci_dev_put(bridge);
11611 /* Initialize misc host control in PCI block. */
11612 tp->misc_host_ctrl |= (misc_ctrl_reg &
11613 MISC_HOST_CTRL_CHIPREV);
11614 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11615 tp->misc_host_ctrl);
11617 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11618 &cacheline_sz_reg);
11620 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
11621 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
11622 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
11623 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
11625 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11626 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11627 tp->pdev_peer = tg3_find_peer(tp);
11629 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11630 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11631 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11632 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11633 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11634 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11635 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11636 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11637 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11639 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11640 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11641 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11643 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11644 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11645 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11646 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11647 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11648 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11649 tp->pdev_peer == tp->pdev))
11650 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11652 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11653 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11654 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11655 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11656 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11657 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11658 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11660 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11661 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11663 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11664 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11668 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11669 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11670 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11672 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11673 if (pcie_cap != 0) {
11674 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11676 pcie_set_readrq(tp->pdev, 4096);
11678 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11681 pci_read_config_word(tp->pdev,
11682 pcie_cap + PCI_EXP_LNKCTL,
11684 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11685 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11689 /* If we have an AMD 762 or VIA K8T800 chipset, write
11690 * reordering to the mailbox registers done by the host
11691 * controller can cause major troubles. We read back from
11692 * every mailbox register write to force the writes to be
11693 * posted to the chip in order.
11695 if (pci_dev_present(write_reorder_chipsets) &&
11696 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11697 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11699 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11700 tp->pci_lat_timer < 64) {
11701 tp->pci_lat_timer = 64;
11703 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
11704 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
11705 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
11706 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
11708 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11712 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11713 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11714 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11715 if (!tp->pcix_cap) {
11716 printk(KERN_ERR PFX "Cannot find PCI-X "
11717 "capability, aborting.\n");
11722 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11725 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11726 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11728 /* If this is a 5700 BX chipset, and we are in PCI-X
11729 * mode, enable register write workaround.
11731 * The workaround is to use indirect register accesses
11732 * for all chip writes not to mailbox registers.
11734 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11737 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11739 /* The chip can have it's power management PCI config
11740 * space registers clobbered due to this bug.
11741 * So explicitly force the chip into D0 here.
11743 pci_read_config_dword(tp->pdev,
11744 tp->pm_cap + PCI_PM_CTRL,
11746 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11747 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11748 pci_write_config_dword(tp->pdev,
11749 tp->pm_cap + PCI_PM_CTRL,
11752 /* Also, force SERR#/PERR# in PCI command. */
11753 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11754 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11755 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11759 /* 5700 BX chips need to have their TX producer index mailboxes
11760 * written twice to workaround a bug.
11762 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11763 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11765 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11766 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11767 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11768 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11770 /* Chip-specific fixup from Broadcom driver */
11771 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11772 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11773 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11774 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11777 /* Default fast path register access methods */
11778 tp->read32 = tg3_read32;
11779 tp->write32 = tg3_write32;
11780 tp->read32_mbox = tg3_read32;
11781 tp->write32_mbox = tg3_write32;
11782 tp->write32_tx_mbox = tg3_write32;
11783 tp->write32_rx_mbox = tg3_write32;
11785 /* Various workaround register access methods */
11786 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11787 tp->write32 = tg3_write_indirect_reg32;
11788 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11789 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11790 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11792 * Back to back register writes can cause problems on these
11793 * chips, the workaround is to read back all reg writes
11794 * except those to mailbox regs.
11796 * See tg3_write_indirect_reg32().
11798 tp->write32 = tg3_write_flush_reg32;
11802 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11803 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11804 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11805 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11806 tp->write32_rx_mbox = tg3_write_flush_reg32;
11809 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11810 tp->read32 = tg3_read_indirect_reg32;
11811 tp->write32 = tg3_write_indirect_reg32;
11812 tp->read32_mbox = tg3_read_indirect_mbox;
11813 tp->write32_mbox = tg3_write_indirect_mbox;
11814 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11815 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11820 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11821 pci_cmd &= ~PCI_COMMAND_MEMORY;
11822 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11824 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11825 tp->read32_mbox = tg3_read32_mbox_5906;
11826 tp->write32_mbox = tg3_write32_mbox_5906;
11827 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11828 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11831 if (tp->write32 == tg3_write_indirect_reg32 ||
11832 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11833 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11834 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11835 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11837 /* Get eeprom hw config before calling tg3_set_power_state().
11838 * In particular, the TG3_FLG2_IS_NIC flag must be
11839 * determined before calling tg3_set_power_state() so that
11840 * we know whether or not to switch out of Vaux power.
11841 * When the flag is set, it means that GPIO1 is used for eeprom
11842 * write protect and also implies that it is a LOM where GPIOs
11843 * are not used to switch power.
11845 tg3_get_eeprom_hw_cfg(tp);
11847 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11848 /* Allow reads and writes to the
11849 * APE register and memory space.
11851 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11852 PCISTATE_ALLOW_APE_SHMEM_WR;
11853 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11857 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11858 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11859 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11861 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
11862 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
11863 tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
11864 tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
11865 tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
11868 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11869 * GPIO1 driven high will bring 5700's external PHY out of reset.
11870 * It is also used as eeprom write protect on LOMs.
11872 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11873 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11874 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11875 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11876 GRC_LCLCTRL_GPIO_OUTPUT1);
11877 /* Unused GPIO3 must be driven as output on 5752 because there
11878 * are no pull-up resistors on unused GPIO pins.
11880 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11881 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11883 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11884 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11886 /* Force the chip into D0. */
11887 err = tg3_set_power_state(tp, PCI_D0);
11889 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11890 pci_name(tp->pdev));
11894 /* 5700 B0 chips do not support checksumming correctly due
11895 * to hardware bugs.
11897 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11898 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11900 /* Derive initial jumbo mode from MTU assigned in
11901 * ether_setup() via the alloc_etherdev() call
11903 if (tp->dev->mtu > ETH_DATA_LEN &&
11904 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11905 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11907 /* Determine WakeOnLan speed to use. */
11908 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11909 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11910 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11911 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11912 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11914 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11917 /* A few boards don't want Ethernet@WireSpeed phy feature */
11918 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11919 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11920 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11921 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11922 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11923 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11924 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11926 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11927 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11928 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11929 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11930 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11932 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11933 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11934 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11935 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11936 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11937 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11938 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11939 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11940 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11941 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11942 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11943 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11946 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11947 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
11948 tp->phy_otp = tg3_read_otp_phycfg(tp);
11949 if (tp->phy_otp == 0)
11950 tp->phy_otp = TG3_OTP_DEFAULT;
11953 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
11954 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
11956 tp->mi_mode = MAC_MI_MODE_BASE;
11958 tp->coalesce_mode = 0;
11959 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11960 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11961 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11963 err = tg3_mdio_init(tp);
11967 /* Initialize data/descriptor byte/word swapping. */
11968 val = tr32(GRC_MODE);
11969 val &= GRC_MODE_HOST_STACKUP;
11970 tw32(GRC_MODE, val | tp->grc_mode);
11972 tg3_switch_clocks(tp);
11974 /* Clear this out for sanity. */
11975 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11977 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11979 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11980 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11981 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11983 if (chiprevid == CHIPREV_ID_5701_A0 ||
11984 chiprevid == CHIPREV_ID_5701_B0 ||
11985 chiprevid == CHIPREV_ID_5701_B2 ||
11986 chiprevid == CHIPREV_ID_5701_B5) {
11987 void __iomem *sram_base;
11989 /* Write some dummy words into the SRAM status block
11990 * area, see if it reads back correctly. If the return
11991 * value is bad, force enable the PCIX workaround.
11993 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11995 writel(0x00000000, sram_base);
11996 writel(0x00000000, sram_base + 4);
11997 writel(0xffffffff, sram_base + 4);
11998 if (readl(sram_base) != 0x00000000)
11999 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12004 tg3_nvram_init(tp);
12006 grc_misc_cfg = tr32(GRC_MISC_CFG);
12007 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12009 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12010 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12011 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12012 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12014 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12015 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12016 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12017 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12018 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12019 HOSTCC_MODE_CLRTICK_TXBD);
12021 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12022 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12023 tp->misc_host_ctrl);
12026 /* these are limited to 10/100 only */
12027 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12028 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12029 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12030 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12031 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12032 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12033 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12034 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12035 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12036 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12037 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12038 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12039 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12041 err = tg3_phy_probe(tp);
12043 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12044 pci_name(tp->pdev), err);
12045 /* ... but do not return immediately ... */
12048 tg3_read_partno(tp);
12049 tg3_read_fw_ver(tp);
12051 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12052 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12054 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12055 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12057 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12060 /* 5700 {AX,BX} chips have a broken status block link
12061 * change bit implementation, so we must use the
12062 * status register in those cases.
12064 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12065 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12067 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12069 /* The led_ctrl is set during tg3_phy_probe, here we might
12070 * have to force the link status polling mechanism based
12071 * upon subsystem IDs.
12073 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12074 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12075 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12076 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12077 TG3_FLAG_USE_LINKCHG_REG);
12080 /* For all SERDES we poll the MAC status register. */
12081 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12082 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12084 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12086 /* All chips before 5787 can get confused if TX buffers
12087 * straddle the 4GB address boundary in some cases.
12089 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12090 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12091 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12092 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12093 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12094 tp->dev->hard_start_xmit = tg3_start_xmit;
12096 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
12099 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12100 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12103 tp->rx_std_max_post = TG3_RX_RING_SIZE;
12105 /* Increment the rx prod index on the rx std ring by at most
12106 * 8 for these chips to workaround hw errata.
12108 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12109 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12110 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12111 tp->rx_std_max_post = 8;
12113 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12114 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12115 PCIE_PWR_MGMT_L1_THRESH_MSK;
12120 #ifdef CONFIG_SPARC
12121 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12123 struct net_device *dev = tp->dev;
12124 struct pci_dev *pdev = tp->pdev;
12125 struct device_node *dp = pci_device_to_OF_node(pdev);
12126 const unsigned char *addr;
12129 addr = of_get_property(dp, "local-mac-address", &len);
12130 if (addr && len == 6) {
12131 memcpy(dev->dev_addr, addr, 6);
12132 memcpy(dev->perm_addr, dev->dev_addr, 6);
12138 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12140 struct net_device *dev = tp->dev;
12142 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12143 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12148 static int __devinit tg3_get_device_address(struct tg3 *tp)
12150 struct net_device *dev = tp->dev;
12151 u32 hi, lo, mac_offset;
12154 #ifdef CONFIG_SPARC
12155 if (!tg3_get_macaddr_sparc(tp))
12160 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12161 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12162 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12164 if (tg3_nvram_lock(tp))
12165 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12167 tg3_nvram_unlock(tp);
12169 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12172 /* First try to get it from MAC address mailbox. */
12173 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
12174 if ((hi >> 16) == 0x484b) {
12175 dev->dev_addr[0] = (hi >> 8) & 0xff;
12176 dev->dev_addr[1] = (hi >> 0) & 0xff;
12178 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12179 dev->dev_addr[2] = (lo >> 24) & 0xff;
12180 dev->dev_addr[3] = (lo >> 16) & 0xff;
12181 dev->dev_addr[4] = (lo >> 8) & 0xff;
12182 dev->dev_addr[5] = (lo >> 0) & 0xff;
12184 /* Some old bootcode may report a 0 MAC address in SRAM */
12185 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12188 /* Next, try NVRAM. */
12189 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
12190 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12191 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12192 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12193 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12194 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12195 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12196 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12198 /* Finally just fetch it out of the MAC control regs. */
12200 hi = tr32(MAC_ADDR_0_HIGH);
12201 lo = tr32(MAC_ADDR_0_LOW);
12203 dev->dev_addr[5] = lo & 0xff;
12204 dev->dev_addr[4] = (lo >> 8) & 0xff;
12205 dev->dev_addr[3] = (lo >> 16) & 0xff;
12206 dev->dev_addr[2] = (lo >> 24) & 0xff;
12207 dev->dev_addr[1] = hi & 0xff;
12208 dev->dev_addr[0] = (hi >> 8) & 0xff;
12212 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
12213 #ifdef CONFIG_SPARC
12214 if (!tg3_get_default_macaddr_sparc(tp))
12219 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
12223 #define BOUNDARY_SINGLE_CACHELINE 1
12224 #define BOUNDARY_MULTI_CACHELINE 2
12226 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12228 int cacheline_size;
12232 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12234 cacheline_size = 1024;
12236 cacheline_size = (int) byte * 4;
12238 /* On 5703 and later chips, the boundary bits have no
12241 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12242 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12243 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12246 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12247 goal = BOUNDARY_MULTI_CACHELINE;
12249 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12250 goal = BOUNDARY_SINGLE_CACHELINE;
12259 /* PCI controllers on most RISC systems tend to disconnect
12260 * when a device tries to burst across a cache-line boundary.
12261 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12263 * Unfortunately, for PCI-E there are only limited
12264 * write-side controls for this, and thus for reads
12265 * we will still get the disconnects. We'll also waste
12266 * these PCI cycles for both read and write for chips
12267 * other than 5700 and 5701 which do not implement the
12270 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12271 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12272 switch (cacheline_size) {
12277 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12278 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12279 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12281 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12282 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12287 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12288 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12292 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12293 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12296 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12297 switch (cacheline_size) {
12301 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12302 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12303 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12309 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12310 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12314 switch (cacheline_size) {
12316 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12317 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12318 DMA_RWCTRL_WRITE_BNDRY_16);
12323 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12324 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12325 DMA_RWCTRL_WRITE_BNDRY_32);
12330 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12331 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12332 DMA_RWCTRL_WRITE_BNDRY_64);
12337 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12338 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12339 DMA_RWCTRL_WRITE_BNDRY_128);
12344 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12345 DMA_RWCTRL_WRITE_BNDRY_256);
12348 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12349 DMA_RWCTRL_WRITE_BNDRY_512);
12353 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12354 DMA_RWCTRL_WRITE_BNDRY_1024);
12363 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12365 struct tg3_internal_buffer_desc test_desc;
12366 u32 sram_dma_descs;
12369 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12371 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12372 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12373 tw32(RDMAC_STATUS, 0);
12374 tw32(WDMAC_STATUS, 0);
12376 tw32(BUFMGR_MODE, 0);
12377 tw32(FTQ_RESET, 0);
12379 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12380 test_desc.addr_lo = buf_dma & 0xffffffff;
12381 test_desc.nic_mbuf = 0x00002100;
12382 test_desc.len = size;
12385 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
12386 * the *second* time the tg3 driver was getting loaded after an
12389 * Broadcom tells me:
12390 * ...the DMA engine is connected to the GRC block and a DMA
12391 * reset may affect the GRC block in some unpredictable way...
12392 * The behavior of resets to individual blocks has not been tested.
12394 * Broadcom noted the GRC reset will also reset all sub-components.
12397 test_desc.cqid_sqid = (13 << 8) | 2;
12399 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12402 test_desc.cqid_sqid = (16 << 8) | 7;
12404 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12407 test_desc.flags = 0x00000005;
12409 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12412 val = *(((u32 *)&test_desc) + i);
12413 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12414 sram_dma_descs + (i * sizeof(u32)));
12415 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12417 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12420 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12422 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12426 for (i = 0; i < 40; i++) {
12430 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12432 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12433 if ((val & 0xffff) == sram_dma_descs) {
12444 #define TEST_BUFFER_SIZE 0x2000
12446 static int __devinit tg3_test_dma(struct tg3 *tp)
12448 dma_addr_t buf_dma;
12449 u32 *buf, saved_dma_rwctrl;
12452 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12458 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12459 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12461 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12463 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12464 /* DMA read watermark not used on PCIE */
12465 tp->dma_rwctrl |= 0x00180000;
12466 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12467 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12468 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12469 tp->dma_rwctrl |= 0x003f0000;
12471 tp->dma_rwctrl |= 0x003f000f;
12473 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12474 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12475 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12476 u32 read_water = 0x7;
12478 /* If the 5704 is behind the EPB bridge, we can
12479 * do the less restrictive ONE_DMA workaround for
12480 * better performance.
12482 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12483 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12484 tp->dma_rwctrl |= 0x8000;
12485 else if (ccval == 0x6 || ccval == 0x7)
12486 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12488 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12490 /* Set bit 23 to enable PCIX hw bug fix */
12492 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12493 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12495 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12496 /* 5780 always in PCIX mode */
12497 tp->dma_rwctrl |= 0x00144000;
12498 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12499 /* 5714 always in PCIX mode */
12500 tp->dma_rwctrl |= 0x00148000;
12502 tp->dma_rwctrl |= 0x001b000f;
12506 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12507 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12508 tp->dma_rwctrl &= 0xfffffff0;
12510 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12511 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12512 /* Remove this if it causes problems for some boards. */
12513 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12515 /* On 5700/5701 chips, we need to set this bit.
12516 * Otherwise the chip will issue cacheline transactions
12517 * to streamable DMA memory with not all the byte
12518 * enables turned on. This is an error on several
12519 * RISC PCI controllers, in particular sparc64.
12521 * On 5703/5704 chips, this bit has been reassigned
12522 * a different meaning. In particular, it is used
12523 * on those chips to enable a PCI-X workaround.
12525 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12528 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12531 /* Unneeded, already done by tg3_get_invariants. */
12532 tg3_switch_clocks(tp);
12536 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12537 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12540 /* It is best to perform DMA test with maximum write burst size
12541 * to expose the 5700/5701 write DMA bug.
12543 saved_dma_rwctrl = tp->dma_rwctrl;
12544 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12545 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12550 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12553 /* Send the buffer to the chip. */
12554 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12556 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
12561 /* validate data reached card RAM correctly. */
12562 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12564 tg3_read_mem(tp, 0x2100 + (i*4), &val);
12565 if (le32_to_cpu(val) != p[i]) {
12566 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
12567 /* ret = -ENODEV here? */
12572 /* Now read it back. */
12573 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
12575 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
12581 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12585 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12586 DMA_RWCTRL_WRITE_BNDRY_16) {
12587 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12588 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12589 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12592 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
12598 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
12604 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12605 DMA_RWCTRL_WRITE_BNDRY_16) {
12606 static struct pci_device_id dma_wait_state_chipsets[] = {
12607 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
12608 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
12612 /* DMA test passed without adjusting DMA boundary,
12613 * now look for chipsets that are known to expose the
12614 * DMA bug without failing the test.
12616 if (pci_dev_present(dma_wait_state_chipsets)) {
12617 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12618 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12621 /* Safe to use the calculated DMA boundary. */
12622 tp->dma_rwctrl = saved_dma_rwctrl;
12624 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12628 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
12633 static void __devinit tg3_init_link_config(struct tg3 *tp)
12635 tp->link_config.advertising =
12636 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12637 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12638 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12639 ADVERTISED_Autoneg | ADVERTISED_MII);
12640 tp->link_config.speed = SPEED_INVALID;
12641 tp->link_config.duplex = DUPLEX_INVALID;
12642 tp->link_config.autoneg = AUTONEG_ENABLE;
12643 tp->link_config.active_speed = SPEED_INVALID;
12644 tp->link_config.active_duplex = DUPLEX_INVALID;
12645 tp->link_config.phy_is_low_power = 0;
12646 tp->link_config.orig_speed = SPEED_INVALID;
12647 tp->link_config.orig_duplex = DUPLEX_INVALID;
12648 tp->link_config.orig_autoneg = AUTONEG_INVALID;
12651 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12653 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12654 tp->bufmgr_config.mbuf_read_dma_low_water =
12655 DEFAULT_MB_RDMA_LOW_WATER_5705;
12656 tp->bufmgr_config.mbuf_mac_rx_low_water =
12657 DEFAULT_MB_MACRX_LOW_WATER_5705;
12658 tp->bufmgr_config.mbuf_high_water =
12659 DEFAULT_MB_HIGH_WATER_5705;
12660 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12661 tp->bufmgr_config.mbuf_mac_rx_low_water =
12662 DEFAULT_MB_MACRX_LOW_WATER_5906;
12663 tp->bufmgr_config.mbuf_high_water =
12664 DEFAULT_MB_HIGH_WATER_5906;
12667 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12668 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12669 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12670 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12671 tp->bufmgr_config.mbuf_high_water_jumbo =
12672 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12674 tp->bufmgr_config.mbuf_read_dma_low_water =
12675 DEFAULT_MB_RDMA_LOW_WATER;
12676 tp->bufmgr_config.mbuf_mac_rx_low_water =
12677 DEFAULT_MB_MACRX_LOW_WATER;
12678 tp->bufmgr_config.mbuf_high_water =
12679 DEFAULT_MB_HIGH_WATER;
12681 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12682 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12683 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12684 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12685 tp->bufmgr_config.mbuf_high_water_jumbo =
12686 DEFAULT_MB_HIGH_WATER_JUMBO;
12689 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12690 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12693 static char * __devinit tg3_phy_string(struct tg3 *tp)
12695 switch (tp->phy_id & PHY_ID_MASK) {
12696 case PHY_ID_BCM5400: return "5400";
12697 case PHY_ID_BCM5401: return "5401";
12698 case PHY_ID_BCM5411: return "5411";
12699 case PHY_ID_BCM5701: return "5701";
12700 case PHY_ID_BCM5703: return "5703";
12701 case PHY_ID_BCM5704: return "5704";
12702 case PHY_ID_BCM5705: return "5705";
12703 case PHY_ID_BCM5750: return "5750";
12704 case PHY_ID_BCM5752: return "5752";
12705 case PHY_ID_BCM5714: return "5714";
12706 case PHY_ID_BCM5780: return "5780";
12707 case PHY_ID_BCM5755: return "5755";
12708 case PHY_ID_BCM5787: return "5787";
12709 case PHY_ID_BCM5784: return "5784";
12710 case PHY_ID_BCM5756: return "5722/5756";
12711 case PHY_ID_BCM5906: return "5906";
12712 case PHY_ID_BCM5761: return "5761";
12713 case PHY_ID_BCM8002: return "8002/serdes";
12714 case 0: return "serdes";
12715 default: return "unknown";
12719 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12721 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12722 strcpy(str, "PCI Express");
12724 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12725 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12727 strcpy(str, "PCIX:");
12729 if ((clock_ctrl == 7) ||
12730 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12731 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12732 strcat(str, "133MHz");
12733 else if (clock_ctrl == 0)
12734 strcat(str, "33MHz");
12735 else if (clock_ctrl == 2)
12736 strcat(str, "50MHz");
12737 else if (clock_ctrl == 4)
12738 strcat(str, "66MHz");
12739 else if (clock_ctrl == 6)
12740 strcat(str, "100MHz");
12742 strcpy(str, "PCI:");
12743 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12744 strcat(str, "66MHz");
12746 strcat(str, "33MHz");
12748 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12749 strcat(str, ":32-bit");
12751 strcat(str, ":64-bit");
12755 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
12757 struct pci_dev *peer;
12758 unsigned int func, devnr = tp->pdev->devfn & ~7;
12760 for (func = 0; func < 8; func++) {
12761 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12762 if (peer && peer != tp->pdev)
12766 /* 5704 can be configured in single-port mode, set peer to
12767 * tp->pdev in that case.
12775 * We don't need to keep the refcount elevated; there's no way
12776 * to remove one half of this device without removing the other
12783 static void __devinit tg3_init_coal(struct tg3 *tp)
12785 struct ethtool_coalesce *ec = &tp->coal;
12787 memset(ec, 0, sizeof(*ec));
12788 ec->cmd = ETHTOOL_GCOALESCE;
12789 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12790 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12791 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12792 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12793 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12794 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12795 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12796 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12797 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12799 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12800 HOSTCC_MODE_CLRTICK_TXBD)) {
12801 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12802 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12803 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12804 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12807 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12808 ec->rx_coalesce_usecs_irq = 0;
12809 ec->tx_coalesce_usecs_irq = 0;
12810 ec->stats_block_coalesce_usecs = 0;
12814 static int __devinit tg3_init_one(struct pci_dev *pdev,
12815 const struct pci_device_id *ent)
12817 static int tg3_version_printed = 0;
12818 resource_size_t tg3reg_base;
12819 unsigned long tg3reg_len;
12820 struct net_device *dev;
12824 u64 dma_mask, persist_dma_mask;
12825 DECLARE_MAC_BUF(mac);
12827 if (tg3_version_printed++ == 0)
12828 printk(KERN_INFO "%s", version);
12830 err = pci_enable_device(pdev);
12832 printk(KERN_ERR PFX "Cannot enable PCI device, "
12837 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12838 printk(KERN_ERR PFX "Cannot find proper PCI device "
12839 "base address, aborting.\n");
12841 goto err_out_disable_pdev;
12844 err = pci_request_regions(pdev, DRV_MODULE_NAME);
12846 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12848 goto err_out_disable_pdev;
12851 pci_set_master(pdev);
12853 /* Find power-management capability. */
12854 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12856 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12859 goto err_out_free_res;
12862 tg3reg_base = pci_resource_start(pdev, 0);
12863 tg3reg_len = pci_resource_len(pdev, 0);
12865 dev = alloc_etherdev(sizeof(*tp));
12867 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12869 goto err_out_free_res;
12872 SET_NETDEV_DEV(dev, &pdev->dev);
12874 #if TG3_VLAN_TAG_USED
12875 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12876 dev->vlan_rx_register = tg3_vlan_rx_register;
12879 tp = netdev_priv(dev);
12882 tp->pm_cap = pm_cap;
12883 tp->mac_mode = TG3_DEF_MAC_MODE;
12884 tp->rx_mode = TG3_DEF_RX_MODE;
12885 tp->tx_mode = TG3_DEF_TX_MODE;
12888 tp->msg_enable = tg3_debug;
12890 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12892 /* The word/byte swap controls here control register access byte
12893 * swapping. DMA data byte swapping is controlled in the GRC_MODE
12896 tp->misc_host_ctrl =
12897 MISC_HOST_CTRL_MASK_PCI_INT |
12898 MISC_HOST_CTRL_WORD_SWAP |
12899 MISC_HOST_CTRL_INDIR_ACCESS |
12900 MISC_HOST_CTRL_PCISTATE_RW;
12902 /* The NONFRM (non-frame) byte/word swap controls take effect
12903 * on descriptor entries, anything which isn't packet data.
12905 * The StrongARM chips on the board (one for tx, one for rx)
12906 * are running in big-endian mode.
12908 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12909 GRC_MODE_WSWAP_NONFRM_DATA);
12910 #ifdef __BIG_ENDIAN
12911 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12913 spin_lock_init(&tp->lock);
12914 spin_lock_init(&tp->indirect_lock);
12915 INIT_WORK(&tp->reset_task, tg3_reset_task);
12917 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
12919 printk(KERN_ERR PFX "Cannot map device registers, "
12922 goto err_out_free_dev;
12925 tg3_init_link_config(tp);
12927 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12928 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12929 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
12931 dev->open = tg3_open;
12932 dev->stop = tg3_close;
12933 dev->get_stats = tg3_get_stats;
12934 dev->set_multicast_list = tg3_set_rx_mode;
12935 dev->set_mac_address = tg3_set_mac_addr;
12936 dev->do_ioctl = tg3_ioctl;
12937 dev->tx_timeout = tg3_tx_timeout;
12938 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
12939 dev->ethtool_ops = &tg3_ethtool_ops;
12940 dev->watchdog_timeo = TG3_TX_TIMEOUT;
12941 dev->change_mtu = tg3_change_mtu;
12942 dev->irq = pdev->irq;
12943 #ifdef CONFIG_NET_POLL_CONTROLLER
12944 dev->poll_controller = tg3_poll_controller;
12947 err = tg3_get_invariants(tp);
12949 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12951 goto err_out_iounmap;
12954 /* The EPB bridge inside 5714, 5715, and 5780 and any
12955 * device behind the EPB cannot support DMA addresses > 40-bit.
12956 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12957 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12958 * do DMA address check in tg3_start_xmit().
12960 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12961 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12962 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
12963 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12964 #ifdef CONFIG_HIGHMEM
12965 dma_mask = DMA_64BIT_MASK;
12968 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12970 /* Configure DMA attributes. */
12971 if (dma_mask > DMA_32BIT_MASK) {
12972 err = pci_set_dma_mask(pdev, dma_mask);
12974 dev->features |= NETIF_F_HIGHDMA;
12975 err = pci_set_consistent_dma_mask(pdev,
12978 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12979 "DMA for consistent allocations\n");
12980 goto err_out_iounmap;
12984 if (err || dma_mask == DMA_32BIT_MASK) {
12985 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12987 printk(KERN_ERR PFX "No usable DMA configuration, "
12989 goto err_out_iounmap;
12993 tg3_init_bufmgr_config(tp);
12995 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12996 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12998 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12999 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13000 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
13001 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13002 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13003 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13005 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
13008 /* TSO is on by default on chips that support hardware TSO.
13009 * Firmware TSO on older chips gives lower performance, so it
13010 * is off by default, but can be enabled using ethtool.
13012 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13013 dev->features |= NETIF_F_TSO;
13014 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13015 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
13016 dev->features |= NETIF_F_TSO6;
13017 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13018 dev->features |= NETIF_F_TSO_ECN;
13022 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13023 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13024 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13025 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13026 tp->rx_pending = 63;
13029 err = tg3_get_device_address(tp);
13031 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13033 goto err_out_iounmap;
13036 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13037 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
13038 printk(KERN_ERR PFX "Cannot find proper PCI device "
13039 "base address for APE, aborting.\n");
13041 goto err_out_iounmap;
13044 tg3reg_base = pci_resource_start(pdev, 2);
13045 tg3reg_len = pci_resource_len(pdev, 2);
13047 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
13048 if (!tp->aperegs) {
13049 printk(KERN_ERR PFX "Cannot map APE registers, "
13052 goto err_out_iounmap;
13055 tg3_ape_lock_init(tp);
13059 * Reset chip in case UNDI or EFI driver did not shutdown
13060 * DMA self test will enable WDMAC and we'll see (spurious)
13061 * pending DMA on the PCI bus at that point.
13063 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13064 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13065 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13066 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13069 err = tg3_test_dma(tp);
13071 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13072 goto err_out_apeunmap;
13075 /* Tigon3 can do ipv4 only... and some chips have buggy
13078 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13079 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13080 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13081 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13082 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13083 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13084 dev->features |= NETIF_F_IPV6_CSUM;
13086 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13088 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13090 /* flow control autonegotiation is default behavior */
13091 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
13092 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
13096 pci_set_drvdata(pdev, dev);
13098 err = register_netdev(dev);
13100 printk(KERN_ERR PFX "Cannot register net device, "
13102 goto err_out_apeunmap;
13105 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
13106 "(%s) %s Ethernet %s\n",
13108 tp->board_part_number,
13109 tp->pci_chip_rev_id,
13110 tg3_phy_string(tp),
13111 tg3_bus_string(tp, str),
13112 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13113 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13114 "10/100/1000Base-T")),
13115 print_mac(mac, dev->dev_addr));
13117 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
13118 "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
13120 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13121 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13122 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13123 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
13124 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
13125 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
13126 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13127 dev->name, tp->dma_rwctrl,
13128 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13129 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
13135 iounmap(tp->aperegs);
13136 tp->aperegs = NULL;
13149 pci_release_regions(pdev);
13151 err_out_disable_pdev:
13152 pci_disable_device(pdev);
13153 pci_set_drvdata(pdev, NULL);
13157 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13159 struct net_device *dev = pci_get_drvdata(pdev);
13162 struct tg3 *tp = netdev_priv(dev);
13164 flush_scheduled_work();
13166 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
13169 unregister_netdev(dev);
13171 iounmap(tp->aperegs);
13172 tp->aperegs = NULL;
13179 pci_release_regions(pdev);
13180 pci_disable_device(pdev);
13181 pci_set_drvdata(pdev, NULL);
13185 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13187 struct net_device *dev = pci_get_drvdata(pdev);
13188 struct tg3 *tp = netdev_priv(dev);
13191 /* PCI register 4 needs to be saved whether netif_running() or not.
13192 * MSI address and data need to be saved if using MSI and
13195 pci_save_state(pdev);
13197 if (!netif_running(dev))
13200 flush_scheduled_work();
13201 tg3_netif_stop(tp);
13203 del_timer_sync(&tp->timer);
13205 tg3_full_lock(tp, 1);
13206 tg3_disable_ints(tp);
13207 tg3_full_unlock(tp);
13209 netif_device_detach(dev);
13211 tg3_full_lock(tp, 0);
13212 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13213 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
13214 tg3_full_unlock(tp);
13216 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
13218 tg3_full_lock(tp, 0);
13220 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13221 if (tg3_restart_hw(tp, 1))
13224 tp->timer.expires = jiffies + tp->timer_offset;
13225 add_timer(&tp->timer);
13227 netif_device_attach(dev);
13228 tg3_netif_start(tp);
13231 tg3_full_unlock(tp);
13237 static int tg3_resume(struct pci_dev *pdev)
13239 struct net_device *dev = pci_get_drvdata(pdev);
13240 struct tg3 *tp = netdev_priv(dev);
13243 pci_restore_state(tp->pdev);
13245 if (!netif_running(dev))
13248 err = tg3_set_power_state(tp, PCI_D0);
13252 netif_device_attach(dev);
13254 tg3_full_lock(tp, 0);
13256 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13257 err = tg3_restart_hw(tp, 1);
13261 tp->timer.expires = jiffies + tp->timer_offset;
13262 add_timer(&tp->timer);
13264 tg3_netif_start(tp);
13267 tg3_full_unlock(tp);
13272 static struct pci_driver tg3_driver = {
13273 .name = DRV_MODULE_NAME,
13274 .id_table = tg3_pci_tbl,
13275 .probe = tg3_init_one,
13276 .remove = __devexit_p(tg3_remove_one),
13277 .suspend = tg3_suspend,
13278 .resume = tg3_resume
13281 static int __init tg3_init(void)
13283 return pci_register_driver(&tg3_driver);
13286 static void __exit tg3_cleanup(void)
13288 pci_unregister_driver(&tg3_driver);
13291 module_init(tg3_init);
13292 module_exit(tg3_cleanup);