2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2011 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mdio.h>
36 #include <linux/mii.h>
37 #include <linux/phy.h>
38 #include <linux/brcmphy.h>
39 #include <linux/if_vlan.h>
41 #include <linux/tcp.h>
42 #include <linux/workqueue.h>
43 #include <linux/prefetch.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/firmware.h>
47 #include <net/checksum.h>
50 #include <asm/system.h>
52 #include <asm/byteorder.h>
53 #include <linux/uaccess.h>
56 #include <asm/idprom.h>
65 /* Functions & macros to verify TG3_FLAGS types */
/* Flag helpers: thin wrappers over the atomic bitops (test/set/clear_bit)
 * operating on the tp->tg3_flags bitmap.  The tg3_flag*() macros below
 * token-paste the TG3_FLAG_ prefix so call sites use the short flag name.
 * NOTE(review): function bodies/braces are partially elided in this extract;
 * code left byte-identical.
 */
67 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
69 return test_bit(flag, bits);
72 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
77 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
79 clear_bit(flag, bits);
82 #define tg3_flag(tp, flag) \
83 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
84 #define tg3_flag_set(tp, flag) \
85 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
86 #define tg3_flag_clear(tp, flag) \
87 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
/* Driver identity and basic operating constants (version, default modes,
 * TX watchdog timeout, MTU bounds).  TG3_MAX_MTU depends on the per-device
 * JUMBO_CAPABLE flag.  NOTE(review): some continuation lines of these
 * macros are elided in this extract.
 */
89 #define DRV_MODULE_NAME "tg3"
91 #define TG3_MIN_NUM 118
92 #define DRV_MODULE_VERSION \
93 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
94 #define DRV_MODULE_RELDATE "April 22, 2011"
96 #define TG3_DEF_MAC_MODE 0
97 #define TG3_DEF_RX_MODE 0
98 #define TG3_DEF_TX_MODE 0
99 #define TG3_DEF_MSG_ENABLE \
109 /* length of time before we decide the hardware is borked,
110 * and dev->tx_timeout() should be called to fix the problem
113 #define TG3_TX_TIMEOUT (5 * HZ)
115 /* hardware minimum and maximum for a single frame's data payload */
116 #define TG3_MIN_MTU 60
117 #define TG3_MAX_MTU(tp) \
118 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* Ring geometry constants.  Ring sizes are fixed by firmware; the *_RING_SIZE
 * macros select the larger 5717-class sizes when LRG_PROD_RING_CAP is set.
 * TX ring size is a compile-time power of two so NEXT_TX can use a mask
 * instead of a modulo.  The *_MAP_SZ macros pad DMA buffers by
 * TG3_DMA_BYTE_ENAB bytes for the mapping length.
 */
120 /* These numbers seem to be hard coded in the NIC firmware somehow.
121 * You can't change the ring sizes, but you can change where you place
122 * them in the NIC onboard memory.
124 #define TG3_RX_STD_RING_SIZE(tp) \
125 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
126 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
127 #define TG3_DEF_RX_RING_PENDING 200
128 #define TG3_RX_JMB_RING_SIZE(tp) \
129 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
130 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
131 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
132 #define TG3_RSS_INDIR_TBL_SIZE 128
134 /* Do not place this n-ring entries value into the tp struct itself,
135 * we really want to expose these constants to GCC so that modulo et
136 * al. operations are done with shifts and masks instead of with
137 * hw multiply/modulo instructions. Another solution would be to
138 * replace things like '% foo' with '& (foo - 1)'.
141 #define TG3_TX_RING_SIZE 512
142 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
144 #define TG3_RX_STD_RING_BYTES(tp) \
145 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
146 #define TG3_RX_JMB_RING_BYTES(tp) \
147 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
148 #define TG3_RX_RCB_RING_BYTES(tp) \
149 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
150 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
152 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
154 #define TG3_DMA_BYTE_ENAB 64
156 #define TG3_RX_STD_DMA_SZ 1536
157 #define TG3_RX_JMB_DMA_SZ 9046
159 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
161 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
162 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
164 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
165 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
167 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
168 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* RX copy-threshold selection: on architectures with efficient unaligned
 * access the threshold is a compile-time constant, otherwise it is read
 * from tp->rx_copy_thresh.  NOTE(review): the #else/#endif of the
 * conditional appear elided in this extract.  Also defines firmware blob
 * names used by MODULE_FIRMWARE below.
 */
170 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
171 * that are at least dword aligned when used in PCIX mode. The driver
172 * works around this bug by double copying the packet. This workaround
173 * is built into the normal double copy length check for efficiency.
175 * However, the double copy is only necessary on those architectures
176 * where unaligned memory accesses are inefficient. For those architectures
177 * where unaligned memory accesses incur little penalty, we can reintegrate
178 * the 5701 in the normal rx path. Doing so saves a device structure
179 * dereference by hardcoding the double copy threshold in place.
181 #define TG3_RX_COPY_THRESHOLD 256
182 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
183 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
185 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
188 /* minimum number of free TX descriptors required to wake up TX process */
189 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
191 #define TG3_RAW_IP_ALIGN 2
193 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
195 #define FIRMWARE_TG3 "tigon/tg3.bin"
196 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
197 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
/* Module metadata: banner string, author/license tags, firmware
 * declarations, and the tg3_debug module parameter (-1 selects
 * TG3_DEF_MSG_ENABLE at probe time).
 */
199 static char version[] __devinitdata =
200 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
202 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
203 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
204 MODULE_LICENSE("GPL");
205 MODULE_VERSION(DRV_MODULE_VERSION);
206 MODULE_FIRMWARE(FIRMWARE_TG3);
207 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
208 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
210 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
211 module_param(tg3_debug, int, 0);
212 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
/* PCI device ID table: every Broadcom/Altima/SysKonnect/Apple device this
 * driver binds to.  Exported via MODULE_DEVICE_TABLE for hotplug matching.
 * NOTE(review): the terminating { } sentinel entry appears elided in this
 * extract.
 */
214 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
215 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
216 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
217 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
218 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
219 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
220 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
221 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
222 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
223 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
224 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
225 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
288 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
289 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
290 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
291 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
292 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
293 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
294 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
298 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
/* ethtool statistics name table; order must match the hardware statistics
 * block layout the driver copies out (TG3_NUM_STATS = entry count).
 * NOTE(review): several entries and the closing brace appear elided in
 * this extract.
 */
300 static const struct {
301 const char string[ETH_GSTRING_LEN];
302 } ethtool_stats_keys[] = {
305 { "rx_ucast_packets" },
306 { "rx_mcast_packets" },
307 { "rx_bcast_packets" },
309 { "rx_align_errors" },
310 { "rx_xon_pause_rcvd" },
311 { "rx_xoff_pause_rcvd" },
312 { "rx_mac_ctrl_rcvd" },
313 { "rx_xoff_entered" },
314 { "rx_frame_too_long_errors" },
316 { "rx_undersize_packets" },
317 { "rx_in_length_errors" },
318 { "rx_out_length_errors" },
319 { "rx_64_or_less_octet_packets" },
320 { "rx_65_to_127_octet_packets" },
321 { "rx_128_to_255_octet_packets" },
322 { "rx_256_to_511_octet_packets" },
323 { "rx_512_to_1023_octet_packets" },
324 { "rx_1024_to_1522_octet_packets" },
325 { "rx_1523_to_2047_octet_packets" },
326 { "rx_2048_to_4095_octet_packets" },
327 { "rx_4096_to_8191_octet_packets" },
328 { "rx_8192_to_9022_octet_packets" },
335 { "tx_flow_control" },
337 { "tx_single_collisions" },
338 { "tx_mult_collisions" },
340 { "tx_excessive_collisions" },
341 { "tx_late_collisions" },
342 { "tx_collide_2times" },
343 { "tx_collide_3times" },
344 { "tx_collide_4times" },
345 { "tx_collide_5times" },
346 { "tx_collide_6times" },
347 { "tx_collide_7times" },
348 { "tx_collide_8times" },
349 { "tx_collide_9times" },
350 { "tx_collide_10times" },
351 { "tx_collide_11times" },
352 { "tx_collide_12times" },
353 { "tx_collide_13times" },
354 { "tx_collide_14times" },
355 { "tx_collide_15times" },
356 { "tx_ucast_packets" },
357 { "tx_mcast_packets" },
358 { "tx_bcast_packets" },
359 { "tx_carrier_sense_errors" },
363 { "dma_writeq_full" },
364 { "dma_write_prioq_full" },
368 { "rx_threshold_hit" },
370 { "dma_readq_full" },
371 { "dma_read_prioq_full" },
372 { "tx_comp_queue_full" },
374 { "ring_set_send_prod_index" },
375 { "ring_status_update" },
377 { "nic_avoided_irqs" },
378 { "nic_tx_threshold_hit" },
380 { "mbuf_lwm_thresh_hit" },
383 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
/* ethtool self-test name table; "(online)" tests run without taking the
 * link down, "(offline)" tests do.  TG3_NUM_TEST = entry count.
 */
386 static const struct {
387 const char string[ETH_GSTRING_LEN];
388 } ethtool_test_keys[] = {
389 { "nvram test (online) " },
390 { "link test (online) " },
391 { "register test (offline)" },
392 { "memory test (offline)" },
393 { "loopback test (offline)" },
394 { "interrupt test (offline)" },
397 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
/* Plain MMIO accessors: tg3_{read,write}32 hit the main register window
 * (tp->regs); the _ape variants hit the APE register window (tp->aperegs).
 * NOTE(review): braces are elided in this extract; code left byte-identical.
 */
400 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
402 writel(val, tp->regs + off);
405 static u32 tg3_read32(struct tg3 *tp, u32 off)
407 return readl(tp->regs + off);
410 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
412 writel(val, tp->aperegs + off);
415 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
417 return readl(tp->aperegs + off);
/* Indirect register access through the PCI config-space window
 * (TG3PCI_REG_BASE_ADDR selects the register, TG3PCI_REG_DATA carries the
 * data).  The base-addr/data pair is serialized with tp->indirect_lock.
 * tg3_write_flush_reg32 is a direct write followed by a read-back so the
 * posted write reaches the chip before the caller proceeds.
 */
420 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
424 spin_lock_irqsave(&tp->indirect_lock, flags);
425 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
426 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
427 spin_unlock_irqrestore(&tp->indirect_lock, flags);
430 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
432 writel(val, tp->regs + off);
433 readl(tp->regs + off);
436 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
441 spin_lock_irqsave(&tp->indirect_lock, flags);
442 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
443 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
444 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Mailbox access in indirect mode.  Two mailboxes have dedicated PCI
 * config-space aliases (RX return ring consumer index and RX std producer
 * index); everything else goes through the indirect window at off + 0x5600.
 * When disabling interrupts via the interrupt mailbox, the GRC local-ctrl
 * interrupt bit must also be cleared (hardware quirk, see comment below).
 */
448 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
452 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
453 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
454 TG3_64BIT_REG_LOW, val);
457 if (off == TG3_RX_STD_PROD_IDX_REG) {
458 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
459 TG3_64BIT_REG_LOW, val);
463 spin_lock_irqsave(&tp->indirect_lock, flags);
464 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
465 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
466 spin_unlock_irqrestore(&tp->indirect_lock, flags);
468 /* In indirect mode when disabling interrupts, we also need
469 * to clear the interrupt bit in the GRC local ctrl register.
471 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
473 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
474 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
478 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
483 spin_lock_irqsave(&tp->indirect_lock, flags);
484 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
485 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
486 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Register/mailbox write helpers with flushing and chip-quirk handling:
 * _tw32_flush uses a non-posted path on PCIX_TARGET_HWBUG/ICH_WORKAROUND
 * parts; tw32_mailbox_flush reads back unless the chip reorders mailbox
 * writes; tg3_write32_tx_mbox works around TXD_MBOX_HWBUG; the _5906
 * variants address mailboxes via the GRC mailbox alias region.
 * NOTE(review): several statements are elided in this extract.
 */
490 /* usec_wait specifies the wait time in usec when writing to certain registers
491 * where it is unsafe to read back the register without some delay.
492 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
493 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
495 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
497 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
498 /* Non-posted methods */
499 tp->write32(tp, off, val);
502 tg3_write32(tp, off, val);
507 /* Wait again after the read for the posted method to guarantee that
508 * the wait time is met.
514 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
516 tp->write32_mbox(tp, off, val);
517 if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
518 tp->read32_mbox(tp, off);
521 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
523 void __iomem *mbox = tp->regs + off;
525 if (tg3_flag(tp, TXD_MBOX_HWBUG))
527 if (tg3_flag(tp, MBOX_WRITE_REORDER))
531 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
533 return readl(tp->regs + off + GRCMBOX_BASE);
536 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
538 writel(val, tp->regs + off + GRCMBOX_BASE);
/* Register-access shorthand macros.  All dispatch through function pointers
 * in struct tg3 so the direct/indirect/flushed method is chosen once at
 * probe time.  They implicitly use a local variable named 'tp'.
 */
541 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
542 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
543 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
544 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
545 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
547 #define tw32(reg, val) tp->write32(tp, reg, val)
548 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
549 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
550 #define tr32(reg) tp->read32(tp, reg)
/* NIC SRAM access through the memory window (TG3PCI_MEM_WIN_BASE_ADDR /
 * TG3PCI_MEM_WIN_DATA), serialized by tp->indirect_lock.  The 5906 cannot
 * access the stats block region this way (early-return quirk guarded at
 * the top of each function).  With SRAM_USE_CONFIG the window is driven
 * via PCI config space, otherwise via direct register writes; the window
 * base is always reset to 0 afterwards.
 */
552 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
556 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
557 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
560 spin_lock_irqsave(&tp->indirect_lock, flags);
561 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
562 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
563 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
565 /* Always leave this as zero. */
566 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
568 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
569 tw32_f(TG3PCI_MEM_WIN_DATA, val);
571 /* Always leave this as zero. */
572 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
574 spin_unlock_irqrestore(&tp->indirect_lock, flags);
577 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
581 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
582 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
587 spin_lock_irqsave(&tp->indirect_lock, flags);
588 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
589 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
590 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
592 /* Always leave this as zero. */
593 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
595 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
596 *val = tr32(TG3PCI_MEM_WIN_DATA);
598 /* Always leave this as zero. */
599 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
601 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* APE (Application Processing Engine) hardware semaphores.  The 5761 uses
 * the legacy TG3_APE_LOCK_* register block, later chips the PER_LOCK block;
 * each lock is a 4-byte slot (hence the 4 * i / 4 * locknum addressing).
 * tg3_ape_lock polls the grant register up to ~1 ms (100 iterations) and
 * revokes its own request on timeout.  All APE ops are no-ops unless the
 * ENABLE_APE flag is set.
 */
604 static void tg3_ape_lock_init(struct tg3 *tp)
609 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
610 regbase = TG3_APE_LOCK_GRANT;
612 regbase = TG3_APE_PER_LOCK_GRANT;
614 /* Make sure the driver hasn't any stale locks. */
615 for (i = 0; i < 8; i++)
616 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
619 static int tg3_ape_lock(struct tg3 *tp, int locknum)
623 u32 status, req, gnt;
625 if (!tg3_flag(tp, ENABLE_APE))
629 case TG3_APE_LOCK_GRC:
630 case TG3_APE_LOCK_MEM:
636 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
637 req = TG3_APE_LOCK_REQ;
638 gnt = TG3_APE_LOCK_GRANT;
640 req = TG3_APE_PER_LOCK_REQ;
641 gnt = TG3_APE_PER_LOCK_GRANT;
646 tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);
648 /* Wait for up to 1 millisecond to acquire lock. */
649 for (i = 0; i < 100; i++) {
650 status = tg3_ape_read32(tp, gnt + off);
651 if (status == APE_LOCK_GRANT_DRIVER)
656 if (status != APE_LOCK_GRANT_DRIVER) {
657 /* Revoke the lock request. */
658 tg3_ape_write32(tp, gnt + off,
659 APE_LOCK_GRANT_DRIVER);
667 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
671 if (!tg3_flag(tp, ENABLE_APE))
675 case TG3_APE_LOCK_GRC:
676 case TG3_APE_LOCK_MEM:
682 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
683 gnt = TG3_APE_LOCK_GRANT;
685 gnt = TG3_APE_PER_LOCK_GRANT;
687 tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
/* Interrupt gating.  Disable: mask PCI interrupts in MISC_HOST_CTRL and
 * write 1 to every per-vector interrupt mailbox.  Enable: unmask, then
 * re-arm each vector's mailbox with its last status tag (written twice
 * for 1SHOT_MSI parts), accumulate coal_now bits, and force an initial
 * interrupt if non-tagged status is already pending.
 */
690 static void tg3_disable_ints(struct tg3 *tp)
694 tw32(TG3PCI_MISC_HOST_CTRL,
695 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
696 for (i = 0; i < tp->irq_max; i++)
697 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
700 static void tg3_enable_ints(struct tg3 *tp)
707 tw32(TG3PCI_MISC_HOST_CTRL,
708 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
710 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
711 for (i = 0; i < tp->irq_cnt; i++) {
712 struct tg3_napi *tnapi = &tp->napi[i];
714 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
715 if (tg3_flag(tp, 1SHOT_MSI))
716 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
718 tp->coal_now |= tnapi->coal_now;
721 /* Force an initial interrupt */
722 if (!tg3_flag(tp, TAGGED_STATUS) &&
723 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
724 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
726 tw32(HOSTCC_MODE, tp->coal_now);
728 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
/* tg3_has_work: report pending work for a NAPI vector — link-change status
 * (unless the driver polls the link via LINKCHG_REG/POLL_SERDES), TX
 * completions behind tx_cons, or new RX return-ring entries.
 * tg3_int_reenable: re-arm the vector's mailbox without flushing; in
 * non-tagged mode, force a HOSTCC poke if work arrived meanwhile.
 */
731 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
733 struct tg3 *tp = tnapi->tp;
734 struct tg3_hw_status *sblk = tnapi->hw_status;
735 unsigned int work_exists = 0;
737 /* check for phy events */
738 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
739 if (sblk->status & SD_STATUS_LINK_CHG)
742 /* check for RX/TX work to do */
743 if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
744 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
751 * similar to tg3_enable_ints, but it accurately determines whether there
752 * is new work pending and can return without flushing the PIO write
753 * which reenables interrupts
755 static void tg3_int_reenable(struct tg3_napi *tnapi)
757 struct tg3 *tp = tnapi->tp;
759 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
762 /* When doing tagged status, this work check is unnecessary.
763 * The last_tag we write above tells the chip which piece of
764 * work we've completed.
766 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
767 tw32(HOSTCC_MODE, tp->coalesce_mode |
768 HOSTCC_MODE_ENABLE | tnapi->coal_now);
/* Switch the core clock source via TG3PCI_CLOCK_CTRL, preserving the
 * CLKRUN-related bits.  No-op on CPMU-equipped and 5780-class parts.
 * Transitions step through ALTCLK with 40 usec settle waits
 * (tw32_wait_f) as the hardware requires.
 */
771 static void tg3_switch_clocks(struct tg3 *tp)
776 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
779 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
781 orig_clock_ctrl = clock_ctrl;
782 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
783 CLOCK_CTRL_CLKRUN_OENABLE |
785 tp->pci_clock_ctrl = clock_ctrl;
787 if (tg3_flag(tp, 5705_PLUS)) {
788 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
789 tw32_wait_f(TG3PCI_CLOCK_CTRL,
790 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
792 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
793 tw32_wait_f(TG3PCI_CLOCK_CTRL,
795 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
797 tw32_wait_f(TG3PCI_CLOCK_CTRL,
798 clock_ctrl | (CLOCK_CTRL_ALTCLK),
801 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
/* MII PHY register access via the MAC_MI_COM shuttle register.  Both
 * functions temporarily drop MAC_MI_MODE_AUTO_POLL (restoring it on exit),
 * build a MI_COM frame from the PHY address and register number, then
 * busy-poll MI_COM_BUSY for up to PHY_BUSY_LOOPS iterations.
 * tg3_writephy additionally refuses MII_TG3_CTRL/MII_TG3_AUX_CTRL writes
 * on FET-style PHYs, which do not have those registers at that address.
 * NOTE(review): error-path and return statements are elided in this extract.
 */
804 #define PHY_BUSY_LOOPS 5000
806 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
812 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
814 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
820 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
821 MI_COM_PHY_ADDR_MASK);
822 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
823 MI_COM_REG_ADDR_MASK);
824 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
826 tw32_f(MAC_MI_COM, frame_val);
828 loops = PHY_BUSY_LOOPS;
831 frame_val = tr32(MAC_MI_COM);
833 if ((frame_val & MI_COM_BUSY) == 0) {
835 frame_val = tr32(MAC_MI_COM);
843 *val = frame_val & MI_COM_DATA_MASK;
847 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
848 tw32_f(MAC_MI_MODE, tp->mi_mode);
855 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
861 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
862 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
865 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
867 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
871 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
872 MI_COM_PHY_ADDR_MASK);
873 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
874 MI_COM_REG_ADDR_MASK);
875 frame_val |= (val & MI_COM_DATA_MASK);
876 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
878 tw32_f(MAC_MI_COM, frame_val);
880 loops = PHY_BUSY_LOOPS;
883 frame_val = tr32(MAC_MI_COM);
884 if ((frame_val & MI_COM_BUSY) == 0) {
886 frame_val = tr32(MAC_MI_COM);
896 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
897 tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Clause-45 access tunneled over clause-22 MMD registers: select the
 * device address in MMD_CTRL, write the register address, switch MMD_CTRL
 * to no-post-increment data mode, then move the data through MMD_ADDRESS.
 * Each step bails out on error (error checks partially elided here).
 */
904 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
908 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
912 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
916 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
917 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
921 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
927 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
931 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
935 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
939 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
940 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
944 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
/* DSP and auxiliary-control shadow register helpers: DSP registers are
 * addressed through DSP_ADDRESS then moved via DSP_RW_PORT; auxctl shadow
 * registers are selected by the low bits of MII_TG3_AUX_CTRL.  Writes to
 * the MISC shadow need the WREN bit set.
 */
950 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
954 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
956 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
961 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
965 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
967 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
972 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
976 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
977 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
978 MII_TG3_AUXCTL_SHDWSEL_MISC);
980 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
985 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
987 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
988 set |= MII_TG3_AUXCTL_MISC_WREN;
990 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
/* Convenience macros for enabling/disabling SMDSP via the auxctl shadow.
 * NOTE(review): SMDSP_DISABLE expands with a trailing ';' — callers must
 * not use it in expression context.  tg3_bmcr_reset issues BMCR_RESET and
 * polls BMCR until the self-clearing reset bit drops (timeout path elided
 * in this extract).
 */
993 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
994 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
995 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
996 MII_TG3_AUXCTL_ACTL_TX_6DB)
998 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
999 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1000 MII_TG3_AUXCTL_ACTL_TX_6DB);
1002 static int tg3_bmcr_reset(struct tg3 *tp)
1007 /* OK, reset it, and poll the BMCR_RESET bit until it
1008 * clears or we time out.
1010 phy_control = BMCR_RESET;
1011 err = tg3_writephy(tp, MII_BMCR, phy_control);
1017 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1021 if ((phy_control & BMCR_RESET) == 0) {
/* phylib mii_bus callbacks: forward bus accesses to tg3_readphy/
 * tg3_writephy under tp->lock (BH-disabling spinlock).  tg3_mdio_reset is
 * effectively a no-op (body elided here, but no statements are visible).
 */
1033 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1035 struct tg3 *tp = bp->priv;
1038 spin_lock_bh(&tp->lock);
1040 if (tg3_readphy(tp, reg, &val))
1043 spin_unlock_bh(&tp->lock);
1048 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1050 struct tg3 *tp = bp->priv;
1053 spin_lock_bh(&tp->lock);
1055 if (tg3_writephy(tp, reg, val))
1058 spin_unlock_bh(&tp->lock);
1063 static int tg3_mdio_reset(struct mii_bus *bp)
/* 5785 MAC/PHY interface configuration.  Chooses LED modes in MAC_PHYCFG2
 * by attached PHY model, then programs MAC_PHYCFG1/MAC_PHYCFG2 and
 * MAC_EXT_RGMII_MODE according to the RGMII in-band/out-of-band status
 * flags.  The non-RGMII path returns early after setting clock timeouts.
 * NOTE(review): break statements and some closing braces are elided in
 * this extract.
 */
1068 static void tg3_mdio_config_5785(struct tg3 *tp)
1071 struct phy_device *phydev;
1073 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1074 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1075 case PHY_ID_BCM50610:
1076 case PHY_ID_BCM50610M:
1077 val = MAC_PHYCFG2_50610_LED_MODES;
1079 case PHY_ID_BCMAC131:
1080 val = MAC_PHYCFG2_AC131_LED_MODES;
1082 case PHY_ID_RTL8211C:
1083 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1085 case PHY_ID_RTL8201E:
1086 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1092 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1093 tw32(MAC_PHYCFG2, val);
1095 val = tr32(MAC_PHYCFG1);
1096 val &= ~(MAC_PHYCFG1_RGMII_INT |
1097 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1098 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1099 tw32(MAC_PHYCFG1, val);
1104 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1105 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1106 MAC_PHYCFG2_FMODE_MASK_MASK |
1107 MAC_PHYCFG2_GMODE_MASK_MASK |
1108 MAC_PHYCFG2_ACT_MASK_MASK |
1109 MAC_PHYCFG2_QUAL_MASK_MASK |
1110 MAC_PHYCFG2_INBAND_ENABLE;
1112 tw32(MAC_PHYCFG2, val);
1114 val = tr32(MAC_PHYCFG1);
1115 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1116 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1117 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1118 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1119 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1120 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1121 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1123 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1124 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1125 tw32(MAC_PHYCFG1, val);
1127 val = tr32(MAC_EXT_RGMII_MODE);
1128 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1129 MAC_RGMII_MODE_RX_QUALITY |
1130 MAC_RGMII_MODE_RX_ACTIVITY |
1131 MAC_RGMII_MODE_RX_ENG_DET |
1132 MAC_RGMII_MODE_TX_ENABLE |
1133 MAC_RGMII_MODE_TX_LOWPWR |
1134 MAC_RGMII_MODE_TX_RESET);
1135 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1136 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1137 val |= MAC_RGMII_MODE_RX_INT_B |
1138 MAC_RGMII_MODE_RX_QUALITY |
1139 MAC_RGMII_MODE_RX_ACTIVITY |
1140 MAC_RGMII_MODE_RX_ENG_DET;
1141 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1142 val |= MAC_RGMII_MODE_TX_ENABLE |
1143 MAC_RGMII_MODE_TX_LOWPWR |
1144 MAC_RGMII_MODE_TX_RESET;
1146 tw32(MAC_EXT_RGMII_MODE, val);
/* Start MDIO: clear hardware auto-polling in MAC_MI_MODE (the driver
 * drives the bus itself), and reapply the 5785 interface config if the
 * mdio bus was already initialized.
 */
1149 static void tg3_mdio_start(struct tg3 *tp)
1151 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1152 tw32_f(MAC_MI_MODE, tp->mi_mode);
1155 if (tg3_flag(tp, MDIOBUS_INITED) &&
1156 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1157 tg3_mdio_config_5785(tp);
/* Initialize the phylib mdio bus.
 *
 * 5717-class parts derive the PHY address from the PCI function number
 * (and probe serdes strapping); everything else uses TG3_PHY_MII_ADDR.
 * When USE_PHYLIB is set and the bus is not yet initialized, allocate and
 * register an mii_bus with polled IRQs, verify a usable PHY driver bound,
 * and apply per-PHY dev_flags/interface quirks (RGMII in-band settings for
 * BCM50610 parts, MII for FET-style PHYs, etc).  Error paths unregister/
 * free the bus (some of them are elided in this extract).
 *
 * Fix: line 1209 contained a mis-encoded "®" where the address-of
 * expression "&reg" belongs (HTML-entity corruption); restored so the
 * BMCR power-down probe compiles and reads into the local 'reg'.
 */
1160 static int tg3_mdio_init(struct tg3 *tp)
1164 struct phy_device *phydev;
1166 if (tg3_flag(tp, 5717_PLUS)) {
1169 tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
1171 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1172 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1174 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1175 TG3_CPMU_PHY_STRAP_IS_SERDES;
1179 tp->phy_addr = TG3_PHY_MII_ADDR;
1183 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1186 tp->mdio_bus = mdiobus_alloc();
1187 if (tp->mdio_bus == NULL)
1190 tp->mdio_bus->name = "tg3 mdio bus";
1191 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1192 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1193 tp->mdio_bus->priv = tp;
1194 tp->mdio_bus->parent = &tp->pdev->dev;
1195 tp->mdio_bus->read = &tg3_mdio_read;
1196 tp->mdio_bus->write = &tg3_mdio_write;
1197 tp->mdio_bus->reset = &tg3_mdio_reset;
1198 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1199 tp->mdio_bus->irq = &tp->mdio_irq[0];
1201 for (i = 0; i < PHY_MAX_ADDR; i++)
1202 tp->mdio_bus->irq[i] = PHY_POLL;
1204 /* The bus registration will look for all the PHYs on the mdio bus.
1205 * Unfortunately, it does not ensure the PHY is powered up before
1206 * accessing the PHY ID registers. A chip reset is the
1207 * quickest way to bring the device back to an operational state..
1209 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1212 i = mdiobus_register(tp->mdio_bus);
1214 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1215 mdiobus_free(tp->mdio_bus);
1219 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1221 if (!phydev || !phydev->drv) {
1222 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1223 mdiobus_unregister(tp->mdio_bus);
1224 mdiobus_free(tp->mdio_bus);
1228 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1229 case PHY_ID_BCM57780:
1230 phydev->interface = PHY_INTERFACE_MODE_GMII;
1231 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1233 case PHY_ID_BCM50610:
1234 case PHY_ID_BCM50610M:
1235 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1236 PHY_BRCM_RX_REFCLK_UNUSED |
1237 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1238 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1239 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1240 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1241 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1242 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1243 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1244 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1246 case PHY_ID_RTL8211C:
1247 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1249 case PHY_ID_RTL8201E:
1250 case PHY_ID_BCMAC131:
1251 phydev->interface = PHY_INTERFACE_MODE_MII;
1252 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1253 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1257 tg3_flag_set(tp, MDIOBUS_INITED);
1259 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1260 tg3_mdio_config_5785(tp);
/* Tear down the mdiobus registered by tg3_mdio_init(); clearing the
 * MDIOBUS_INITED flag first makes the teardown idempotent.
 */
1265 static void tg3_mdio_fini(struct tg3 *tp)
1267 if (tg3_flag(tp, MDIOBUS_INITED)) {
1268 tg3_flag_clear(tp, MDIOBUS_INITED);
1269 mdiobus_unregister(tp->mdio_bus);
1270 mdiobus_free(tp->mdio_bus);
1274 /* tp->lock is held. */
/* Raise the driver->firmware doorbell bit in GRC_RX_CPU_EVENT and
 * record the time so tg3_wait_for_event_ack() can bound its wait.
 */
1275 static inline void tg3_generate_fw_event(struct tg3 *tp)
1279 val = tr32(GRC_RX_CPU_EVENT);
1280 val |= GRC_RX_CPU_DRIVER_EVENT;
1281 tw32_f(GRC_RX_CPU_EVENT, val);
1283 tp->last_event_jiffies = jiffies;
1286 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1288 /* tp->lock is held. */
/* Poll until firmware acknowledges the previous driver event (clears
 * GRC_RX_CPU_DRIVER_EVENT), bounded by TG3_FW_EVENT_TIMEOUT_USEC and
 * shortened by however long has already elapsed since the event.
 */
1289 static void tg3_wait_for_event_ack(struct tg3 *tp)
1292 unsigned int delay_cnt;
1295 /* If enough time has passed, no wait is necessary. */
1296 time_remain = (long)(tp->last_event_jiffies + 1 +
1297 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1299 if (time_remain < 0)
1302 /* Check if we can shorten the wait time. */
1303 delay_cnt = jiffies_to_usecs(time_remain);
1304 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1305 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
/* Loop iterations poll in ~8 usec steps (delay per iteration elided). */
1306 delay_cnt = (delay_cnt >> 3) + 1;
1308 for (i = 0; i < delay_cnt; i++) {
1309 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1315 /* tp->lock is held. */
/* Report current MII register state (BMCR/BMSR, ADVERTISE/LPA,
 * CTRL1000/STAT1000, PHYADDR) to the management firmware on 5780-class
 * parts with ASF enabled: wait for the previous event ack, write a
 * LINK_UPDATE command plus 14 bytes of data mailboxes, then ring the
 * firmware doorbell.
 *
 * Fix: restored '&reg' where extraction left the mis-decoded entity
 * residue '®' in every tg3_readphy() call below.
 */
1316 static void tg3_ump_link_report(struct tg3 *tp)
1321 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1324 tg3_wait_for_event_ack(tp);
1326 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1328 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1331 if (!tg3_readphy(tp, MII_BMCR, &reg))
1333 if (!tg3_readphy(tp, MII_BMSR, &reg))
1334 val |= (reg & 0xffff);
1335 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1338 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1340 if (!tg3_readphy(tp, MII_LPA, &reg))
1341 val |= (reg & 0xffff);
1342 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1345 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1346 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1348 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1349 val |= (reg & 0xffff);
1351 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1353 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1357 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1359 tg3_generate_fw_event(tp);
/* Log link state transitions (speed/duplex/flow-control/EEE) via
 * netif_info/netdev_info and forward the update to management firmware
 * through tg3_ump_link_report().
 */
1362 static void tg3_link_report(struct tg3 *tp)
1364 if (!netif_carrier_ok(tp->dev)) {
1365 netif_info(tp, link, tp->dev, "Link is down\n");
1366 tg3_ump_link_report(tp);
1367 } else if (netif_msg_link(tp)) {
1368 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1369 (tp->link_config.active_speed == SPEED_1000 ?
1371 (tp->link_config.active_speed == SPEED_100 ?
1373 (tp->link_config.active_duplex == DUPLEX_FULL ?
1376 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1377 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1379 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1382 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1383 netdev_info(tp->dev, "EEE is %s\n",
1384 tp->setlpicnt ? "enabled" : "disabled");
1386 tg3_ump_link_report(tp);
/* Map FLOW_CTRL_TX/RX request bits to the copper (1000BASE-T) MII
 * pause-advertisement bits (ADVERTISE_PAUSE_CAP/ASYM).
 */
1390 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1394 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1395 miireg = ADVERTISE_PAUSE_CAP;
1396 else if (flow_ctrl & FLOW_CTRL_TX)
1397 miireg = ADVERTISE_PAUSE_ASYM;
1398 else if (flow_ctrl & FLOW_CTRL_RX)
1399 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
/* Same mapping as tg3_advert_flowctrl_1000T() but for 1000BASE-X
 * (serdes) advertisement bits.
 */
1406 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1410 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1411 miireg = ADVERTISE_1000XPAUSE;
1412 else if (flow_ctrl & FLOW_CTRL_TX)
1413 miireg = ADVERTISE_1000XPSE_ASYM;
1414 else if (flow_ctrl & FLOW_CTRL_RX)
1415 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
/* Resolve negotiated 1000BASE-X pause capability from local and
 * link-partner advertisements into FLOW_CTRL_TX/RX bits, per the
 * 802.3 pause resolution table.
 */
1422 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1426 if (lcladv & ADVERTISE_1000XPAUSE) {
1427 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1428 if (rmtadv & LPA_1000XPAUSE)
1429 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1430 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1433 if (rmtadv & LPA_1000XPAUSE)
1434 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1436 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1437 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
/* Compute the active flow-control setting (from autoneg resolution or
 * the forced configuration) and program MAC_RX_MODE/MAC_TX_MODE flow
 * control enables, touching the registers only when the mode changed.
 */
1444 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1448 u32 old_rx_mode = tp->rx_mode;
1449 u32 old_tx_mode = tp->tx_mode;
/* Under phylib the autoneg state lives in the phy_device, not tg3. */
1451 if (tg3_flag(tp, USE_PHYLIB))
1452 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1454 autoneg = tp->link_config.autoneg;
1456 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1457 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1458 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1460 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1462 flowctrl = tp->link_config.flowctrl;
1464 tp->link_config.active_flowctrl = flowctrl;
1466 if (flowctrl & FLOW_CTRL_RX)
1467 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1469 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1471 if (old_rx_mode != tp->rx_mode)
1472 tw32_f(MAC_RX_MODE, tp->rx_mode);
1474 if (flowctrl & FLOW_CTRL_TX)
1475 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1477 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1479 if (old_tx_mode != tp->tx_mode)
1480 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* phylib link-change callback: translate the phy_device state
 * (speed/duplex/pause) into MAC_MODE, MAC_MI_STAT and MAC_TX_LENGTHS
 * register settings under tp->lock, then report the link change if
 * anything observable moved.
 */
1483 static void tg3_adjust_link(struct net_device *dev)
1485 u8 oldflowctrl, linkmesg = 0;
1486 u32 mac_mode, lcl_adv, rmt_adv;
1487 struct tg3 *tp = netdev_priv(dev);
1488 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1490 spin_lock_bh(&tp->lock);
1492 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1493 MAC_MODE_HALF_DUPLEX);
1495 oldflowctrl = tp->link_config.active_flowctrl;
/* Select MII vs GMII port mode from the negotiated speed; 5785 is
 * special-cased (falls back to MII for non-gigabit there). */
1501 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1502 mac_mode |= MAC_MODE_PORT_MODE_MII;
1503 else if (phydev->speed == SPEED_1000 ||
1504 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1505 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1507 mac_mode |= MAC_MODE_PORT_MODE_MII;
1509 if (phydev->duplex == DUPLEX_HALF)
1510 mac_mode |= MAC_MODE_HALF_DUPLEX;
1512 lcl_adv = tg3_advert_flowctrl_1000T(
1513 tp->link_config.flowctrl);
1516 rmt_adv = LPA_PAUSE_CAP;
1517 if (phydev->asym_pause)
1518 rmt_adv |= LPA_PAUSE_ASYM;
1521 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1523 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1525 if (mac_mode != tp->mac_mode) {
1526 tp->mac_mode = mac_mode;
1527 tw32_f(MAC_MODE, tp->mac_mode);
1531 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1532 if (phydev->speed == SPEED_10)
1534 MAC_MI_STAT_10MBPS_MODE |
1535 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1537 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
/* 1000/half needs a larger slot time (0xff) for backoff timing. */
1540 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1541 tw32(MAC_TX_LENGTHS,
1542 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1543 (6 << TX_LENGTHS_IPG_SHIFT) |
1544 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1546 tw32(MAC_TX_LENGTHS,
1547 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1548 (6 << TX_LENGTHS_IPG_SHIFT) |
1549 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
/* Detect any user-visible link state change worth reporting. */
1551 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1552 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1553 phydev->speed != tp->link_config.active_speed ||
1554 phydev->duplex != tp->link_config.active_duplex ||
1555 oldflowctrl != tp->link_config.active_flowctrl)
1558 tp->link_config.active_speed = phydev->speed;
1559 tp->link_config.active_duplex = phydev->duplex;
1561 spin_unlock_bh(&tp->lock);
1564 tg3_link_report(tp);
/* Connect the MAC to its PHY via phylib (phy_connect with
 * tg3_adjust_link as the state callback), then clamp the advertised
 * feature set to what the MAC supports for the detected interface
 * mode.  Returns 0 on success or PTR_ERR from phy_connect.
 */
1567 static int tg3_phy_init(struct tg3 *tp)
1569 struct phy_device *phydev;
/* Already connected: nothing to do (early return elided). */
1571 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1574 /* Bring the PHY back to a known state. */
1577 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1579 /* Attach the MAC to the PHY. */
1580 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1581 phydev->dev_flags, phydev->interface);
1582 if (IS_ERR(phydev)) {
1583 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1584 return PTR_ERR(phydev);
1587 /* Mask with MAC supported features. */
1588 switch (phydev->interface) {
1589 case PHY_INTERFACE_MODE_GMII:
1590 case PHY_INTERFACE_MODE_RGMII:
1591 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1592 phydev->supported &= (PHY_GBIT_FEATURES |
1594 SUPPORTED_Asym_Pause);
1598 case PHY_INTERFACE_MODE_MII:
1599 phydev->supported &= (PHY_BASIC_FEATURES |
1601 SUPPORTED_Asym_Pause);
/* Unsupported interface mode: disconnect and bail (default case). */
1604 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1608 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1610 phydev->advertising = phydev->supported;
/* (Re)start autonegotiation on the connected PHY; when waking from the
 * driver's low-power state, first restore the saved ("orig") link
 * configuration into the phy_device.
 */
1615 static void tg3_phy_start(struct tg3 *tp)
1617 struct phy_device *phydev;
1619 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1622 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1624 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1625 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1626 phydev->speed = tp->link_config.orig_speed;
1627 phydev->duplex = tp->link_config.orig_duplex;
1628 phydev->autoneg = tp->link_config.orig_autoneg;
1629 phydev->advertising = tp->link_config.orig_advertising;
1634 phy_start_aneg(phydev);
/* Stop the phylib state machine for the attached PHY, if connected. */
1637 static void tg3_phy_stop(struct tg3 *tp)
1639 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1642 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
/* Disconnect from the PHY (undoes tg3_phy_init) and clear the
 * connected flag so a later init can reconnect.
 */
1645 static void tg3_phy_fini(struct tg3 *tp)
1647 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1648 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1649 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
/* Enable/disable Auto Power-Down on FET-style PHYs via the shadow
 * register window: open it with FET_SHADOW_EN, flip the APD bit in
 * SHDW_AUXSTAT2, then restore the original FET_TEST value to close it.
 */
1653 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1657 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1660 tg3_writephy(tp, MII_TG3_FET_TEST,
1661 phytest | MII_TG3_FET_SHADOW_EN);
1662 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1664 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1666 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1667 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1669 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
/* Enable/disable PHY Auto Power-Down.  Not applicable to pre-5705
 * parts or 5717+ MII serdes; FET PHYs use the shadow-register variant.
 * Otherwise program the SCR5 power-saving selector and the APD
 * enable/wake-timer via MII_TG3_MISC_SHDW writes.
 */
1673 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1677 if (!tg3_flag(tp, 5705_PLUS) ||
1678 (tg3_flag(tp, 5717_PLUS) &&
1679 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1682 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1683 tg3_phy_fet_toggle_apd(tp, enable);
1687 reg = MII_TG3_MISC_SHDW_WREN |
1688 MII_TG3_MISC_SHDW_SCR5_SEL |
1689 MII_TG3_MISC_SHDW_SCR5_LPED |
1690 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1691 MII_TG3_MISC_SHDW_SCR5_SDTL |
1692 MII_TG3_MISC_SHDW_SCR5_C125OE;
/* DLL auto power-down is kept on except when enabling APD on 5784. */
1693 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1694 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1696 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1699 reg = MII_TG3_MISC_SHDW_WREN |
1700 MII_TG3_MISC_SHDW_APD_SEL |
1701 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1703 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1705 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
/* Enable/disable automatic MDI/MDI-X crossover.  Skipped on pre-5705
 * and serdes PHYs; FET PHYs use the shadow MISCCTRL register, others
 * the AUXCTL MISC shadow selector.
 */
1708 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1712 if (!tg3_flag(tp, 5705_PLUS) ||
1713 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1716 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1719 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1720 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1722 tg3_writephy(tp, MII_TG3_FET_TEST,
1723 ephy | MII_TG3_FET_SHADOW_EN);
1724 if (!tg3_readphy(tp, reg, &phy)) {
1726 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1728 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1729 tg3_writephy(tp, reg, phy);
1731 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1736 ret = tg3_phy_auxctl_read(tp,
1737 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1740 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1742 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1743 tg3_phy_auxctl_write(tp,
1744 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
/* Turn on the PHY "ethernet wirespeed" downshift feature via the
 * AUXCTL MISC shadow register, unless the board disables it.
 */
1749 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1754 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1757 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1759 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1760 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
/* Transfer factory OTP calibration fields (AGC target, HPF, LPF, VDAC,
 * 10BT amplitude, resistor offsets) into the corresponding PHY DSP
 * registers, bracketed by SMDSP enable/disable.
 */
1763 static void tg3_phy_apply_otp(struct tg3 *tp)
/* Bail out if the SMDSP block cannot be enabled (non-zero return). */
1772 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1775 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1776 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1777 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1779 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1780 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1781 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1783 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1784 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1785 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1787 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1788 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1790 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1791 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1793 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1794 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1795 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1797 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
/* Track EEE (Energy Efficient Ethernet) state after a link change:
 * when a full-duplex 100/1000 autoneg link is up, program the LPI exit
 * timer and read the clause-45 EEE resolution status; if EEE did not
 * resolve (setlpicnt stayed 0), disable LPI in the CPMU.
 */
1800 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1804 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1809 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1810 current_link_up == 1 &&
1811 tp->link_config.active_duplex == DUPLEX_FULL &&
1812 (tp->link_config.active_speed == SPEED_100 ||
1813 tp->link_config.active_speed == SPEED_1000)) {
1816 if (tp->link_config.active_speed == SPEED_1000)
1817 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1819 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1821 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1823 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1824 TG3_CL45_D7_EEERES_STAT, &val);
1826 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1827 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1831 if (!tp->setlpicnt) {
1832 val = tr32(TG3_CPMU_EEE_MODE);
1833 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
/* Enable EEE LPI in the CPMU; on 5717/5719/57765 at gigabit speed,
 * first poke DSP TAP26 (via SMDSP) as a required workaround.
 */
1837 static void tg3_phy_eee_enable(struct tg3 *tp)
1841 if (tp->link_config.active_speed == SPEED_1000 &&
1842 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1843 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1844 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1845 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1846 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
1847 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1850 val = tr32(TG3_CPMU_EEE_MODE);
1851 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
/* Poll MII_TG3_DSP_CONTROL until the macro-busy bit (0x1000) clears;
 * returns 0 on completion (timeout path elided by extraction).
 */
1854 static int tg3_wait_macro_done(struct tg3 *tp)
1861 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1862 if ((tmp32 & 0x1000) == 0)
/* Write a fixed test pattern into each of the 4 PHY DSP channels, read
 * it back, and compare.  On mismatch, request a PHY reset via *resetp
 * and issue the 0x000b/0x4001/0x4005 recovery writes.  Returns 0 when
 * all channels verify (error returns elided by extraction).
 */
1874 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1875 static const u32 test_pat[4][6] = {
1876 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1877 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1878 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1879 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
/* Each channel occupies a 0x2000-aligned DSP address window. */
1882 for (chan = 0; chan < 4; chan++) {
1885 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1886 (chan * 0x2000) | 0x0200);
1887 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1889 for (i = 0; i < 6; i++)
1890 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1893 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1894 if (tg3_wait_macro_done(tp)) {
1899 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1900 (chan * 0x2000) | 0x0200);
1901 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1902 if (tg3_wait_macro_done(tp)) {
1907 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1908 if (tg3_wait_macro_done(tp)) {
/* Read back pairs (low, high) and verify against the pattern. */
1913 for (i = 0; i < 6; i += 2) {
1916 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1917 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1918 tg3_wait_macro_done(tp)) {
1924 if (low != test_pat[chan][i] ||
1925 high != test_pat[chan][i+1]) {
1926 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1927 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1928 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
/* Clear all 4 DSP channel pattern buffers by writing six zero words to
 * each channel window; returns non-zero if the macro never completes.
 */
1938 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1942 for (chan = 0; chan < 4; chan++) {
1945 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1946 (chan * 0x2000) | 0x0200);
1947 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1948 for (i = 0; i < 6; i++)
1949 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1950 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1951 if (tg3_wait_macro_done(tp))
/* Workaround reset sequence for 5703/5704/5705 PHYs: retry BMCR reset
 * and DSP test-pattern verification until the pattern sticks, clear
 * the channel buffers, then restore MII_TG3_CTRL and re-enable the
 * transmitter/interrupt bits saved at entry.
 *
 * Fix: restored '&reg32' where extraction left the mis-decoded entity
 * residue '®32' in both tg3_readphy(MII_TG3_EXT_CTRL, ...) calls.
 */
1958 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1960 u32 reg32, phy9_orig;
1961 int retries, do_phy_reset, err;
1967 err = tg3_bmcr_reset(tp);
1973 /* Disable transmitter and interrupt. */
1974 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1978 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1980 /* Set full-duplex, 1000 mbps. */
1981 tg3_writephy(tp, MII_BMCR,
1982 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1984 /* Set to master mode. */
1985 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1988 tg3_writephy(tp, MII_TG3_CTRL,
1989 (MII_TG3_CTRL_AS_MASTER |
1990 MII_TG3_CTRL_ENABLE_AS_MASTER))
1992 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1996 /* Block the PHY control access. */
1997 tg3_phydsp_write(tp, 0x8005, 0x0800);
1999 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2002 } while (--retries);
2004 err = tg3_phy_reset_chanpat(tp);
2008 tg3_phydsp_write(tp, 0x8005, 0x0000);
2010 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2011 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2013 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2015 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
2017 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2019 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2026 /* This will reset the tigon3 PHY if there is no valid
2027 * link unless the FORCE argument is non-zero.
/* Full PHY reset plus post-reset fixups: chooses the 5703/4/5
 * workaround path or a plain BMCR reset, reapplies OTP calibration,
 * APD, erratum DSP writes (ADC/BER/jitter bugs), jumbo-frame bits,
 * auto-MDIX and wirespeed.  Returns 0 on success or a tg3_readphy /
 * sub-reset error code.
 */
2029 static int tg3_phy_reset(struct tg3 *tp)
/* 5906: take the embedded PHY out of IDDQ (deep power-down) first. */
2034 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2035 val = tr32(GRC_MISC_CFG);
2036 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
/* BMSR is latched; read twice to get current link status. */
2039 err = tg3_readphy(tp, MII_BMSR, &val);
2040 err |= tg3_readphy(tp, MII_BMSR, &val);
2044 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2045 netif_carrier_off(tp->dev);
2046 tg3_link_report(tp);
2049 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2050 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2051 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2052 err = tg3_phy_reset_5703_4_5(tp);
/* 5784 (non-AX): temporarily drop GPHY_10MB_RXONLY around the reset. */
2059 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2060 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2061 cpmuctrl = tr32(TG3_CPMU_CTRL);
2062 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2064 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2067 err = tg3_bmcr_reset(tp);
2071 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2072 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2073 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2075 tw32(TG3_CPMU_CTRL, cpmuctrl);
/* 5784-AX/5761-AX: undo the 12.5MHz gigabit MAC clock workaround. */
2078 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2079 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2080 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2081 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2082 CPMU_LSPD_1000MB_MACCLK_12_5) {
2083 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2085 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2089 if (tg3_flag(tp, 5717_PLUS) &&
2090 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2093 tg3_phy_apply_otp(tp);
2095 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2096 tg3_phy_toggle_apd(tp, true);
2098 tg3_phy_toggle_apd(tp, false);
/* Erratum fixups gated on per-PHY bug flags; all use SMDSP access. */
2101 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2102 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2103 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2104 tg3_phydsp_write(tp, 0x000a, 0x0323);
2105 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2108 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2109 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2110 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2113 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2114 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2115 tg3_phydsp_write(tp, 0x000a, 0x310b);
2116 tg3_phydsp_write(tp, 0x201f, 0x9506);
2117 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2118 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2120 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2121 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2122 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2123 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2124 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2125 tg3_writephy(tp, MII_TG3_TEST1,
2126 MII_TG3_TEST1_TRIM_EN | 0x4);
2128 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2130 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2134 /* Set Extended packet length bit (bit 14) on all chips that */
2135 /* support jumbo frames */
2136 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2137 /* Cannot do read-modify-write on 5401 */
2138 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2139 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2140 /* Set bit 14 with read-modify-write to preserve other bits */
2141 err = tg3_phy_auxctl_read(tp,
2142 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2144 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2145 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2148 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2149 * jumbo frames transmission.
2151 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2152 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2153 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2154 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2157 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2158 /* adjust output voltage */
2159 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2162 tg3_phy_toggle_automdix(tp, 1);
2163 tg3_phy_set_wirespeed(tp);
/* Decide whether auxiliary power (Vaux) is needed — for this port or a
 * peer function sharing the device (WOL/ASF enabled) — and drive the
 * GRC_LOCAL_CTRL GPIOs accordingly.  GPIO wiring differs per ASIC, so
 * there is one sequence per family below.
 */
2167 static void tg3_frob_aux_power(struct tg3 *tp)
2169 bool need_vaux = false;
2171 /* The GPIOs do something completely different on 57765. */
2172 if (!tg3_flag(tp, IS_NIC) ||
2173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
/* Dual-port parts: consult the peer function's WOL/ASF state too. */
2177 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2178 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2179 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2180 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2181 tp->pdev_peer != tp->pdev) {
2182 struct net_device *dev_peer;
2184 dev_peer = pci_get_drvdata(tp->pdev_peer);
2186 /* remove_one() may have been run on the peer. */
2188 struct tg3 *tp_peer = netdev_priv(dev_peer);
2190 if (tg3_flag(tp_peer, INIT_COMPLETE))
2193 if (tg3_flag(tp_peer, WOL_ENABLE) ||
2194 tg3_flag(tp_peer, ENABLE_ASF))
2199 if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2203 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2204 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2205 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2206 (GRC_LCLCTRL_GPIO_OE0 |
2207 GRC_LCLCTRL_GPIO_OE1 |
2208 GRC_LCLCTRL_GPIO_OE2 |
2209 GRC_LCLCTRL_GPIO_OUTPUT0 |
2210 GRC_LCLCTRL_GPIO_OUTPUT1),
2212 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2213 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2214 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2215 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2216 GRC_LCLCTRL_GPIO_OE1 |
2217 GRC_LCLCTRL_GPIO_OE2 |
2218 GRC_LCLCTRL_GPIO_OUTPUT0 |
2219 GRC_LCLCTRL_GPIO_OUTPUT1 |
2221 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2223 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2224 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2226 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2227 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2230 u32 grc_local_ctrl = 0;
2232 /* Workaround to prevent overdrawing Amps. */
2233 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2235 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2236 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2237 grc_local_ctrl, 100);
2240 /* On 5753 and variants, GPIO2 cannot be used. */
2241 no_gpio2 = tp->nic_sram_data_cfg &
2242 NIC_SRAM_DATA_CFG_NO_GPIO2;
2244 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2245 GRC_LCLCTRL_GPIO_OE1 |
2246 GRC_LCLCTRL_GPIO_OE2 |
2247 GRC_LCLCTRL_GPIO_OUTPUT1 |
2248 GRC_LCLCTRL_GPIO_OUTPUT2;
2250 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2251 GRC_LCLCTRL_GPIO_OUTPUT2);
2253 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2254 grc_local_ctrl, 100);
2256 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2258 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2259 grc_local_ctrl, 100);
2262 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2263 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2264 grc_local_ctrl, 100);
/* No Vaux needed: release GPIO1 (not on 5700/5701). */
2268 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2269 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2270 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2271 (GRC_LCLCTRL_GPIO_OE1 |
2272 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2274 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2275 GRC_LCLCTRL_GPIO_OE1, 100);
2277 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2278 (GRC_LCLCTRL_GPIO_OE1 |
2279 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
/* Return the link LED polarity to use on 5700 parts, which depends on
 * the LED control mode, the PHY model (5411) and the link speed.
 */
2284 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2286 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2288 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2289 if (speed != SPEED_10)
2291 } else if (speed == SPEED_10)
2297 static int tg3_setup_phy(struct tg3 *, int);
2299 #define RESET_KIND_SHUTDOWN 0
2300 #define RESET_KIND_INIT 1
2301 #define RESET_KIND_SUSPEND 2
2303 static void tg3_write_sig_post_reset(struct tg3 *, int);
2304 static int tg3_halt_cpu(struct tg3 *, u32);
/* Put the PHY into its lowest safe power state for suspend/power-down.
 * Serdes, 5906 embedded PHY, FET PHYs and the do_low_power path each
 * need a different sequence; some chips must not have BMCR_PDOWN set
 * at all (see the exclusion list near the end).
 */
2306 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2310 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2311 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2312 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2313 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2316 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2317 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2318 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
/* 5906: park the embedded PHY in IDDQ instead of BMCR power-down. */
2323 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2325 val = tr32(GRC_MISC_CFG);
2326 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2329 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2331 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2334 tg3_writephy(tp, MII_ADVERTISE, 0);
2335 tg3_writephy(tp, MII_BMCR,
2336 BMCR_ANENABLE | BMCR_ANRESTART);
2338 tg3_writephy(tp, MII_TG3_FET_TEST,
2339 phytest | MII_TG3_FET_SHADOW_EN);
2340 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2341 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2343 MII_TG3_FET_SHDW_AUXMODE4,
2346 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2349 } else if (do_low_power) {
2350 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2351 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2353 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2354 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2355 MII_TG3_AUXCTL_PCTL_VREG_11V;
2356 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2359 /* The PHY should not be powered down on some chips because
2362 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2363 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2364 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2365 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
/* 5784-AX/5761-AX: force the 12.5MHz gigabit MAC clock before PDOWN. */
2368 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2369 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2370 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2371 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2372 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2373 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2376 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2379 /* tp->lock is held. */
/* Acquire the NVRAM software arbitration lock (recursive via
 * nvram_lock_cnt); polls SWARB_GNT1 up to 8000 times before backing
 * out the request.  No-op when the device has no NVRAM interface.
 */
2380 static int tg3_nvram_lock(struct tg3 *tp)
2382 if (tg3_flag(tp, NVRAM)) {
2385 if (tp->nvram_lock_cnt == 0) {
2386 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2387 for (i = 0; i < 8000; i++) {
2388 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
/* Timed out: withdraw the arbitration request (error return elided). */
2393 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2397 tp->nvram_lock_cnt++;
2402 /* tp->lock is held. */
/* Release one level of the NVRAM arbitration lock; the hardware grant
 * is dropped only when the recursion count reaches zero.
 */
2403 static void tg3_nvram_unlock(struct tg3 *tp)
2405 if (tg3_flag(tp, NVRAM)) {
2406 if (tp->nvram_lock_cnt > 0)
2407 tp->nvram_lock_cnt--;
2408 if (tp->nvram_lock_cnt == 0)
2409 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2413 /* tp->lock is held. */
/* Set ACCESS_ENABLE in NVRAM_ACCESS on 5750+ parts without protected
 * NVRAM; counterpart of tg3_disable_nvram_access().
 */
2414 static void tg3_enable_nvram_access(struct tg3 *tp)
2416 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2417 u32 nvaccess = tr32(NVRAM_ACCESS);
2419 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2423 /* tp->lock is held. */
/* Clear ACCESS_ENABLE in NVRAM_ACCESS; mirror of
 * tg3_enable_nvram_access() with the same chip gating.
 */
2424 static void tg3_disable_nvram_access(struct tg3 *tp)
2426 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2427 u32 nvaccess = tr32(NVRAM_ACCESS);
2429 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
/* Fallback 32-bit read through the GRC EEPROM interface for devices
 * without the NVRAM block: start a read at a 4-byte-aligned offset,
 * poll up to 1000 times for EEPROM_ADDR_COMPLETE, then fetch the data
 * word.  Returns 0 on success, negative on bad offset or timeout.
 */
2433 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2434 u32 offset, u32 *val)
2439 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2442 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2443 EEPROM_ADDR_DEVID_MASK |
2445 tw32(GRC_EEPROM_ADDR,
2447 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2448 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2449 EEPROM_ADDR_ADDR_MASK) |
2450 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2452 for (i = 0; i < 1000; i++) {
2453 tmp = tr32(GRC_EEPROM_ADDR);
2455 if (tmp & EEPROM_ADDR_COMPLETE)
2459 if (!(tmp & EEPROM_ADDR_COMPLETE))
2462 tmp = tr32(GRC_EEPROM_DATA);
2465 * The data will always be opposite the native endian
2466 * format. Perform a blind byteswap to compensate.
2473 #define NVRAM_CMD_TIMEOUT 10000
/* Issue a command to the NVRAM controller and poll (up to
 * NVRAM_CMD_TIMEOUT iterations) for NVRAM_CMD_DONE; returns non-zero
 * on timeout.
 */
2475 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2479 tw32(NVRAM_CMD, nvram_cmd);
2480 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2482 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2488 if (i == NVRAM_CMD_TIMEOUT)
/* Convert a linear NVRAM offset to the physical address used by
 * Atmel AT45DB0x1B-style buffered flash, whose pages start on
 * power-of-two boundaries (ATMEL_AT45DB0X1B_PAGE_POS) rather than
 * being packed back-to-back.  Other configurations pass through.
 */
2494 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2496 if (tg3_flag(tp, NVRAM) &&
2497 tg3_flag(tp, NVRAM_BUFFERED) &&
2498 tg3_flag(tp, FLASH) &&
2499 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2500 (tp->nvram_jedecnum == JEDEC_ATMEL))
2502 addr = ((addr / tp->nvram_pagesize) <<
2503 ATMEL_AT45DB0X1B_PAGE_POS) +
2504 (addr % tp->nvram_pagesize);
/* Inverse of tg3_nvram_phys_addr(): map an Atmel buffered-flash
 * physical address back to the linear offset; same gating conditions.
 */
2509 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2511 if (tg3_flag(tp, NVRAM) &&
2512 tg3_flag(tp, NVRAM_BUFFERED) &&
2513 tg3_flag(tp, FLASH) &&
2514 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2515 (tp->nvram_jedecnum == JEDEC_ATMEL))
2517 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2518 tp->nvram_pagesize) +
2519 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2524 /* NOTE: Data read in from NVRAM is byteswapped according to
2525 * the byteswapping settings for all other register accesses.
2526 * tg3 devices are BE devices, so on a BE machine, the data
2527 * returned will be exactly as it is seen in NVRAM. On a LE
2528 * machine, the 32-bit value will be byteswapped.
/* Read one 32-bit word from NVRAM: translate the offset, take the
 * arbitration lock, enable access, run the RD command, then undo both
 * in reverse order.  Falls back to the EEPROM path on NVRAM-less
 * parts.  Returns 0 on success or a negative error.
 */
2530 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2534 if (!tg3_flag(tp, NVRAM))
2535 return tg3_nvram_read_using_eeprom(tp, offset, val);
2537 offset = tg3_nvram_phys_addr(tp, offset);
2539 if (offset > NVRAM_ADDR_MSK)
2542 ret = tg3_nvram_lock(tp);
2546 tg3_enable_nvram_access(tp);
2548 tw32(NVRAM_ADDR, offset);
2549 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2550 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2553 *val = tr32(NVRAM_RDDATA);
2555 tg3_disable_nvram_access(tp);
2557 tg3_nvram_unlock(tp);
2562 /* Ensures NVRAM data is in bytestream format. */
/* Wrapper around tg3_nvram_read() that returns the word as big-endian
 * (bytestream) data regardless of host endianness.  On success the raw
 * value is converted with cpu_to_be32(); the result propagation of
 * @res is elided in this excerpt.
 */
2563 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2566 int res = tg3_nvram_read(tp, offset, &v);
2568 *val = cpu_to_be32(v);
2572 /* tp->lock is held. */
/* Program the device's MAC address registers from tp->dev->dev_addr.
 * The 6-byte address is split: bytes 0-1 into the HIGH register,
 * bytes 2-5 into the LOW register.  The same address is written to all
 * four MAC_ADDR_* slots (slot 1 optionally skipped via @skip_mac_1 —
 * presumably because firmware owns it; TODO confirm), and additionally
 * to the 12 extended-address slots on 5703/5704 ASICs.  Finally the TX
 * backoff seed is derived from the byte sum of the address.
 */
2573 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2575 u32 addr_high, addr_low;
2578 addr_high = ((tp->dev->dev_addr[0] << 8) |
2579 tp->dev->dev_addr[1]);
2580 addr_low = ((tp->dev->dev_addr[2] << 24) |
2581 (tp->dev->dev_addr[3] << 16) |
2582 (tp->dev->dev_addr[4] << 8) |
2583 (tp->dev->dev_addr[5] << 0));
2584 for (i = 0; i < 4; i++) {
2585 if (i == 1 && skip_mac_1)
2587 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2588 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
/* 5703/5704 also carry 12 extended perfect-match address slots. */
2591 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2592 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2593 for (i = 0; i < 12; i++) {
2594 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2595 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
/* Seed the transmit backoff RNG from the MAC address byte sum so
 * different NICs choose different backoff slots.
 */
2599 addr_high = (tp->dev->dev_addr[0] +
2600 tp->dev->dev_addr[1] +
2601 tp->dev->dev_addr[2] +
2602 tp->dev->dev_addr[3] +
2603 tp->dev->dev_addr[4] +
2604 tp->dev->dev_addr[5]) &
2605 TX_BACKOFF_SEED_MASK;
2606 tw32(MAC_TX_BACKOFF_SEED, addr_high);
/* Re-write the cached MISC_HOST_CTRL value into PCI config space so
 * that register accesses (indirect or otherwise) work after a power
 * transition may have cleared it.
 */
2609 static void tg3_enable_register_access(struct tg3 *tp)
2612 * Make sure register accesses (indirect or otherwise) will function
2615 pci_write_config_dword(tp->pdev,
2616 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
/* Bring the device to full power: restore register access, move the
 * PCI function to D0, and on NIC-class boards switch the power source
 * out of Vaux via GRC_LOCAL_CTRL.  Return statement elided in this
 * excerpt.
 */
2619 static int tg3_power_up(struct tg3 *tp)
2621 tg3_enable_register_access(tp);
2623 pci_set_power_state(tp->pdev, PCI_D0);
2625 /* Switch out of Vaux if it is a NIC */
2626 if (tg3_flag(tp, IS_NIC))
2627 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
/* Prepare the chip for entering a low-power state (suspend/shutdown):
 * quiesce the PHY, configure Wake-on-LAN, slow or gate internal
 * clocks, and hand control to firmware as needed.
 * NOTE(review): many interior lines of this function are elided in
 * this excerpt (original line numbers jump throughout); comments below
 * describe only what the visible code demonstrates.
 */
2632 static int tg3_power_down_prepare(struct tg3 *tp)
2635 bool device_should_wake, do_low_power;
2637 tg3_enable_register_access(tp);
2639 /* Restore the CLKREQ setting. */
2640 if (tg3_flag(tp, CLKREQ_BUG)) {
2643 pci_read_config_word(tp->pdev,
2644 tp->pcie_cap + PCI_EXP_LNKCTL,
2646 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2647 pci_write_config_word(tp->pdev,
2648 tp->pcie_cap + PCI_EXP_LNKCTL,
/* Mask PCI interrupts while we reconfigure. */
2652 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2653 tw32(TG3PCI_MISC_HOST_CTRL,
2654 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
/* WoL applies only if both the system and the driver enable it. */
2656 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2657 tg3_flag(tp, WOL_ENABLE);
/* phylib-managed PHYs: save current link config, then restrict
 * advertisement to the low-power subset (10 Mb, plus 100 Mb when
 * WOL_SPEED_100MB) and restart autoneg.
 */
2659 if (tg3_flag(tp, USE_PHYLIB)) {
2660 do_low_power = false;
2661 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2662 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2663 struct phy_device *phydev;
2664 u32 phyid, advertising;
2666 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2668 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
/* Remember the pre-suspend link parameters for resume. */
2670 tp->link_config.orig_speed = phydev->speed;
2671 tp->link_config.orig_duplex = phydev->duplex;
2672 tp->link_config.orig_autoneg = phydev->autoneg;
2673 tp->link_config.orig_advertising = phydev->advertising;
2675 advertising = ADVERTISED_TP |
2677 ADVERTISED_Autoneg |
2678 ADVERTISED_10baseT_Half;
2680 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2681 if (tg3_flag(tp, WOL_SPEED_100MB))
2683 ADVERTISED_100baseT_Half |
2684 ADVERTISED_100baseT_Full |
2685 ADVERTISED_10baseT_Full;
2687 advertising |= ADVERTISED_10baseT_Full;
2690 phydev->advertising = advertising;
2692 phy_start_aneg(phydev);
/* Certain Broadcom PHY families need the legacy low-power
 * sequence even under phylib.
 */
2694 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2695 if (phyid != PHY_ID_BCMAC131) {
2696 phyid &= PHY_BCM_OUI_MASK;
2697 if (phyid == PHY_BCM_OUI_1 ||
2698 phyid == PHY_BCM_OUI_2 ||
2699 phyid == PHY_BCM_OUI_3)
2700 do_low_power = true;
/* Non-phylib path: save link config and force 10/half autoneg
 * on copper PHYs.
 */
2704 do_low_power = true;
2706 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2707 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2708 tp->link_config.orig_speed = tp->link_config.speed;
2709 tp->link_config.orig_duplex = tp->link_config.duplex;
2710 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2713 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2714 tp->link_config.speed = SPEED_10;
2715 tp->link_config.duplex = DUPLEX_HALF;
2716 tp->link_config.autoneg = AUTONEG_ENABLE;
2717 tg3_setup_phy(tp, 0);
/* 5906: WoL handling is delegated to the on-chip VCPU. */
2721 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2724 val = tr32(GRC_VCPU_EXT_CTRL);
2725 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2726 } else if (!tg3_flag(tp, ENABLE_ASF)) {
/* Poll the ASF status mailbox for the firmware magic value. */
2730 for (i = 0; i < 200; i++) {
2731 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2732 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
/* Tell firmware (via the WoL mailbox) that the driver is shutting
 * down; trailing flags on this write are elided in this excerpt.
 */
2737 if (tg3_flag(tp, WOL_CAP))
2738 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2739 WOL_DRV_STATE_SHUTDOWN |
/* Configure the MAC so it can receive the wake-up packet. */
2743 if (device_should_wake) {
2746 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2748 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2749 tg3_phy_auxctl_write(tp,
2750 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2751 MII_TG3_AUXCTL_PCTL_WOL_EN |
2752 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2753 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2757 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2758 mac_mode = MAC_MODE_PORT_MODE_GMII;
2760 mac_mode = MAC_MODE_PORT_MODE_MII;
2762 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
/* 5700 needs link-polarity fixed up per wake speed. */
2763 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2765 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2766 SPEED_100 : SPEED_10;
2767 if (tg3_5700_link_polarity(tp, speed))
2768 mac_mode |= MAC_MODE_LINK_POLARITY;
2770 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2773 mac_mode = MAC_MODE_PORT_MODE_TBI;
2776 if (!tg3_flag(tp, 5750_PLUS))
2777 tw32(MAC_LED_CTRL, tp->led_ctrl);
2779 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2780 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2781 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2782 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2784 if (tg3_flag(tp, ENABLE_APE))
2785 mac_mode |= MAC_MODE_APE_TX_EN |
2786 MAC_MODE_APE_RX_EN |
2787 MAC_MODE_TDE_ENABLE;
2789 tw32_f(MAC_MODE, mac_mode);
2792 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
/* Clock management: choose per-ASIC clock gating / slowdown bits
 * for the low-power state.
 */
2796 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2797 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2798 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2801 base_val = tp->pci_clock_ctrl;
2802 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2803 CLOCK_CTRL_TXCLK_DISABLE);
2805 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2806 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2807 } else if (tg3_flag(tp, 5780_CLASS) ||
2808 tg3_flag(tp, CPMU_PRESENT) ||
2809 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
/* These chips need no manual clock slowdown (branch body elided). */
2811 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2812 u32 newbits1, newbits2;
2814 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2815 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2816 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2817 CLOCK_CTRL_TXCLK_DISABLE |
2819 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2820 } else if (tg3_flag(tp, 5705_PLUS)) {
2821 newbits1 = CLOCK_CTRL_625_CORE;
2822 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2824 newbits1 = CLOCK_CTRL_ALTCLK;
2825 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
/* Apply the clock changes in two steps, waiting between writes. */
2828 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2831 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2834 if (!tg3_flag(tp, 5705_PLUS)) {
2837 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2838 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2839 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2840 CLOCK_CTRL_TXCLK_DISABLE |
2841 CLOCK_CTRL_44MHZ_CORE);
2843 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2846 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2847 tp->pci_clock_ctrl | newbits3, 40);
/* Fully power down the PHY only when nothing needs the link. */
2851 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
2852 tg3_power_down_phy(tp, do_low_power);
2854 tg3_frob_aux_power(tp);
2856 /* Workaround for unstable PLL clock */
2857 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2858 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2859 u32 val = tr32(0x7d00);
2861 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2863 if (!tg3_flag(tp, ENABLE_ASF)) {
2866 err = tg3_nvram_lock(tp);
2867 tg3_halt_cpu(tp, RX_CPU_BASE);
2869 tg3_nvram_unlock(tp);
/* Let firmware know the reset cause before sleeping. */
2873 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
/* Final power-down: run the prepare sequence, arm PCI D3 wake when
 * WoL is enabled, then put the function into D3hot.
 */
2878 static void tg3_power_down(struct tg3 *tp)
2880 tg3_power_down_prepare(tp);
2882 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2883 pci_set_power_state(tp->pdev, PCI_D3hot);
/* Decode the PHY's MII_TG3_AUX_STAT speed/duplex field in @val into
 * (*speed, *duplex).  FET-class PHYs use separate status bits and are
 * handled in the default case; a value that matches nothing yields
 * SPEED_INVALID/DUPLEX_INVALID.
 * NOTE(review): some case bodies are partially elided in this excerpt
 * (10/100 speed assignments not visible).
 */
2886 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2888 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2889 case MII_TG3_AUX_STAT_10HALF:
2891 *duplex = DUPLEX_HALF;
2894 case MII_TG3_AUX_STAT_10FULL:
2896 *duplex = DUPLEX_FULL;
2899 case MII_TG3_AUX_STAT_100HALF:
2901 *duplex = DUPLEX_HALF;
2904 case MII_TG3_AUX_STAT_100FULL:
2906 *duplex = DUPLEX_FULL;
2909 case MII_TG3_AUX_STAT_1000HALF:
2910 *speed = SPEED_1000;
2911 *duplex = DUPLEX_HALF;
2914 case MII_TG3_AUX_STAT_1000FULL:
2915 *speed = SPEED_1000;
2916 *duplex = DUPLEX_FULL;
/* Default/FET path: discrete 100-vs-10 and full-vs-half bits. */
2920 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2921 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2923 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2927 *speed = SPEED_INVALID;
2928 *duplex = DUPLEX_INVALID;
/* Program the copper PHY's autonegotiation advertisement registers
 * from ethtool-style @advertise bits plus @flowctrl pause settings:
 * MII_ADVERTISE for 10/100, MII_TG3_CTRL for gigabit (skipped on
 * 10/100-only PHYs), then EEE advertisement for EEE-capable parts.
 * NOTE(review): several lines (locals, early returns, some case
 * labels) are elided in this excerpt.
 */
2933 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
2938 new_adv = ADVERTISE_CSMA;
2939 if (advertise & ADVERTISED_10baseT_Half)
2940 new_adv |= ADVERTISE_10HALF;
2941 if (advertise & ADVERTISED_10baseT_Full)
2942 new_adv |= ADVERTISE_10FULL;
2943 if (advertise & ADVERTISED_100baseT_Half)
2944 new_adv |= ADVERTISE_100HALF;
2945 if (advertise & ADVERTISED_100baseT_Full)
2946 new_adv |= ADVERTISE_100FULL;
/* Fold the pause capability bits into the same register. */
2948 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
2950 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
2954 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2958 if (advertise & ADVERTISED_1000baseT_Half)
2959 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2960 if (advertise & ADVERTISED_1000baseT_Full)
2961 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
/* 5701 A0/B0 erratum: force master mode during autoneg. */
2963 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2964 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2965 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2966 MII_TG3_CTRL_ENABLE_AS_MASTER);
2968 err = tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2972 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
/* Disable LPI while reprogramming EEE advertisement. */
2975 tw32(TG3_CPMU_EEE_MODE,
2976 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2978 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2982 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2984 case ASIC_REV_57765:
2985 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2986 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
2987 MII_TG3_DSP_CH34TP2_HIBW01);
2990 val = MII_TG3_DSP_TAP26_ALNOKO |
2991 MII_TG3_DSP_TAP26_RMRXSTO |
2992 MII_TG3_DSP_TAP26_OPCSINPT;
2993 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2997 /* Advertise 100-BaseTX EEE ability */
2998 if (advertise & ADVERTISED_100baseT_Full)
2999 val |= MDIO_AN_EEE_ADV_100TX;
3000 /* Advertise 1000-BaseT EEE ability */
3001 if (advertise & ADVERTISED_1000baseT_Full)
3002 val |= MDIO_AN_EEE_ADV_1000T;
/* EEE advertisement lives in clause-45 MMD AN register space. */
3003 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3005 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
/* Kick off copper link bring-up according to tp->link_config.
 * Three advertisement cases: low-power (10 Mb, plus 100 Mb when WoL at
 * 100 is enabled), "any speed" (full advertising mask), or a single
 * forced speed.  When autoneg is disabled a specific BMCR value is
 * forced instead, after first dropping the link via loopback so the
 * partner renegotiates; otherwise autoneg is (re)started.
 * NOTE(review): several lines (locals, delays, some case labels) are
 * elided in this excerpt.
 */
3014 static void tg3_phy_copper_begin(struct tg3 *tp)
3019 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3020 new_adv = ADVERTISED_10baseT_Half |
3021 ADVERTISED_10baseT_Full;
3022 if (tg3_flag(tp, WOL_SPEED_100MB))
3023 new_adv |= ADVERTISED_100baseT_Half |
3024 ADVERTISED_100baseT_Full;
3026 tg3_phy_autoneg_cfg(tp, new_adv,
3027 FLOW_CTRL_TX | FLOW_CTRL_RX);
3028 } else if (tp->link_config.speed == SPEED_INVALID) {
/* No forced speed: advertise everything the PHY supports. */
3029 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3030 tp->link_config.advertising &=
3031 ~(ADVERTISED_1000baseT_Half |
3032 ADVERTISED_1000baseT_Full);
3034 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3035 tp->link_config.flowctrl);
3037 /* Asking for a specific link mode. */
3038 if (tp->link_config.speed == SPEED_1000) {
3039 if (tp->link_config.duplex == DUPLEX_FULL)
3040 new_adv = ADVERTISED_1000baseT_Full;
3042 new_adv = ADVERTISED_1000baseT_Half;
3043 } else if (tp->link_config.speed == SPEED_100) {
3044 if (tp->link_config.duplex == DUPLEX_FULL)
3045 new_adv = ADVERTISED_100baseT_Full;
3047 new_adv = ADVERTISED_100baseT_Half;
3049 if (tp->link_config.duplex == DUPLEX_FULL)
3050 new_adv = ADVERTISED_10baseT_Full;
3052 new_adv = ADVERTISED_10baseT_Half;
3055 tg3_phy_autoneg_cfg(tp, new_adv,
3056 tp->link_config.flowctrl);
/* Forced-mode path: build the BMCR value by hand. */
3059 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3060 tp->link_config.speed != SPEED_INVALID) {
3061 u32 bmcr, orig_bmcr;
3063 tp->link_config.active_speed = tp->link_config.speed;
3064 tp->link_config.active_duplex = tp->link_config.duplex;
3067 switch (tp->link_config.speed) {
3073 bmcr |= BMCR_SPEED100;
3077 bmcr |= TG3_BMCR_SPEED1000;
3081 if (tp->link_config.duplex == DUPLEX_FULL)
3082 bmcr |= BMCR_FULLDPLX;
/* Only rewrite BMCR if it actually changes; drop the link via
 * loopback first and wait (up to 1500 polls) for it to go down.
 */
3084 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3085 (bmcr != orig_bmcr)) {
3086 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3087 for (i = 0; i < 1500; i++) {
3091 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3092 tg3_readphy(tp, MII_BMSR, &tmp))
3094 if (!(tmp & BMSR_LSTATUS)) {
3099 tg3_writephy(tp, MII_BMCR, bmcr);
3103 tg3_writephy(tp, MII_BMCR,
3104 BMCR_ANENABLE | BMCR_ANRESTART);
/* BCM5401 PHY DSP workaround sequence: disable tap power management
 * and set the extended-packet-length bit via the aux-control shadow
 * register, then write the documented magic DSP coefficient values.
 * Accumulates write errors in @err (final return elided).
 */
3108 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3112 /* Turn off tap power management. */
3113 /* Set Extended packet length bit */
3114 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3116 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3117 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3118 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3119 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3120 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
/* Check whether the PHY's advertisement registers currently contain at
 * least every mode requested in ethtool-style @mask: build the
 * expected MII_ADVERTISE bit set, compare, then (unless the PHY is
 * 10/100-only) do the same for the gigabit bits in MII_TG3_CTRL.
 * Returns nonzero when everything in @mask is advertised; the early
 * "return 0" lines are elided in this excerpt.
 */
3127 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3129 u32 adv_reg, all_mask = 0;
3131 if (mask & ADVERTISED_10baseT_Half)
3132 all_mask |= ADVERTISE_10HALF;
3133 if (mask & ADVERTISED_10baseT_Full)
3134 all_mask |= ADVERTISE_10FULL;
3135 if (mask & ADVERTISED_100baseT_Half)
3136 all_mask |= ADVERTISE_100HALF;
3137 if (mask & ADVERTISED_100baseT_Full)
3138 all_mask |= ADVERTISE_100FULL;
3140 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
/* All requested 10/100 bits must be present in the register. */
3143 if ((adv_reg & all_mask) != all_mask)
3145 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3149 if (mask & ADVERTISED_1000baseT_Half)
3150 all_mask |= ADVERTISE_1000HALF;
3151 if (mask & ADVERTISED_1000baseT_Full)
3152 all_mask |= ADVERTISE_1000FULL;
3154 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3157 if ((tg3_ctrl & all_mask) != all_mask)
/* Verify that the pause bits currently in MII_ADVERTISE match what
 * tp->link_config.flowctrl requests.  On full duplex a mismatch fails
 * immediately; on half duplex the register is silently corrected so
 * the next renegotiation advertises the right pause bits.  *@lcladv
 * and *@rmtadv receive the local and link-partner advertisements.
 * Early error returns are elided in this excerpt.
 */
3163 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3167 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3170 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3171 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3173 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3174 if (curadv != reqadv)
/* Only fetch the partner's pause bits when autoneg governs pause. */
3177 if (tg3_flag(tp, PAUSE_AUTONEG))
3178 tg3_readphy(tp, MII_LPA, rmtadv);
3180 /* Reprogram the advertisement register, even if it
3181 * does not affect the current link. If the link
3182 * gets renegotiated in the future, we can save an
3183 * additional renegotiation cycle by advertising
3184 * it correctly in the first place.
3186 if (curadv != reqadv) {
3187 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3188 ADVERTISE_PAUSE_ASYM);
3189 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
/* Full copper-PHY link setup/poll routine: apply per-chip PHY
 * workarounds, read link state from the PHY, decide whether the link
 * is up, program MAC_MODE to match the negotiated speed/duplex, and
 * report carrier changes to the network stack.
 * NOTE(review): a substantial number of interior lines are elided in
 * this excerpt (locals, delays, resets, returns); comments describe
 * only what the visible code shows.
 */
3196 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3198 int current_link_up;
3200 u32 lcl_adv, rmt_adv;
/* Clear latched link-state change bits before probing. */
3208 (MAC_STATUS_SYNC_CHANGED |
3209 MAC_STATUS_CFG_CHANGED |
3210 MAC_STATUS_MI_COMPLETION |
3211 MAC_STATUS_LNKSTATE_CHANGED));
/* Stop MI auto-polling while we use the MDIO bus directly. */
3214 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3216 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3220 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3222 /* Some third-party PHYs need to be reset on link going
3225 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3226 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3227 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3228 netif_carrier_ok(tp->dev)) {
/* BMSR is latched; read twice to get current state. */
3229 tg3_readphy(tp, MII_BMSR, &bmsr);
3230 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3231 !(bmsr & BMSR_LSTATUS))
/* BCM5401: re-run the DSP workaround when the link is down. */
3237 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3238 tg3_readphy(tp, MII_BMSR, &bmsr);
3239 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3240 !tg3_flag(tp, INIT_COMPLETE))
3243 if (!(bmsr & BMSR_LSTATUS)) {
3244 err = tg3_init_5401phy_dsp(tp);
3248 tg3_readphy(tp, MII_BMSR, &bmsr);
3249 for (i = 0; i < 1000; i++) {
3251 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3252 (bmsr & BMSR_LSTATUS)) {
/* 5401 rev B0 at gigabit with link still down: reset and redo
 * the DSP sequence.
 */
3258 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3259 TG3_PHY_REV_BCM5401_B0 &&
3260 !(bmsr & BMSR_LSTATUS) &&
3261 tp->link_config.active_speed == SPEED_1000) {
3262 err = tg3_phy_reset(tp);
3264 err = tg3_init_5401phy_dsp(tp);
3269 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3270 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3271 /* 5701 {A0,B0} CRC bug workaround */
3272 tg3_writephy(tp, 0x15, 0x0a75);
3273 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3274 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3275 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3278 /* Clear pending interrupts... */
3279 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3280 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3282 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3283 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG)
3284 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3285 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3287 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3288 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3289 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3290 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3291 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3293 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3296 current_link_up = 0;
3297 current_speed = SPEED_INVALID;
3298 current_duplex = DUPLEX_INVALID;
/* Capacitive-coupling PHYs: ensure bit 10 of the misc-test shadow
 * register is set before trusting link status.
 */
3300 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3301 err = tg3_phy_auxctl_read(tp,
3302 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3304 if (!err && !(val & (1 << 10))) {
3305 tg3_phy_auxctl_write(tp,
3306 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
/* Poll for link; BMSR is read twice per iteration (latched). */
3313 for (i = 0; i < 100; i++) {
3314 tg3_readphy(tp, MII_BMSR, &bmsr);
3315 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3316 (bmsr & BMSR_LSTATUS))
3321 if (bmsr & BMSR_LSTATUS) {
3324 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3325 for (i = 0; i < 2000; i++) {
3327 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3332 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
/* Wait for BMCR to read back a sane (non-0, non-0x7fff) value. */
3337 for (i = 0; i < 200; i++) {
3338 tg3_readphy(tp, MII_BMCR, &bmcr);
3339 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3341 if (bmcr && bmcr != 0x7fff)
3349 tp->link_config.active_speed = current_speed;
3350 tp->link_config.active_duplex = current_duplex;
/* Autoneg: link counts as up only if we advertise everything we
 * want and the pause bits check out; forced mode: link counts as
 * up only when the PHY state matches the forced config.
 */
3352 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3353 if ((bmcr & BMCR_ANENABLE) &&
3354 tg3_copper_is_advertising_all(tp,
3355 tp->link_config.advertising)) {
3356 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3358 current_link_up = 1;
3361 if (!(bmcr & BMCR_ANENABLE) &&
3362 tp->link_config.speed == current_speed &&
3363 tp->link_config.duplex == current_duplex &&
3364 tp->link_config.flowctrl ==
3365 tp->link_config.active_flowctrl) {
3366 current_link_up = 1;
3370 if (current_link_up == 1 &&
3371 tp->link_config.active_duplex == DUPLEX_FULL)
3372 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
/* Link down (or low-power): restart the bring-up sequence and
 * re-check once more.
 */
3376 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3377 tg3_phy_copper_begin(tp);
3379 tg3_readphy(tp, MII_BMSR, &bmsr);
3380 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3381 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3382 current_link_up = 1;
/* Program MAC port mode (MII for 10/100, GMII for gigabit). */
3385 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3386 if (current_link_up == 1) {
3387 if (tp->link_config.active_speed == SPEED_100 ||
3388 tp->link_config.active_speed == SPEED_10)
3389 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3391 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3392 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3393 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3395 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3397 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3398 if (tp->link_config.active_duplex == DUPLEX_HALF)
3399 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3401 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3402 if (current_link_up == 1 &&
3403 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3404 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3406 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3409 /* ??? Without this setting Netgear GA302T PHY does not
3410 * ??? send/receive packets...
3412 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3413 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3414 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3415 tw32_f(MAC_MI_MODE, tp->mi_mode);
3419 tw32_f(MAC_MODE, tp->mac_mode);
3422 tg3_phy_eee_adjust(tp, current_link_up);
3424 if (tg3_flag(tp, USE_LINKCHG_REG)) {
3425 /* Polled via timer. */
3426 tw32_f(MAC_EVENT, 0);
3428 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
/* 5700 at gigabit on PCI-X/high-speed PCI: clear changed bits and
 * poke the firmware mailbox (workaround; exact purpose per vendor).
 */
3432 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3433 current_link_up == 1 &&
3434 tp->link_config.active_speed == SPEED_1000 &&
3435 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3438 (MAC_STATUS_SYNC_CHANGED |
3439 MAC_STATUS_CFG_CHANGED));
3442 NIC_SRAM_FIRMWARE_MBOX,
3443 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3446 /* Prevent send BD corruption. */
3447 if (tg3_flag(tp, CLKREQ_BUG)) {
3448 u16 oldlnkctl, newlnkctl;
3450 pci_read_config_word(tp->pdev,
3451 tp->pcie_cap + PCI_EXP_LNKCTL,
/* CLKREQ must be off at 10/100, on otherwise. */
3453 if (tp->link_config.active_speed == SPEED_100 ||
3454 tp->link_config.active_speed == SPEED_10)
3455 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3457 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3458 if (newlnkctl != oldlnkctl)
3459 pci_write_config_word(tp->pdev,
3460 tp->pcie_cap + PCI_EXP_LNKCTL,
/* Propagate carrier changes to the stack and log them. */
3464 if (current_link_up != netif_carrier_ok(tp->dev)) {
3465 if (current_link_up)
3466 netif_carrier_on(tp->dev);
3468 netif_carrier_off(tp->dev);
3469 tg3_link_report(tp);
/* Bookkeeping for the software fiber (1000BASE-X style) autoneg state
 * machine driven by tg3_fiber_aneg_smachine().  Holds the current
 * state, MR_* management flags, timing, ability-match tracking, and
 * the raw tx/rx config words exchanged with the link partner.
 */
3475 struct tg3_fiber_aneginfo {
/* State-machine states (roughly IEEE 802.3 clause-37 flavored). */
3477 #define ANEG_STATE_UNKNOWN 0
3478 #define ANEG_STATE_AN_ENABLE 1
3479 #define ANEG_STATE_RESTART_INIT 2
3480 #define ANEG_STATE_RESTART 3
3481 #define ANEG_STATE_DISABLE_LINK_OK 4
3482 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3483 #define ANEG_STATE_ABILITY_DETECT 6
3484 #define ANEG_STATE_ACK_DETECT_INIT 7
3485 #define ANEG_STATE_ACK_DETECT 8
3486 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3487 #define ANEG_STATE_COMPLETE_ACK 10
3488 #define ANEG_STATE_IDLE_DETECT_INIT 11
3489 #define ANEG_STATE_IDLE_DETECT 12
3490 #define ANEG_STATE_LINK_OK 13
3491 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3492 #define ANEG_STATE_NEXT_PAGE_WAIT 15
/* MR_* management/status flags kept in ->flags. */
3495 #define MR_AN_ENABLE 0x00000001
3496 #define MR_RESTART_AN 0x00000002
3497 #define MR_AN_COMPLETE 0x00000004
3498 #define MR_PAGE_RX 0x00000008
3499 #define MR_NP_LOADED 0x00000010
3500 #define MR_TOGGLE_TX 0x00000020
3501 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3502 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3503 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3504 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3505 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3506 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3507 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3508 #define MR_TOGGLE_RX 0x00002000
3509 #define MR_NP_RX 0x00004000
3511 #define MR_LINK_OK 0x80000000
3513 unsigned long link_time, cur_time;
3515 u32 ability_match_cfg;
3516 int ability_match_count;
3518 char ability_match, idle_match, ack_match;
/* Raw config words sent to / received from the link partner. */
3520 u32 txconfig, rxconfig;
3521 #define ANEG_CFG_NP 0x00000080
3522 #define ANEG_CFG_ACK 0x00000040
3523 #define ANEG_CFG_RF2 0x00000020
3524 #define ANEG_CFG_RF1 0x00000010
3525 #define ANEG_CFG_PS2 0x00000001
3526 #define ANEG_CFG_PS1 0x00008000
3527 #define ANEG_CFG_HD 0x00004000
3528 #define ANEG_CFG_FD 0x00002000
3529 #define ANEG_CFG_INVAL 0x00001f06
/* State-machine return codes and settle time (in ->cur_time units). */
3534 #define ANEG_TIMER_ENAB 2
3535 #define ANEG_FAILED -1
3537 #define ANEG_STATE_SETTLE_TIME 10000
/* One step of the software fiber autonegotiation state machine.
 * Reads the received config word from MAC_RX_AUTO_NEG (when
 * MAC_STATUS_RCVD_CFG is set), maintains the ability/ack match
 * tracking, then dispatches on ap->state.  Returns one of the ANEG_*
 * codes (ANEG_TIMER_ENAB, ANEG_FAILED, presumably ANEG_DONE/ANEG_OK —
 * some returns elided; TODO confirm).
 * NOTE(review): interior lines are elided in this excerpt (original
 * line numbers jump), including several break/return statements.
 */
3539 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3540 struct tg3_fiber_aneginfo *ap)
3543 unsigned long delta;
3547 if (ap->state == ANEG_STATE_UNKNOWN) {
3551 ap->ability_match_cfg = 0;
3552 ap->ability_match_count = 0;
3553 ap->ability_match = 0;
/* Sample the incoming config word and update match tracking: the
 * same non-matching word must be seen more than once before the
 * ability is considered matched.
 */
3559 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3560 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3562 if (rx_cfg_reg != ap->ability_match_cfg) {
3563 ap->ability_match_cfg = rx_cfg_reg;
3564 ap->ability_match = 0;
3565 ap->ability_match_count = 0;
3567 if (++ap->ability_match_count > 1) {
3568 ap->ability_match = 1;
3569 ap->ability_match_cfg = rx_cfg_reg;
3572 if (rx_cfg_reg & ANEG_CFG_ACK)
/* No config received: clear all match state. */
3580 ap->ability_match_cfg = 0;
3581 ap->ability_match_count = 0;
3582 ap->ability_match = 0;
3588 ap->rxconfig = rx_cfg_reg;
3591 switch (ap->state) {
3592 case ANEG_STATE_UNKNOWN:
3593 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3594 ap->state = ANEG_STATE_AN_ENABLE;
3597 case ANEG_STATE_AN_ENABLE:
3598 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3599 if (ap->flags & MR_AN_ENABLE) {
3602 ap->ability_match_cfg = 0;
3603 ap->ability_match_count = 0;
3604 ap->ability_match = 0;
3608 ap->state = ANEG_STATE_RESTART_INIT;
3610 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3614 case ANEG_STATE_RESTART_INIT:
3615 ap->link_time = ap->cur_time;
3616 ap->flags &= ~(MR_NP_LOADED);
/* Transmit an all-zero config word while restarting. */
3618 tw32(MAC_TX_AUTO_NEG, 0);
3619 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3620 tw32_f(MAC_MODE, tp->mac_mode);
3623 ret = ANEG_TIMER_ENAB;
3624 ap->state = ANEG_STATE_RESTART;
3627 case ANEG_STATE_RESTART:
/* Stay in RESTART until the settle time has elapsed. */
3628 delta = ap->cur_time - ap->link_time;
3629 if (delta > ANEG_STATE_SETTLE_TIME)
3630 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3632 ret = ANEG_TIMER_ENAB;
3635 case ANEG_STATE_DISABLE_LINK_OK:
3639 case ANEG_STATE_ABILITY_DETECT_INIT:
/* Build and transmit our ability word: full duplex plus the
 * pause bits derived from the configured flow control.
 */
3640 ap->flags &= ~(MR_TOGGLE_TX);
3641 ap->txconfig = ANEG_CFG_FD;
3642 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3643 if (flowctrl & ADVERTISE_1000XPAUSE)
3644 ap->txconfig |= ANEG_CFG_PS1;
3645 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3646 ap->txconfig |= ANEG_CFG_PS2;
3647 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3648 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3649 tw32_f(MAC_MODE, tp->mac_mode);
3652 ap->state = ANEG_STATE_ABILITY_DETECT;
3655 case ANEG_STATE_ABILITY_DETECT:
3656 if (ap->ability_match != 0 && ap->rxconfig != 0)
3657 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3660 case ANEG_STATE_ACK_DETECT_INIT:
/* Echo the partner's ability with the ACK bit set. */
3661 ap->txconfig |= ANEG_CFG_ACK;
3662 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3663 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3664 tw32_f(MAC_MODE, tp->mac_mode);
3667 ap->state = ANEG_STATE_ACK_DETECT;
3670 case ANEG_STATE_ACK_DETECT:
3671 if (ap->ack_match != 0) {
3672 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3673 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3674 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3676 ap->state = ANEG_STATE_AN_ENABLE;
/* Partner dropped to zero config: restart negotiation. */
3678 } else if (ap->ability_match != 0 &&
3679 ap->rxconfig == 0) {
3680 ap->state = ANEG_STATE_AN_ENABLE;
3684 case ANEG_STATE_COMPLETE_ACK_INIT:
3685 if (ap->rxconfig & ANEG_CFG_INVAL) {
/* Decode the partner's advertised abilities into MR_* flags. */
3689 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3690 MR_LP_ADV_HALF_DUPLEX |
3691 MR_LP_ADV_SYM_PAUSE |
3692 MR_LP_ADV_ASYM_PAUSE |
3693 MR_LP_ADV_REMOTE_FAULT1 |
3694 MR_LP_ADV_REMOTE_FAULT2 |
3695 MR_LP_ADV_NEXT_PAGE |
3698 if (ap->rxconfig & ANEG_CFG_FD)
3699 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3700 if (ap->rxconfig & ANEG_CFG_HD)
3701 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3702 if (ap->rxconfig & ANEG_CFG_PS1)
3703 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3704 if (ap->rxconfig & ANEG_CFG_PS2)
3705 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3706 if (ap->rxconfig & ANEG_CFG_RF1)
3707 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3708 if (ap->rxconfig & ANEG_CFG_RF2)
3709 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3710 if (ap->rxconfig & ANEG_CFG_NP)
3711 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3713 ap->link_time = ap->cur_time;
3715 ap->flags ^= (MR_TOGGLE_TX);
3716 if (ap->rxconfig & 0x0008)
3717 ap->flags |= MR_TOGGLE_RX;
3718 if (ap->rxconfig & ANEG_CFG_NP)
3719 ap->flags |= MR_NP_RX;
3720 ap->flags |= MR_PAGE_RX;
3722 ap->state = ANEG_STATE_COMPLETE_ACK;
3723 ret = ANEG_TIMER_ENAB;
3726 case ANEG_STATE_COMPLETE_ACK:
3727 if (ap->ability_match != 0 &&
3728 ap->rxconfig == 0) {
3729 ap->state = ANEG_STATE_AN_ENABLE;
3732 delta = ap->cur_time - ap->link_time;
3733 if (delta > ANEG_STATE_SETTLE_TIME) {
3734 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3735 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3737 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3738 !(ap->flags & MR_NP_RX)) {
3739 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3747 case ANEG_STATE_IDLE_DETECT_INIT:
/* Stop sending config words; watch for idle on the wire. */
3748 ap->link_time = ap->cur_time;
3749 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3750 tw32_f(MAC_MODE, tp->mac_mode);
3753 ap->state = ANEG_STATE_IDLE_DETECT;
3754 ret = ANEG_TIMER_ENAB;
3757 case ANEG_STATE_IDLE_DETECT:
3758 if (ap->ability_match != 0 &&
3759 ap->rxconfig == 0) {
3760 ap->state = ANEG_STATE_AN_ENABLE;
3763 delta = ap->cur_time - ap->link_time;
3764 if (delta > ANEG_STATE_SETTLE_TIME) {
3765 /* XXX another gem from the Broadcom driver :( */
3766 ap->state = ANEG_STATE_LINK_OK;
3770 case ANEG_STATE_LINK_OK:
3771 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3775 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3776 /* ??? unimplemented */
3779 case ANEG_STATE_NEXT_PAGE_WAIT:
3780 /* ??? unimplemented */
/* Run the software fiber autoneg state machine to completion (bounded
 * at 195000 ticks): prime the MAC for GMII-mode config transmission,
 * step tg3_fiber_aneg_smachine() until it reports done or failure,
 * then stop sending configs.  *@txflags/*@rxflags receive the final
 * tx config word and MR_* flags.  Returns nonzero on successful
 * negotiation (the visible condition requires ANEG_DONE plus
 * complete/link-ok/full-duplex flags); return lines partially elided.
 */
3791 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3794 struct tg3_fiber_aneginfo aninfo;
3795 int status = ANEG_FAILED;
3799 tw32_f(MAC_TX_AUTO_NEG, 0);
3801 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3802 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3805 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3808 memset(&aninfo, 0, sizeof(aninfo));
3809 aninfo.flags |= MR_AN_ENABLE;
3810 aninfo.state = ANEG_STATE_UNKNOWN;
3811 aninfo.cur_time = 0;
/* Bounded stepping loop; each iteration is one smachine tick. */
3813 while (++tick < 195000) {
3814 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3815 if (status == ANEG_DONE || status == ANEG_FAILED)
3821 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3822 tw32_f(MAC_MODE, tp->mac_mode);
3825 *txflags = aninfo.txconfig;
3826 *rxflags = aninfo.flags;
3828 if (status == ANEG_DONE &&
3829 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3830 MR_LP_ADV_FULL_DUPLEX)))
/* Initialize the BCM8002 SerDes PHY with its vendor-documented magic
 * register sequence: reset, set PLL lock range, configure auto-lock /
 * comdet / txclk, pulse POR, and finally deselect the channel register
 * so the PHY ID can be read.  Register numbers and values are opaque
 * vendor constants.  Delay loops stand in for schedule_timeout().
 */
3836 static void tg3_init_bcm8002(struct tg3 *tp)
3838 u32 mac_status = tr32(MAC_STATUS);
3841 /* Reset when initting first time or we have a link. */
3842 if (tg3_flag(tp, INIT_COMPLETE) &&
3843 !(mac_status & MAC_STATUS_PCS_SYNCED))
3846 /* Set PLL lock range. */
3847 tg3_writephy(tp, 0x16, 0x8007);
3850 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3852 /* Wait for reset to complete. */
3853 /* XXX schedule_timeout() ... */
3854 for (i = 0; i < 500; i++)
3857 /* Config mode; select PMA/Ch 1 regs. */
3858 tg3_writephy(tp, 0x10, 0x8411);
3860 /* Enable auto-lock and comdet, select txclk for tx. */
3861 tg3_writephy(tp, 0x11, 0x0a10);
3863 tg3_writephy(tp, 0x18, 0x00a0);
3864 tg3_writephy(tp, 0x16, 0x41ff);
3866 /* Assert and deassert POR. */
3867 tg3_writephy(tp, 0x13, 0x0400);
3869 tg3_writephy(tp, 0x13, 0x0000);
3871 tg3_writephy(tp, 0x11, 0x0a50);
3873 tg3_writephy(tp, 0x11, 0x0a10);
3875 /* Wait for signal to stabilize */
3876 /* XXX schedule_timeout() ... */
3877 for (i = 0; i < 15000; i++)
3880 /* Deselect the channel register so we can read the PHYID
3883 tg3_writephy(tp, 0x10, 0x8011);
3886 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3889 u32 sg_dig_ctrl, sg_dig_status;
3890 u32 serdes_cfg, expected_sg_dig_ctrl;
3891 int workaround, port_a;
3892 int current_link_up;
3895 expected_sg_dig_ctrl = 0;
3898 current_link_up = 0;
3900 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3901 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3903 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3906 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3907 /* preserve bits 20-23 for voltage regulator */
3908 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3911 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3913 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3914 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3916 u32 val = serdes_cfg;
3922 tw32_f(MAC_SERDES_CFG, val);
3925 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3927 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3928 tg3_setup_flow_control(tp, 0, 0);
3929 current_link_up = 1;
3934 /* Want auto-negotiation. */
3935 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3937 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3938 if (flowctrl & ADVERTISE_1000XPAUSE)
3939 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3940 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3941 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3943 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3944 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3945 tp->serdes_counter &&
3946 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3947 MAC_STATUS_RCVD_CFG)) ==
3948 MAC_STATUS_PCS_SYNCED)) {
3949 tp->serdes_counter--;
3950 current_link_up = 1;
3955 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3956 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3958 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3960 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3961 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3962 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3963 MAC_STATUS_SIGNAL_DET)) {
3964 sg_dig_status = tr32(SG_DIG_STATUS);
3965 mac_status = tr32(MAC_STATUS);
3967 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3968 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3969 u32 local_adv = 0, remote_adv = 0;
3971 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3972 local_adv |= ADVERTISE_1000XPAUSE;
3973 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3974 local_adv |= ADVERTISE_1000XPSE_ASYM;
3976 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3977 remote_adv |= LPA_1000XPAUSE;
3978 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3979 remote_adv |= LPA_1000XPAUSE_ASYM;
3981 tg3_setup_flow_control(tp, local_adv, remote_adv);
3982 current_link_up = 1;
3983 tp->serdes_counter = 0;
3984 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3985 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3986 if (tp->serdes_counter)
3987 tp->serdes_counter--;
3990 u32 val = serdes_cfg;
3997 tw32_f(MAC_SERDES_CFG, val);
4000 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4003 /* Link parallel detection - link is up */
4004 /* only if we have PCS_SYNC and not */
4005 /* receiving config code words */
4006 mac_status = tr32(MAC_STATUS);
4007 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4008 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4009 tg3_setup_flow_control(tp, 0, 0);
4010 current_link_up = 1;
4012 TG3_PHYFLG_PARALLEL_DETECT;
4013 tp->serdes_counter =
4014 SERDES_PARALLEL_DET_TIMEOUT;
4016 goto restart_autoneg;
4020 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4021 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4025 return current_link_up;
4028 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4030 int current_link_up = 0;
4032 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4035 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4036 u32 txflags, rxflags;
4039 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4040 u32 local_adv = 0, remote_adv = 0;
4042 if (txflags & ANEG_CFG_PS1)
4043 local_adv |= ADVERTISE_1000XPAUSE;
4044 if (txflags & ANEG_CFG_PS2)
4045 local_adv |= ADVERTISE_1000XPSE_ASYM;
4047 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4048 remote_adv |= LPA_1000XPAUSE;
4049 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4050 remote_adv |= LPA_1000XPAUSE_ASYM;
4052 tg3_setup_flow_control(tp, local_adv, remote_adv);
4054 current_link_up = 1;
4056 for (i = 0; i < 30; i++) {
4059 (MAC_STATUS_SYNC_CHANGED |
4060 MAC_STATUS_CFG_CHANGED));
4062 if ((tr32(MAC_STATUS) &
4063 (MAC_STATUS_SYNC_CHANGED |
4064 MAC_STATUS_CFG_CHANGED)) == 0)
4068 mac_status = tr32(MAC_STATUS);
4069 if (current_link_up == 0 &&
4070 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4071 !(mac_status & MAC_STATUS_RCVD_CFG))
4072 current_link_up = 1;
4074 tg3_setup_flow_control(tp, 0, 0);
4076 /* Forcing 1000FD link up. */
4077 current_link_up = 1;
4079 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4082 tw32_f(MAC_MODE, tp->mac_mode);
4087 return current_link_up;
4090 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4093 u16 orig_active_speed;
4094 u8 orig_active_duplex;
4096 int current_link_up;
4099 orig_pause_cfg = tp->link_config.active_flowctrl;
4100 orig_active_speed = tp->link_config.active_speed;
4101 orig_active_duplex = tp->link_config.active_duplex;
4103 if (!tg3_flag(tp, HW_AUTONEG) &&
4104 netif_carrier_ok(tp->dev) &&
4105 tg3_flag(tp, INIT_COMPLETE)) {
4106 mac_status = tr32(MAC_STATUS);
4107 mac_status &= (MAC_STATUS_PCS_SYNCED |
4108 MAC_STATUS_SIGNAL_DET |
4109 MAC_STATUS_CFG_CHANGED |
4110 MAC_STATUS_RCVD_CFG);
4111 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4112 MAC_STATUS_SIGNAL_DET)) {
4113 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4114 MAC_STATUS_CFG_CHANGED));
4119 tw32_f(MAC_TX_AUTO_NEG, 0);
4121 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4122 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4123 tw32_f(MAC_MODE, tp->mac_mode);
4126 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4127 tg3_init_bcm8002(tp);
4129 /* Enable link change event even when serdes polling. */
4130 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4133 current_link_up = 0;
4134 mac_status = tr32(MAC_STATUS);
4136 if (tg3_flag(tp, HW_AUTONEG))
4137 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4139 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4141 tp->napi[0].hw_status->status =
4142 (SD_STATUS_UPDATED |
4143 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4145 for (i = 0; i < 100; i++) {
4146 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4147 MAC_STATUS_CFG_CHANGED));
4149 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4150 MAC_STATUS_CFG_CHANGED |
4151 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4155 mac_status = tr32(MAC_STATUS);
4156 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4157 current_link_up = 0;
4158 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4159 tp->serdes_counter == 0) {
4160 tw32_f(MAC_MODE, (tp->mac_mode |
4161 MAC_MODE_SEND_CONFIGS));
4163 tw32_f(MAC_MODE, tp->mac_mode);
4167 if (current_link_up == 1) {
4168 tp->link_config.active_speed = SPEED_1000;
4169 tp->link_config.active_duplex = DUPLEX_FULL;
4170 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4171 LED_CTRL_LNKLED_OVERRIDE |
4172 LED_CTRL_1000MBPS_ON));
4174 tp->link_config.active_speed = SPEED_INVALID;
4175 tp->link_config.active_duplex = DUPLEX_INVALID;
4176 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4177 LED_CTRL_LNKLED_OVERRIDE |
4178 LED_CTRL_TRAFFIC_OVERRIDE));
4181 if (current_link_up != netif_carrier_ok(tp->dev)) {
4182 if (current_link_up)
4183 netif_carrier_on(tp->dev);
4185 netif_carrier_off(tp->dev);
4186 tg3_link_report(tp);
4188 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4189 if (orig_pause_cfg != now_pause_cfg ||
4190 orig_active_speed != tp->link_config.active_speed ||
4191 orig_active_duplex != tp->link_config.active_duplex)
4192 tg3_link_report(tp);
4198 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4200 int current_link_up, err = 0;
4204 u32 local_adv, remote_adv;
4206 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4207 tw32_f(MAC_MODE, tp->mac_mode);
4213 (MAC_STATUS_SYNC_CHANGED |
4214 MAC_STATUS_CFG_CHANGED |
4215 MAC_STATUS_MI_COMPLETION |
4216 MAC_STATUS_LNKSTATE_CHANGED));
4222 current_link_up = 0;
4223 current_speed = SPEED_INVALID;
4224 current_duplex = DUPLEX_INVALID;
4226 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4227 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4228 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4229 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4230 bmsr |= BMSR_LSTATUS;
4232 bmsr &= ~BMSR_LSTATUS;
4235 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4237 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4238 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4239 /* do nothing, just check for link up at the end */
4240 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4243 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4244 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4245 ADVERTISE_1000XPAUSE |
4246 ADVERTISE_1000XPSE_ASYM |
4249 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4251 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4252 new_adv |= ADVERTISE_1000XHALF;
4253 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4254 new_adv |= ADVERTISE_1000XFULL;
4256 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4257 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4258 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4259 tg3_writephy(tp, MII_BMCR, bmcr);
4261 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4262 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4263 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4270 bmcr &= ~BMCR_SPEED1000;
4271 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4273 if (tp->link_config.duplex == DUPLEX_FULL)
4274 new_bmcr |= BMCR_FULLDPLX;
4276 if (new_bmcr != bmcr) {
4277 /* BMCR_SPEED1000 is a reserved bit that needs
4278 * to be set on write.
4280 new_bmcr |= BMCR_SPEED1000;
4282 /* Force a linkdown */
4283 if (netif_carrier_ok(tp->dev)) {
4286 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4287 adv &= ~(ADVERTISE_1000XFULL |
4288 ADVERTISE_1000XHALF |
4290 tg3_writephy(tp, MII_ADVERTISE, adv);
4291 tg3_writephy(tp, MII_BMCR, bmcr |
4295 netif_carrier_off(tp->dev);
4297 tg3_writephy(tp, MII_BMCR, new_bmcr);
4299 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4300 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4301 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4303 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4304 bmsr |= BMSR_LSTATUS;
4306 bmsr &= ~BMSR_LSTATUS;
4308 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4312 if (bmsr & BMSR_LSTATUS) {
4313 current_speed = SPEED_1000;
4314 current_link_up = 1;
4315 if (bmcr & BMCR_FULLDPLX)
4316 current_duplex = DUPLEX_FULL;
4318 current_duplex = DUPLEX_HALF;
4323 if (bmcr & BMCR_ANENABLE) {
4326 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4327 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4328 common = local_adv & remote_adv;
4329 if (common & (ADVERTISE_1000XHALF |
4330 ADVERTISE_1000XFULL)) {
4331 if (common & ADVERTISE_1000XFULL)
4332 current_duplex = DUPLEX_FULL;
4334 current_duplex = DUPLEX_HALF;
4335 } else if (!tg3_flag(tp, 5780_CLASS)) {
4336 /* Link is up via parallel detect */
4338 current_link_up = 0;
4343 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4344 tg3_setup_flow_control(tp, local_adv, remote_adv);
4346 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4347 if (tp->link_config.active_duplex == DUPLEX_HALF)
4348 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4350 tw32_f(MAC_MODE, tp->mac_mode);
4353 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4355 tp->link_config.active_speed = current_speed;
4356 tp->link_config.active_duplex = current_duplex;
4358 if (current_link_up != netif_carrier_ok(tp->dev)) {
4359 if (current_link_up)
4360 netif_carrier_on(tp->dev);
4362 netif_carrier_off(tp->dev);
4363 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4365 tg3_link_report(tp);
4370 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4372 if (tp->serdes_counter) {
4373 /* Give autoneg time to complete. */
4374 tp->serdes_counter--;
4378 if (!netif_carrier_ok(tp->dev) &&
4379 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4382 tg3_readphy(tp, MII_BMCR, &bmcr);
4383 if (bmcr & BMCR_ANENABLE) {
4386 /* Select shadow register 0x1f */
4387 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4388 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4390 /* Select expansion interrupt status register */
4391 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4392 MII_TG3_DSP_EXP1_INT_STAT);
4393 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4394 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4396 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4397 /* We have signal detect and not receiving
4398 * config code words, link is up by parallel
4402 bmcr &= ~BMCR_ANENABLE;
4403 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4404 tg3_writephy(tp, MII_BMCR, bmcr);
4405 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4408 } else if (netif_carrier_ok(tp->dev) &&
4409 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4410 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4413 /* Select expansion interrupt status register */
4414 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4415 MII_TG3_DSP_EXP1_INT_STAT);
4416 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4420 /* Config code words received, turn on autoneg. */
4421 tg3_readphy(tp, MII_BMCR, &bmcr);
4422 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4424 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4430 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4435 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4436 err = tg3_setup_fiber_phy(tp, force_reset);
4437 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4438 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4440 err = tg3_setup_copper_phy(tp, force_reset);
4442 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4445 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4446 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4448 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4453 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4454 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4455 tw32(GRC_MISC_CFG, val);
4458 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4459 (6 << TX_LENGTHS_IPG_SHIFT);
4460 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4461 val |= tr32(MAC_TX_LENGTHS) &
4462 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4463 TX_LENGTHS_CNT_DWN_VAL_MSK);
4465 if (tp->link_config.active_speed == SPEED_1000 &&
4466 tp->link_config.active_duplex == DUPLEX_HALF)
4467 tw32(MAC_TX_LENGTHS, val |
4468 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4470 tw32(MAC_TX_LENGTHS, val |
4471 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4473 if (!tg3_flag(tp, 5705_PLUS)) {
4474 if (netif_carrier_ok(tp->dev)) {
4475 tw32(HOSTCC_STAT_COAL_TICKS,
4476 tp->coal.stats_block_coalesce_usecs);
4478 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4482 if (tg3_flag(tp, ASPM_WORKAROUND)) {
4483 val = tr32(PCIE_PWR_MGMT_THRESH);
4484 if (!netif_carrier_ok(tp->dev))
4485 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4488 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4489 tw32(PCIE_PWR_MGMT_THRESH, val);
4495 static inline int tg3_irq_sync(struct tg3 *tp)
4497 return tp->irq_sync;
4500 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4504 dst = (u32 *)((u8 *)dst + off);
4505 for (i = 0; i < len; i += sizeof(u32))
4506 *dst++ = tr32(off + i);
4509 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4511 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4512 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4513 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4514 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4515 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4516 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4517 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4518 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4519 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4520 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4521 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4522 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4523 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4524 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4525 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4526 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4527 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4528 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4529 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4531 if (tg3_flag(tp, SUPPORT_MSIX))
4532 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4534 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4535 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4536 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4537 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4538 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4539 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4540 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4541 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4543 if (!tg3_flag(tp, 5705_PLUS)) {
4544 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4545 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4546 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4549 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4550 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4551 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4552 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4553 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4555 if (tg3_flag(tp, NVRAM))
4556 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4559 static void tg3_dump_state(struct tg3 *tp)
4564 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4566 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4570 if (tg3_flag(tp, PCI_EXPRESS)) {
4571 /* Read up to but not including private PCI registers */
4572 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4573 regs[i / sizeof(u32)] = tr32(i);
4575 tg3_dump_legacy_regs(tp, regs);
4577 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4578 if (!regs[i + 0] && !regs[i + 1] &&
4579 !regs[i + 2] && !regs[i + 3])
4582 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4584 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4589 for (i = 0; i < tp->irq_cnt; i++) {
4590 struct tg3_napi *tnapi = &tp->napi[i];
4592 /* SW status block */
4594 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4596 tnapi->hw_status->status,
4597 tnapi->hw_status->status_tag,
4598 tnapi->hw_status->rx_jumbo_consumer,
4599 tnapi->hw_status->rx_consumer,
4600 tnapi->hw_status->rx_mini_consumer,
4601 tnapi->hw_status->idx[0].rx_producer,
4602 tnapi->hw_status->idx[0].tx_consumer);
4605 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4607 tnapi->last_tag, tnapi->last_irq_tag,
4608 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4610 tnapi->prodring.rx_std_prod_idx,
4611 tnapi->prodring.rx_std_cons_idx,
4612 tnapi->prodring.rx_jmb_prod_idx,
4613 tnapi->prodring.rx_jmb_cons_idx);
4617 /* This is called whenever we suspect that the system chipset is re-
4618 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4619 * is bogus tx completions. We try to recover by setting the
4620 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4623 static void tg3_tx_recover(struct tg3 *tp)
4625 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4626 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4628 netdev_warn(tp->dev,
4629 "The system may be re-ordering memory-mapped I/O "
4630 "cycles to the network device, attempting to recover. "
4631 "Please report the problem to the driver maintainer "
4632 "and include system chipset information.\n");
4634 spin_lock(&tp->lock);
4635 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4636 spin_unlock(&tp->lock);
4639 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4641 /* Tell compiler to fetch tx indices from memory. */
4643 return tnapi->tx_pending -
4644 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4647 /* Tigon3 never reports partial packet sends. So we do not
4648 * need special logic to handle SKBs that have not had all
4649 * of their frags sent yet, like SunGEM does.
4651 static void tg3_tx(struct tg3_napi *tnapi)
4653 struct tg3 *tp = tnapi->tp;
4654 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4655 u32 sw_idx = tnapi->tx_cons;
4656 struct netdev_queue *txq;
4657 int index = tnapi - tp->napi;
4659 if (tg3_flag(tp, ENABLE_TSS))
4662 txq = netdev_get_tx_queue(tp->dev, index);
4664 while (sw_idx != hw_idx) {
4665 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4666 struct sk_buff *skb = ri->skb;
4669 if (unlikely(skb == NULL)) {
4674 pci_unmap_single(tp->pdev,
4675 dma_unmap_addr(ri, mapping),
4681 sw_idx = NEXT_TX(sw_idx);
4683 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4684 ri = &tnapi->tx_buffers[sw_idx];
4685 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4688 pci_unmap_page(tp->pdev,
4689 dma_unmap_addr(ri, mapping),
4690 skb_shinfo(skb)->frags[i].size,
4692 sw_idx = NEXT_TX(sw_idx);
4697 if (unlikely(tx_bug)) {
4703 tnapi->tx_cons = sw_idx;
4705 /* Need to make the tx_cons update visible to tg3_start_xmit()
4706 * before checking for netif_queue_stopped(). Without the
4707 * memory barrier, there is a small possibility that tg3_start_xmit()
4708 * will miss it and cause the queue to be stopped forever.
4712 if (unlikely(netif_tx_queue_stopped(txq) &&
4713 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4714 __netif_tx_lock(txq, smp_processor_id());
4715 if (netif_tx_queue_stopped(txq) &&
4716 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4717 netif_tx_wake_queue(txq);
4718 __netif_tx_unlock(txq);
4722 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4727 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4728 map_sz, PCI_DMA_FROMDEVICE);
4729 dev_kfree_skb_any(ri->skb);
4733 /* Returns size of skb allocated or < 0 on error.
4735 * We only need to fill in the address because the other members
4736 * of the RX descriptor are invariant, see tg3_init_rings.
4738 * Note the purposeful assymetry of cpu vs. chip accesses. For
4739 * posting buffers we only dirty the first cache line of the RX
4740 * descriptor (containing the address). Whereas for the RX status
4741 * buffers the cpu only reads the last cacheline of the RX descriptor
4742 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4744 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4745 u32 opaque_key, u32 dest_idx_unmasked)
4747 struct tg3_rx_buffer_desc *desc;
4748 struct ring_info *map;
4749 struct sk_buff *skb;
4751 int skb_size, dest_idx;
4753 switch (opaque_key) {
4754 case RXD_OPAQUE_RING_STD:
4755 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4756 desc = &tpr->rx_std[dest_idx];
4757 map = &tpr->rx_std_buffers[dest_idx];
4758 skb_size = tp->rx_pkt_map_sz;
4761 case RXD_OPAQUE_RING_JUMBO:
4762 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4763 desc = &tpr->rx_jmb[dest_idx].std;
4764 map = &tpr->rx_jmb_buffers[dest_idx];
4765 skb_size = TG3_RX_JMB_MAP_SZ;
4772 /* Do not overwrite any of the map or rp information
4773 * until we are sure we can commit to a new buffer.
4775 * Callers depend upon this behavior and assume that
4776 * we leave everything unchanged if we fail.
4778 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4782 skb_reserve(skb, tp->rx_offset);
4784 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4785 PCI_DMA_FROMDEVICE);
4786 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4792 dma_unmap_addr_set(map, mapping, mapping);
4794 desc->addr_hi = ((u64)mapping >> 32);
4795 desc->addr_lo = ((u64)mapping & 0xffffffff);
4800 /* We only need to move over in the address because the other
4801 * members of the RX descriptor are invariant. See notes above
4802 * tg3_alloc_rx_skb for full details.
4804 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4805 struct tg3_rx_prodring_set *dpr,
4806 u32 opaque_key, int src_idx,
4807 u32 dest_idx_unmasked)
4809 struct tg3 *tp = tnapi->tp;
4810 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4811 struct ring_info *src_map, *dest_map;
4812 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4815 switch (opaque_key) {
4816 case RXD_OPAQUE_RING_STD:
4817 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4818 dest_desc = &dpr->rx_std[dest_idx];
4819 dest_map = &dpr->rx_std_buffers[dest_idx];
4820 src_desc = &spr->rx_std[src_idx];
4821 src_map = &spr->rx_std_buffers[src_idx];
4824 case RXD_OPAQUE_RING_JUMBO:
4825 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4826 dest_desc = &dpr->rx_jmb[dest_idx].std;
4827 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4828 src_desc = &spr->rx_jmb[src_idx].std;
4829 src_map = &spr->rx_jmb_buffers[src_idx];
4836 dest_map->skb = src_map->skb;
4837 dma_unmap_addr_set(dest_map, mapping,
4838 dma_unmap_addr(src_map, mapping));
4839 dest_desc->addr_hi = src_desc->addr_hi;
4840 dest_desc->addr_lo = src_desc->addr_lo;
4842 /* Ensure that the update to the skb happens after the physical
4843 * addresses have been transferred to the new BD location.
4847 src_map->skb = NULL;
4850 /* The RX ring scheme is composed of multiple rings which post fresh
4851 * buffers to the chip, and one special ring the chip uses to report
4852 * status back to the host.
4854 * The special ring reports the status of received packets to the
4855 * host. The chip does not write into the original descriptor the
4856 * RX buffer was obtained from. The chip simply takes the original
4857 * descriptor as provided by the host, updates the status and length
4858 * field, then writes this into the next status ring entry.
4860 * Each ring the host uses to post buffers to the chip is described
4861 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
4862 * it is first placed into the on-chip ram. When the packet's length
4863 * is known, it walks down the TG3_BDINFO entries to select the ring.
4864 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4865 * which is within the range of the new packet's length is chosen.
4867 * The "separate ring for rx status" scheme may sound queer, but it makes
4868 * sense from a cache coherency perspective. If only the host writes
4869 * to the buffer post rings, and only the chip writes to the rx status
4870 * rings, then cache lines never move beyond shared-modified state.
4871 * If both the host and chip were to write into the same ring, cache line
4872 * eviction could occur since both entities want it in an exclusive state.
4874 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4876 struct tg3 *tp = tnapi->tp;
4877 u32 work_mask, rx_std_posted = 0;
4878 u32 std_prod_idx, jmb_prod_idx;
4879 u32 sw_idx = tnapi->rx_rcb_ptr;
4882 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4884 hw_idx = *(tnapi->rx_rcb_prod_idx);
4886 * We need to order the read of hw_idx and the read of
4887 * the opaque cookie.
4892 std_prod_idx = tpr->rx_std_prod_idx;
4893 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4894 while (sw_idx != hw_idx && budget > 0) {
4895 struct ring_info *ri;
4896 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4898 struct sk_buff *skb;
4899 dma_addr_t dma_addr;
4900 u32 opaque_key, desc_idx, *post_ptr;
4902 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4903 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4904 if (opaque_key == RXD_OPAQUE_RING_STD) {
4905 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4906 dma_addr = dma_unmap_addr(ri, mapping);
4908 post_ptr = &std_prod_idx;
4910 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4911 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4912 dma_addr = dma_unmap_addr(ri, mapping);
4914 post_ptr = &jmb_prod_idx;
4916 goto next_pkt_nopost;
4918 work_mask |= opaque_key;
4920 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4921 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4923 tg3_recycle_rx(tnapi, tpr, opaque_key,
4924 desc_idx, *post_ptr);
4926 /* Other statistics kept track of by card. */
4931 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4934 if (len > TG3_RX_COPY_THRESH(tp)) {
4937 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4942 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4943 PCI_DMA_FROMDEVICE);
4945 /* Ensure that the update to the skb happens
4946 * after the usage of the old DMA mapping.
4954 struct sk_buff *copy_skb;
4956 tg3_recycle_rx(tnapi, tpr, opaque_key,
4957 desc_idx, *post_ptr);
4959 copy_skb = netdev_alloc_skb(tp->dev, len +
4961 if (copy_skb == NULL)
4962 goto drop_it_no_recycle;
4964 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4965 skb_put(copy_skb, len);
4966 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4967 skb_copy_from_linear_data(skb, copy_skb->data, len);
4968 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4970 /* We'll reuse the original ring buffer. */
4974 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4975 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4976 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4977 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4978 skb->ip_summed = CHECKSUM_UNNECESSARY;
4980 skb_checksum_none_assert(skb);
4982 skb->protocol = eth_type_trans(skb, tp->dev);
4984 if (len > (tp->dev->mtu + ETH_HLEN) &&
4985 skb->protocol != htons(ETH_P_8021Q)) {
4987 goto drop_it_no_recycle;
4990 if (desc->type_flags & RXD_FLAG_VLAN &&
4991 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4992 __vlan_hwaccel_put_tag(skb,
4993 desc->err_vlan & RXD_VLAN_MASK);
4995 napi_gro_receive(&tnapi->napi, skb);
5003 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5004 tpr->rx_std_prod_idx = std_prod_idx &
5005 tp->rx_std_ring_mask;
5006 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5007 tpr->rx_std_prod_idx);
5008 work_mask &= ~RXD_OPAQUE_RING_STD;
5013 sw_idx &= tp->rx_ret_ring_mask;
5015 /* Refresh hw_idx to see if there is new work */
5016 if (sw_idx == hw_idx) {
5017 hw_idx = *(tnapi->rx_rcb_prod_idx);
5022 /* ACK the status ring. */
5023 tnapi->rx_rcb_ptr = sw_idx;
5024 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5026 /* Refill RX ring(s). */
5027 if (!tg3_flag(tp, ENABLE_RSS)) {
5028 if (work_mask & RXD_OPAQUE_RING_STD) {
5029 tpr->rx_std_prod_idx = std_prod_idx &
5030 tp->rx_std_ring_mask;
5031 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5032 tpr->rx_std_prod_idx);
5034 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5035 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5036 tp->rx_jmb_ring_mask;
5037 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5038 tpr->rx_jmb_prod_idx);
5041 } else if (work_mask) {
5042 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5043 * updated before the producer indices can be updated.
5047 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5048 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5050 if (tnapi != &tp->napi[1])
5051 napi_schedule(&tp->napi[1].napi);
5057 static void tg3_poll_link(struct tg3 *tp)
5059 /* handle link change and other phy events */
5060 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5061 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5063 if (sblk->status & SD_STATUS_LINK_CHG) {
5064 sblk->status = SD_STATUS_UPDATED |
5065 (sblk->status & ~SD_STATUS_LINK_CHG);
5066 spin_lock(&tp->lock);
5067 if (tg3_flag(tp, USE_PHYLIB)) {
5069 (MAC_STATUS_SYNC_CHANGED |
5070 MAC_STATUS_CFG_CHANGED |
5071 MAC_STATUS_MI_COMPLETION |
5072 MAC_STATUS_LNKSTATE_CHANGED));
5075 tg3_setup_phy(tp, 0);
5076 spin_unlock(&tp->lock);
/* Transfer freshly-recycled rx buffers from a per-vector source producer
 * ring set (spr) into the destination producer ring set (dpr) that the
 * hardware refills from, for both the standard and (if present) jumbo
 * rings.  The ring_info bookkeeping is bulk-copied with memcpy() and the
 * hardware descriptors' DMA addresses are copied element by element; the
 * source consumer and destination producer indices then advance modulo
 * the ring mask.  Returns nonzero on error -- NOTE(review): the error
 * path taken when a destination slot already holds an skb is elided in
 * this view; confirm against the full source.
 */
5081 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5082 struct tg3_rx_prodring_set *dpr,
5083 struct tg3_rx_prodring_set *spr)
5085 u32 si, di, cpycnt, src_prod_idx;
/* --- standard ring transfer --- */
5089 src_prod_idx = spr->rx_std_prod_idx;
5091 /* Make sure updates to the rx_std_buffers[] entries and the
5092 * standard producer index are seen in the correct order.
5096 if (spr->rx_std_cons_idx == src_prod_idx)
/* Copy up to the producer index, without wrapping past ring end. */
5099 if (spr->rx_std_cons_idx < src_prod_idx)
5100 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5102 cpycnt = tp->rx_std_ring_mask + 1 -
5103 spr->rx_std_cons_idx;
/* Also bound the copy by the space left before dpr wraps. */
5105 cpycnt = min(cpycnt,
5106 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5108 si = spr->rx_std_cons_idx;
5109 di = dpr->rx_std_prod_idx;
/* Destination slots must be empty before we overwrite them. */
5111 for (i = di; i < di + cpycnt; i++) {
5112 if (dpr->rx_std_buffers[i].skb) {
5122 /* Ensure that updates to the rx_std_buffers ring and the
5123 * shadowed hardware producer ring from tg3_recycle_skb() are
5124 * ordered correctly WRT the skb check above.
5128 memcpy(&dpr->rx_std_buffers[di],
5129 &spr->rx_std_buffers[si],
5130 cpycnt * sizeof(struct ring_info));
5132 for (i = 0; i < cpycnt; i++, di++, si++) {
5133 struct tg3_rx_buffer_desc *sbd, *dbd;
5134 sbd = &spr->rx_std[si];
5135 dbd = &dpr->rx_std[di];
5136 dbd->addr_hi = sbd->addr_hi;
5137 dbd->addr_lo = sbd->addr_lo;
5140 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5141 tp->rx_std_ring_mask;
5142 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5143 tp->rx_std_ring_mask;
/* --- jumbo ring transfer (same scheme as the standard ring) --- */
5147 src_prod_idx = spr->rx_jmb_prod_idx;
5149 /* Make sure updates to the rx_jmb_buffers[] entries and
5150 * the jumbo producer index are seen in the correct order.
5154 if (spr->rx_jmb_cons_idx == src_prod_idx)
5157 if (spr->rx_jmb_cons_idx < src_prod_idx)
5158 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5160 cpycnt = tp->rx_jmb_ring_mask + 1 -
5161 spr->rx_jmb_cons_idx;
5163 cpycnt = min(cpycnt,
5164 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5166 si = spr->rx_jmb_cons_idx;
5167 di = dpr->rx_jmb_prod_idx;
5169 for (i = di; i < di + cpycnt; i++) {
5170 if (dpr->rx_jmb_buffers[i].skb) {
5180 /* Ensure that updates to the rx_jmb_buffers ring and the
5181 * shadowed hardware producer ring from tg3_recycle_skb() are
5182 * ordered correctly WRT the skb check above.
5186 memcpy(&dpr->rx_jmb_buffers[di],
5187 &spr->rx_jmb_buffers[si],
5188 cpycnt * sizeof(struct ring_info));
5190 for (i = 0; i < cpycnt; i++, di++, si++) {
5191 struct tg3_rx_buffer_desc *sbd, *dbd;
5192 sbd = &spr->rx_jmb[si].std;
5193 dbd = &dpr->rx_jmb[di].std;
5194 dbd->addr_hi = sbd->addr_hi;
5195 dbd->addr_lo = sbd->addr_lo;
5198 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5199 tp->rx_jmb_ring_mask;
5200 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5201 tp->rx_jmb_ring_mask;
/* Core NAPI work routine shared by tg3_poll() and tg3_poll_msix().
 * Reaps TX completions, then processes RX within the remaining NAPI
 * budget.  When RSS is enabled, vector 1 additionally redistributes
 * recycled buffers from every vector's producer ring into napi[0]'s
 * ring and writes the hardware producer mailboxes if the indices moved.
 * Returns the updated work_done count.
 */
5207 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5209 struct tg3 *tp = tnapi->tp;
5211 /* run TX completion thread */
5212 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
/* TX recovery pending means tg3_tx() hit an error; skip further work. */
5214 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5218 /* run RX thread, within the bounds set by NAPI.
5219 * All RX "locking" is done by ensuring outside
5220 * code synchronizes with tg3->napi.poll()
5222 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5223 work_done += tg3_rx(tnapi, budget - work_done);
5225 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5226 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
/* Snapshot indices so we only touch mailboxes if they change. */
5228 u32 std_prod_idx = dpr->rx_std_prod_idx;
5229 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5231 for (i = 1; i < tp->irq_cnt; i++)
5232 err |= tg3_rx_prodring_xfer(tp, dpr,
5233 &tp->napi[i].prodring);
5237 if (std_prod_idx != dpr->rx_std_prod_idx)
5238 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5239 dpr->rx_std_prod_idx);
5241 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5242 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5243 dpr->rx_jmb_prod_idx);
/* NOTE(review): presumably reached on xfer error to force a new
 * interrupt via coalescing-now; error branch is elided here. */
5248 tw32_f(HOSTCC_MODE, tp->coal_now);
/* NAPI poll callback for MSI-X vectors (tagged status blocks).
 * Loops calling tg3_poll_work() until either the budget is exhausted
 * or no RX/TX work remains, then records the status tag and re-enables
 * the vector's interrupt through its mailbox.  On a pending TX-recovery
 * error it completes NAPI and schedules the reset task instead.
 */
5254 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5256 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5257 struct tg3 *tp = tnapi->tp;
5259 struct tg3_hw_status *sblk = tnapi->hw_status;
5262 work_done = tg3_poll_work(tnapi, work_done, budget);
5264 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5267 if (unlikely(work_done >= budget))
5270 /* tp->last_tag is used in tg3_int_reenable() below
5271 * to tell the hw how much work has been processed,
5272 * so we must read it before checking for more work.
5274 tnapi->last_tag = sblk->status_tag;
5275 tnapi->last_irq_tag = tnapi->last_tag;
5278 /* check for RX/TX work to do */
5279 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5280 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5281 napi_complete(napi);
5282 /* Reenable interrupts. */
5283 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
/* Error path: stop polling and let the reset task recover the chip. */
5292 /* work_done is guaranteed to be less than budget. */
5293 napi_complete(napi);
5294 schedule_work(&tp->reset_task);
/* Inspect hardware error-attention registers after the status block
 * reported SD_STATUS_ERROR.  If a real error is found (flow attention,
 * MSI status, or RDMA/WDMA status), log it, mark the error as processed
 * so it is only handled once, and schedule the reset task.
 */
5298 static void tg3_process_error(struct tg3 *tp)
5301 bool real_error = false;
/* Already handled a previous error; avoid re-scheduling resets. */
5303 if (tg3_flag(tp, ERROR_PROCESSED))
5306 /* Check Flow Attention register */
5307 val = tr32(HOSTCC_FLOW_ATTN);
5308 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5309 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5313 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5314 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5318 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5319 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5328 tg3_flag_set(tp, ERROR_PROCESSED);
5329 schedule_work(&tp->reset_task);
/* NAPI poll callback for vector 0 / legacy interrupt mode.
 * Handles hardware-reported errors first, then loops on
 * tg3_poll_work() until the budget is spent or no work remains.
 * Supports both tagged and non-tagged status block modes before
 * completing NAPI and re-enabling interrupts.
 */
5332 static int tg3_poll(struct napi_struct *napi, int budget)
5334 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5335 struct tg3 *tp = tnapi->tp;
5337 struct tg3_hw_status *sblk = tnapi->hw_status;
5340 if (sblk->status & SD_STATUS_ERROR)
5341 tg3_process_error(tp);
5345 work_done = tg3_poll_work(tnapi, work_done, budget);
5347 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5350 if (unlikely(work_done >= budget))
5353 if (tg3_flag(tp, TAGGED_STATUS)) {
5354 /* tp->last_tag is used in tg3_int_reenable() below
5355 * to tell the hw how much work has been processed,
5356 * so we must read it before checking for more work.
5358 tnapi->last_tag = sblk->status_tag;
5359 tnapi->last_irq_tag = tnapi->last_tag;
/* Non-tagged mode: clear the updated bit before re-checking work. */
5362 sblk->status &= ~SD_STATUS_UPDATED;
5364 if (likely(!tg3_has_work(tnapi))) {
5365 napi_complete(napi);
5366 tg3_int_reenable(tnapi);
/* Error path: stop polling and let the reset task recover the chip. */
5374 /* work_done is guaranteed to be less than budget. */
5375 napi_complete(napi);
5376 schedule_work(&tp->reset_task);
/* Disable NAPI polling on every interrupt vector, highest index first. */
5380 static void tg3_napi_disable(struct tg3 *tp)
5384 for (i = tp->irq_cnt - 1; i >= 0; i--)
5385 napi_disable(&tp->napi[i].napi);
/* Enable NAPI polling on every interrupt vector, in ascending order. */
5388 static void tg3_napi_enable(struct tg3 *tp)
5392 for (i = 0; i < tp->irq_cnt; i++)
5393 napi_enable(&tp->napi[i].napi);
/* Register NAPI contexts: vector 0 uses tg3_poll (legacy/error-aware),
 * all remaining vectors use tg3_poll_msix.  Weight is 64 for both.
 */
5396 static void tg3_napi_init(struct tg3 *tp)
5400 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5401 for (i = 1; i < tp->irq_cnt; i++)
5402 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
/* Unregister all NAPI contexts added by tg3_napi_init(). */
5405 static void tg3_napi_fini(struct tg3 *tp)
5409 for (i = 0; i < tp->irq_cnt; i++)
5410 netif_napi_del(&tp->napi[i].napi);
/* Quiesce the data path: refresh trans_start so the watchdog does not
 * fire while we are deliberately stopped, then disable NAPI and TX.
 */
5413 static inline void tg3_netif_stop(struct tg3 *tp)
5415 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5416 tg3_napi_disable(tp);
5417 netif_tx_disable(tp->dev);
/* Restart the data path after tg3_netif_stop(): wake all TX queues,
 * re-enable NAPI, force a status-block update so the first poll runs,
 * and unmask chip interrupts.
 */
5420 static inline void tg3_netif_start(struct tg3 *tp)
5422 /* NOTE: unconditional netif_tx_wake_all_queues is only
5423 * appropriate so long as all callers are assured to
5424 * have free tx slots (such as after tg3_init_hw)
5426 netif_tx_wake_all_queues(tp->dev);
5428 tg3_napi_enable(tp);
5429 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5430 tg3_enable_ints(tp);
/* Wait until every vector's in-flight interrupt handler has finished.
 * Must not be called while a quiesce is already in progress
 * (BUG_ON(tp->irq_sync)) -- NOTE(review): the line setting irq_sync is
 * elided in this view; confirm against the full source.
 */
5433 static void tg3_irq_quiesce(struct tg3 *tp)
5437 BUG_ON(tp->irq_sync);
5442 for (i = 0; i < tp->irq_cnt; i++)
5443 synchronize_irq(tp->napi[i].irq_vec);
5446 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5447 * If irq_sync is non-zero, then the IRQ handler must be synchronized
5448 * with as well. Most of the time, this is not necessary except when
5449 * shutting down the device.
5451 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5453 spin_lock_bh(&tp->lock);
/* Optionally also wait out any running IRQ handlers. */
5455 tg3_irq_quiesce(tp);
/* Release the lock taken by tg3_full_lock(). */
5458 static inline void tg3_full_unlock(struct tg3 *tp)
5460 spin_unlock_bh(&tp->lock);
5463 /* One-shot MSI handler - Chip automatically disables interrupt
5464 * after sending MSI so driver doesn't have to do it.
5466 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5468 struct tg3_napi *tnapi = dev_id;
5469 struct tg3 *tp = tnapi->tp;
/* Warm the cache lines NAPI will read first. */
5471 prefetch(tnapi->hw_status);
5473 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
/* Skip scheduling if a full-lock holder is quiescing interrupts. */
5475 if (likely(!tg3_irq_sync(tp)))
5476 napi_schedule(&tnapi->napi);
5481 /* MSI ISR - No need to check for interrupt sharing and no need to
5482 * flush status block and interrupt mailbox. PCI ordering rules
5483 * guarantee that MSI will arrive after the status block.
5485 static irqreturn_t tg3_msi(int irq, void *dev_id)
5487 struct tg3_napi *tnapi = dev_id;
5488 struct tg3 *tp = tnapi->tp;
/* Warm the cache lines NAPI will read first. */
5490 prefetch(tnapi->hw_status);
5492 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5494 * Writing any value to intr-mbox-0 clears PCI INTA# and
5495 * chip-internal interrupt pending events.
5496 * Writing non-zero to intr-mbox-0 additional tells the
5497 * NIC to stop sending us irqs, engaging "in-intr-handler"
5500 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5501 if (likely(!tg3_irq_sync(tp)))
5502 napi_schedule(&tnapi->napi);
/* MSI is never shared, so always report the IRQ as handled. */
5504 return IRQ_RETVAL(1);
/* Legacy INTx interrupt handler (non-tagged status blocks).  Determines
 * whether the interrupt is ours via SD_STATUS_UPDATED or the PCI state
 * register, acks/masks the chip through intr-mbox-0, and schedules NAPI
 * if there is work.  Returns IRQ_HANDLED/IRQ_NONE accordingly so shared
 * interrupt lines behave correctly.
 */
5509 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5511 struct tg3_napi *tnapi = dev_id;
5512 struct tg3 *tp = tnapi->tp;
5513 struct tg3_hw_status *sblk = tnapi->hw_status;
5514 unsigned int handled = 1;
5516 /* In INTx mode, it is possible for the interrupt to arrive at
5517 * the CPU before the status block posted prior to the interrupt.
5518 * Reading the PCI State register will confirm whether the
5519 * interrupt is ours and will flush the status block.
5521 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5522 if (tg3_flag(tp, CHIP_RESETTING) ||
5523 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5530 * Writing any value to intr-mbox-0 clears PCI INTA# and
5531 * chip-internal interrupt pending events.
5532 * Writing non-zero to intr-mbox-0 additional tells the
5533 * NIC to stop sending us irqs, engaging "in-intr-handler"
5536 * Flush the mailbox to de-assert the IRQ immediately to prevent
5537 * spurious interrupts. The flush impacts performance but
5538 * excessive spurious interrupts can be worse in some cases.
5540 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5541 if (tg3_irq_sync(tp))
5543 sblk->status &= ~SD_STATUS_UPDATED;
5544 if (likely(tg3_has_work(tnapi))) {
5545 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5546 napi_schedule(&tnapi->napi);
5548 /* No work, shared interrupt perhaps? re-enable
5549 * interrupts, and flush that PCI write
5551 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5555 return IRQ_RETVAL(handled);
/* Legacy INTx interrupt handler for chips using tagged status blocks.
 * Uses the status tag (instead of SD_STATUS_UPDATED) to detect whether
 * new work arrived since the last serviced interrupt; otherwise the
 * structure mirrors tg3_interrupt(): ack via intr-mbox-0, record the
 * tag, and schedule NAPI.
 */
5558 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5560 struct tg3_napi *tnapi = dev_id;
5561 struct tg3 *tp = tnapi->tp;
5562 struct tg3_hw_status *sblk = tnapi->hw_status;
5563 unsigned int handled = 1;
5565 /* In INTx mode, it is possible for the interrupt to arrive at
5566 * the CPU before the status block posted prior to the interrupt.
5567 * Reading the PCI State register will confirm whether the
5568 * interrupt is ours and will flush the status block.
5570 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5571 if (tg3_flag(tp, CHIP_RESETTING) ||
5572 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5579 * writing any value to intr-mbox-0 clears PCI INTA# and
5580 * chip-internal interrupt pending events.
5581 * writing non-zero to intr-mbox-0 additional tells the
5582 * NIC to stop sending us irqs, engaging "in-intr-handler"
5585 * Flush the mailbox to de-assert the IRQ immediately to prevent
5586 * spurious interrupts. The flush impacts performance but
5587 * excessive spurious interrupts can be worse in some cases.
5589 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5592 * In a shared interrupt configuration, sometimes other devices'
5593 * interrupts will scream. We record the current status tag here
5594 * so that the above check can report that the screaming interrupts
5595 * are unhandled. Eventually they will be silenced.
5597 tnapi->last_irq_tag = sblk->status_tag;
5599 if (tg3_irq_sync(tp))
5602 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5604 napi_schedule(&tnapi->napi);
5607 return IRQ_RETVAL(handled);
5608 /* ISR for interrupt test */
/* Minimal handler used by the self-test path: if the status block was
 * updated or INTA is asserted, disable interrupts and claim the IRQ;
 * otherwise report it as not ours.
 */
5609 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5611 struct tg3_napi *tnapi = dev_id;
5612 struct tg3 *tp = tnapi->tp;
5613 struct tg3_hw_status *sblk = tnapi->hw_status;
5615 if ((sblk->status & SD_STATUS_UPDATED) ||
5616 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5617 tg3_disable_ints(tp);
5618 return IRQ_RETVAL(1);
5620 return IRQ_RETVAL(0);
5623 static int tg3_init_hw(struct tg3 *, int);
5624 static int tg3_halt(struct tg3 *, int, int);
5626 /* Restart hardware after configuration changes, self-test, etc.
5627 * Invoked with tp->lock held.
5629 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5630 __releases(tp->lock)
5631 __acquires(tp->lock)
5635 err = tg3_init_hw(tp, reset_phy);
/* On init failure: halt the chip, drop the lock to stop the timer and
 * re-enable NAPI, then reacquire the lock before returning the error. */
5638 "Failed to re-initialize device, aborting\n");
5639 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5640 tg3_full_unlock(tp);
5641 del_timer_sync(&tp->timer);
5643 tg3_napi_enable(tp);
5645 tg3_full_lock(tp, 0);
5650 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: with normal interrupt delivery unavailable, invoke the
 * INTx handler directly for every vector to drain pending events.
 */
5651 static void tg3_poll_controller(struct net_device *dev)
5654 struct tg3 *tp = netdev_priv(dev);
5656 for (i = 0; i < tp->irq_cnt; i++)
5657 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
/* Deferred-work handler that performs a full chip reset: halts the
 * hardware, re-initializes it, restarts the data path, and (if the
 * RESTART_TIMER flag was set) re-arms the driver timer.  Bails out
 * early if the interface is no longer running.
 */
5661 static void tg3_reset_task(struct work_struct *work)
5663 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5665 unsigned int restart_timer;
5667 tg3_full_lock(tp, 0);
5669 if (!netif_running(tp->dev)) {
5670 tg3_full_unlock(tp);
5674 tg3_full_unlock(tp);
/* Reacquire with irq_sync=1 so in-flight handlers are drained. */
5680 tg3_full_lock(tp, 1);
5682 restart_timer = tg3_flag(tp, RESTART_TIMER);
5683 tg3_flag_clear(tp, RESTART_TIMER);
5685 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
/* TX hang recovery: fall back to flushed mailbox writes. */
5686 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5687 tp->write32_rx_mbox = tg3_write_flush_reg32;
5688 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5689 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5692 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5693 err = tg3_init_hw(tp, 1);
5697 tg3_netif_start(tp);
5700 mod_timer(&tp->timer, jiffies + 1);
5703 tg3_full_unlock(tp);
/* netdev watchdog callback: log the TX hang (if tx_err messages are
 * enabled) and schedule the reset task to recover the device.
 */
5709 static void tg3_tx_timeout(struct net_device *dev)
5711 struct tg3 *tp = netdev_priv(dev);
5713 if (netif_msg_tx_err(tp)) {
5714 netdev_err(dev, "transmit timed out, resetting\n");
5718 schedule_work(&tp->reset_task);
5721 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
/* Returns nonzero when [mapping, mapping+len+8) would wrap the low
 * 32 bits of the DMA address, i.e. the buffer straddles a 4GB boundary
 * that buggy chips cannot DMA across.  The 0xffffdcc0 threshold and
 * the +8 slack are hardware-workaround constants -- NOTE(review):
 * presumably tied to max frame size; confirm against chip errata.
 */
5722 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5724 u32 base = (u32) mapping & 0xffffffff;
5726 return (base > 0xffffdcc0) && (base + len + 8 < base);
5729 /* Test for DMA addresses > 40-bit */
/* Returns nonzero when the end of the mapping exceeds the 40-bit DMA
 * limit on chips with the 40BIT_DMA_BUG flag.  Only meaningful on
 * 64-bit kernels with highmem; otherwise compiled out.
 */
5730 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5733 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5734 if (tg3_flag(tp, 40BIT_DMA_BUG))
5735 return ((u64) mapping + len) > DMA_BIT_MASK(40);
/* Fill one TX descriptor at ring index 'entry': split the 64-bit DMA
 * address into hi/lo words, encode length and flags, and pack the VLAN
 * tag and MSS into the vlan_tag word.  mss_and_is_end carries the MSS
 * in its upper bits and an "is last fragment" flag in bit 0.
 */
5742 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5743 dma_addr_t mapping, int len, u32 flags,
5746 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5747 int is_end = (mss_and_is_end & 0x1);
5748 u32 mss = (mss_and_is_end >> 1);
5752 flags |= TXD_FLAG_END;
5753 if (flags & TXD_FLAG_VLAN) {
/* The VLAN tag was stashed in the upper 16 bits of flags. */
5754 vlan_tag = flags >> 16;
5757 vlan_tag |= (mss << TXD_MSS_SHIFT);
5759 txd->addr_hi = ((u64) mapping >> 32);
5760 txd->addr_lo = ((u64) mapping & 0xffffffff);
5761 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5762 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
/* Undo the DMA mappings created while queueing 'skb' for TX, starting
 * from tx_prod: the head mapping (pci_unmap_single) plus fragments 0
 * through 'last' (pci_unmap_page).  Called on the TX error path before
 * the skb is dropped or handed to the hwbug workaround.
 */
5765 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5766 struct sk_buff *skb, int last)
5769 u32 entry = tnapi->tx_prod;
5770 struct ring_info *txb = &tnapi->tx_buffers[entry];
5772 pci_unmap_single(tnapi->tp->pdev,
5773 dma_unmap_addr(txb, mapping),
5776 for (i = 0; i <= last; i++) {
5777 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5779 entry = NEXT_TX(entry);
5780 txb = &tnapi->tx_buffers[entry];
5782 pci_unmap_page(tnapi->tp->pdev,
5783 dma_unmap_addr(txb, mapping),
5784 frag->size, PCI_DMA_TODEVICE);
5788 /* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Linearize the skb into a freshly-allocated copy so the new single
 * mapping avoids the 4GB/40-bit DMA erratum, then queue that copy with
 * tg3_set_txd().  On 5701 the copy is additionally realigned to a
 * 4-byte boundary.  Returns nonzero if allocation/mapping fails or the
 * new buffer still crosses a 4GB boundary (packet is dropped).
 */
5789 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5790 struct sk_buff *skb,
5791 u32 base_flags, u32 mss)
5793 struct tg3 *tp = tnapi->tp;
5794 struct sk_buff *new_skb;
5795 dma_addr_t new_addr = 0;
5796 u32 entry = tnapi->tx_prod;
5799 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5800 new_skb = skb_copy(skb, GFP_ATOMIC);
/* 5701: copy with extra headroom so data lands 4-byte aligned. */
5802 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5804 new_skb = skb_copy_expand(skb,
5805 skb_headroom(skb) + more_headroom,
5806 skb_tailroom(skb), GFP_ATOMIC);
5812 /* New SKB is guaranteed to be linear. */
5813 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5815 /* Make sure the mapping succeeded */
5816 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5818 dev_kfree_skb(new_skb);
5820 /* Make sure new skb does not cross any 4G boundaries.
5821 * Drop the packet if it does.
5823 } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
5824 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5825 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5828 dev_kfree_skb(new_skb);
5830 tnapi->tx_buffers[entry].skb = new_skb;
5831 dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5834 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5835 base_flags, 1 | (mss << 1));
5844 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5846 /* Use GSO to workaround a rare TSO bug that may be triggered when the
5847 * TSO header is greater than 80 bytes.
5849 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5851 struct sk_buff *segs, *nskb;
5852 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5854 /* Estimate the number of fragments in the worst case */
5855 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5856 netif_stop_queue(tp->dev);
5858 /* netif_tx_stop_queue() must be done before checking
5859 * checking tx index in tg3_tx_avail() below, because in
5860 * tg3_tx(), we update tx index before checking for
5861 * netif_tx_queue_stopped().
5864 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5865 return NETDEV_TX_BUSY;
5867 netif_wake_queue(tp->dev);
/* Segment in software (disable TSO for the segmentation pass) and
 * transmit each resulting skb through the normal xmit path. */
5870 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5872 goto tg3_tso_bug_end;
5878 tg3_start_xmit(nskb, tp->dev);
5884 return NETDEV_TX_OK;
5887 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5888 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
/* Main transmit entry point.  Selects the per-queue tnapi, verifies
 * ring space, builds base descriptor flags (checksum, TSO, VLAN,
 * jumbo), DMA-maps the head and all fragments while checking each
 * mapping against the chip's DMA errata, writes the descriptors, and
 * finally kicks the hardware producer mailbox.  If any erratum check
 * trips (would_hit_hwbug), the whole packet is unwound and re-queued
 * through tigon3_dma_hwbug_workaround().
 */
5890 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5892 struct tg3 *tp = netdev_priv(dev);
5893 u32 len, entry, base_flags, mss;
5894 int i = -1, would_hit_hwbug;
5896 struct tg3_napi *tnapi;
5897 struct netdev_queue *txq;
5900 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5901 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
/* With TSS, vector 0 handles no TX; queues map to vectors 1..n. */
5902 if (tg3_flag(tp, ENABLE_TSS))
5905 /* We are running in BH disabled context with netif_tx_lock
5906 * and TX reclaim runs via tp->napi.poll inside of a software
5907 * interrupt. Furthermore, IRQ processing runs lockless so we have
5908 * no IRQ context deadlocks to worry about either. Rejoice!
5910 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5911 if (!netif_tx_queue_stopped(txq)) {
5912 netif_tx_stop_queue(txq);
5914 /* This is a hard error, log it. */
5916 "BUG! Tx Ring full when queue awake!\n");
5918 return NETDEV_TX_BUSY;
5921 entry = tnapi->tx_prod;
5923 if (skb->ip_summed == CHECKSUM_PARTIAL)
5924 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5926 mss = skb_shinfo(skb)->gso_size;
/* --- TSO setup: compute header length, fix pseudo-checksums, and
 * encode MSS/header info per the chip's HW_TSO generation. --- */
5929 u32 tcp_opt_len, hdr_len;
5931 if (skb_header_cloned(skb) &&
5932 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5938 tcp_opt_len = tcp_optlen(skb);
5940 if (skb_is_gso_v6(skb)) {
5941 hdr_len = skb_headlen(skb) - ETH_HLEN;
5945 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5946 hdr_len = ip_tcp_len + tcp_opt_len;
5949 iph->tot_len = htons(mss + hdr_len);
/* Headers > 80 bytes trip a TSO erratum: segment in software. */
5952 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5953 tg3_flag(tp, TSO_BUG))
5954 return tg3_tso_bug(tp, skb);
5956 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5957 TXD_FLAG_CPU_POST_DMA);
5959 if (tg3_flag(tp, HW_TSO_1) ||
5960 tg3_flag(tp, HW_TSO_2) ||
5961 tg3_flag(tp, HW_TSO_3)) {
5962 tcp_hdr(skb)->check = 0;
5963 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
/* Firmware TSO: seed the TCP pseudo-header checksum. */
5965 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5970 if (tg3_flag(tp, HW_TSO_3)) {
5971 mss |= (hdr_len & 0xc) << 12;
5973 base_flags |= 0x00000010;
5974 base_flags |= (hdr_len & 0x3e0) << 5;
5975 } else if (tg3_flag(tp, HW_TSO_2))
5976 mss |= hdr_len << 9;
5977 else if (tg3_flag(tp, HW_TSO_1) ||
5978 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5979 if (tcp_opt_len || iph->ihl > 5) {
5982 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5983 mss |= (tsflags << 11);
5986 if (tcp_opt_len || iph->ihl > 5) {
5989 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5990 base_flags |= tsflags << 12;
5995 if (vlan_tx_tag_present(skb))
5996 base_flags |= (TXD_FLAG_VLAN |
5997 (vlan_tx_tag_get(skb) << 16));
5999 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6000 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6001 base_flags |= TXD_FLAG_JMB_PKT;
/* --- map the linear head and emit its descriptor --- */
6003 len = skb_headlen(skb);
6005 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6006 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6011 tnapi->tx_buffers[entry].skb = skb;
6012 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6014 would_hit_hwbug = 0;
/* Check every chip DMA erratum the mapping might violate. */
6016 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6017 would_hit_hwbug = 1;
6019 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6020 tg3_4g_overflow_test(mapping, len))
6021 would_hit_hwbug = 1;
6023 if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6024 tg3_40bit_overflow_test(tp, mapping, len))
6025 would_hit_hwbug = 1;
6027 if (tg3_flag(tp, 5701_DMA_BUG))
6028 would_hit_hwbug = 1;
6030 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6031 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6033 entry = NEXT_TX(entry);
6035 /* Now loop through additional data fragments, and queue them. */
6036 if (skb_shinfo(skb)->nr_frags > 0) {
6037 last = skb_shinfo(skb)->nr_frags - 1;
6038 for (i = 0; i <= last; i++) {
6039 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6042 mapping = pci_map_page(tp->pdev,
6045 len, PCI_DMA_TODEVICE);
6047 tnapi->tx_buffers[entry].skb = NULL;
6048 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6050 if (pci_dma_mapping_error(tp->pdev, mapping))
6053 if (tg3_flag(tp, SHORT_DMA_BUG) &&
6055 would_hit_hwbug = 1;
6057 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6058 tg3_4g_overflow_test(mapping, len))
6059 would_hit_hwbug = 1;
6061 if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6062 tg3_40bit_overflow_test(tp, mapping, len))
6063 would_hit_hwbug = 1;
6065 if (tg3_flag(tp, HW_TSO_1) ||
6066 tg3_flag(tp, HW_TSO_2) ||
6067 tg3_flag(tp, HW_TSO_3))
6068 tg3_set_txd(tnapi, entry, mapping, len,
6069 base_flags, (i == last)|(mss << 1));
6071 tg3_set_txd(tnapi, entry, mapping, len,
6072 base_flags, (i == last));
6074 entry = NEXT_TX(entry);
6078 if (would_hit_hwbug) {
/* Unwind the mappings and retry via the linearizing workaround. */
6079 tg3_skb_error_unmap(tnapi, skb, i);
6081 /* If the workaround fails due to memory/mapping
6082 * failure, silently drop this packet.
6084 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6087 entry = NEXT_TX(tnapi->tx_prod);
6090 /* Packets are ready, update Tx producer idx local and on card. */
6091 tw32_tx_mbox(tnapi->prodmbox, entry);
6093 tnapi->tx_prod = entry;
6094 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6095 netif_tx_stop_queue(txq);
6097 /* netif_tx_stop_queue() must be done before checking
6098 * checking tx index in tg3_tx_avail() below, because in
6099 * tg3_tx(), we update tx index before checking for
6100 * netif_tx_queue_stopped().
6103 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6104 netif_tx_wake_queue(txq);
6110 return NETDEV_TX_OK;
/* DMA-mapping error path: unmap what was queued and drop the skb. */
6113 tg3_skb_error_unmap(tnapi, skb, i);
6115 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6116 return NETDEV_TX_OK;
/* Toggle internal MAC loopback according to NETIF_F_LOOPBACK in
 * 'features'.  Enabling clears half-duplex, sets the loopback bit in
 * mac_mode and forces carrier on; disabling clears the bit and forces
 * a link status re-check via tg3_setup_phy().  Both paths update the
 * MAC_MODE register under tp->lock.
 */
6119 static void tg3_set_loopback(struct net_device *dev, u32 features)
6121 struct tg3 *tp = netdev_priv(dev);
6123 if (features & NETIF_F_LOOPBACK) {
/* Already in loopback: nothing to do. */
6124 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6128 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6129 * loopback mode if Half-Duplex mode was negotiated earlier.
6131 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6133 /* Enable internal MAC loopback mode */
6134 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6135 spin_lock_bh(&tp->lock);
6136 tw32(MAC_MODE, tp->mac_mode);
6137 netif_carrier_on(tp->dev);
6138 spin_unlock_bh(&tp->lock);
6139 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
/* Not in loopback: nothing to disable. */
6141 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6144 /* Disable internal MAC loopback mode */
6145 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6146 spin_lock_bh(&tp->lock);
6147 tw32(MAC_MODE, tp->mac_mode);
6148 /* Force link status check */
6149 tg3_setup_phy(tp, 1);
6150 spin_unlock_bh(&tp->lock);
6151 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
/* ndo_fix_features: 5780-class chips cannot do TSO with jumbo MTU,
 * so strip all TSO feature bits in that configuration.
 */
6155 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6157 struct tg3 *tp = netdev_priv(dev);
6159 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6160 features &= ~NETIF_F_ALL_TSO;
/* ndo_set_features: the only feature toggle handled here is
 * NETIF_F_LOOPBACK, applied immediately when the device is running.
 */
6165 static int tg3_set_features(struct net_device *dev, u32 features)
6167 u32 changed = dev->features ^ features;
6169 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6170 tg3_set_loopback(dev, features);
/* Record a new MTU and adjust jumbo-ring / TSO capability flags.
 * 5780-class chips trade TSO for jumbo frames (see tg3_fix_features);
 * other chips just enable or disable the jumbo producer ring.
 */
6175 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6180 if (new_mtu > ETH_DATA_LEN) {
6181 if (tg3_flag(tp, 5780_CLASS)) {
6182 netdev_update_features(dev);
6183 tg3_flag_clear(tp, TSO_CAPABLE);
6185 tg3_flag_set(tp, JUMBO_RING_ENABLE);
6188 if (tg3_flag(tp, 5780_CLASS)) {
6189 tg3_flag_set(tp, TSO_CAPABLE);
6190 netdev_update_features(dev);
6192 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
/* ndo_change_mtu: validate the requested MTU, and if the interface is
 * running, halt the chip, apply the new MTU (ring/TSO flags), restart
 * the hardware and the data path.  If the interface is down, just
 * record the MTU for the next open.
 */
6196 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6198 struct tg3 *tp = netdev_priv(dev);
6201 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6204 if (!netif_running(dev)) {
6205 /* We'll just catch it later when the
6208 tg3_set_mtu(dev, tp, new_mtu);
/* Running: full halt/reconfigure/restart under the full lock. */
6216 tg3_full_lock(tp, 1);
6218 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6220 tg3_set_mtu(dev, tp, new_mtu);
6222 err = tg3_restart_hw(tp, 0);
6225 tg3_netif_start(tp);
6227 tg3_full_unlock(tp);
/* Free all rx skbs held by a producer ring set.  For per-vector rings
 * (not napi[0]'s), only the occupied span between consumer and producer
 * index is walked; for the primary ring every slot is freed, including
 * the jumbo ring when the chip supports it.
 */
6235 static void tg3_rx_prodring_free(struct tg3 *tp,
6236 struct tg3_rx_prodring_set *tpr)
6240 if (tpr != &tp->napi[0].prodring) {
6241 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6242 i = (i + 1) & tp->rx_std_ring_mask)
6243 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6246 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6247 for (i = tpr->rx_jmb_cons_idx;
6248 i != tpr->rx_jmb_prod_idx;
6249 i = (i + 1) & tp->rx_jmb_ring_mask) {
6250 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
/* Primary ring: free every slot unconditionally. */
6258 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6259 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6262 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6263 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6264 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6269 /* Initialize rx rings for packet processing.
6271 * The chip has been shut down and the driver detached from
6272 * the networking, so no interrupts or new tx packets will
6273 * end up in the driver. tp->{tx,}lock are held and thus
/* Resets ring indices, zeroes descriptors, writes the invariant
 * descriptor fields (length, flags, opaque ring/index cookie), and
 * allocates fresh skbs for the standard and (if enabled) jumbo rings.
 * Per-vector rings only have their bookkeeping zeroed.  On allocation
 * shortfall the ring is shrunk with a warning rather than failing --
 * NOTE(review): hard-failure threshold lines are elided in this view.
 */
6276 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6277 struct tg3_rx_prodring_set *tpr)
6279 u32 i, rx_pkt_dma_sz;
6281 tpr->rx_std_cons_idx = 0;
6282 tpr->rx_std_prod_idx = 0;
6283 tpr->rx_jmb_cons_idx = 0;
6284 tpr->rx_jmb_prod_idx = 0;
6286 if (tpr != &tp->napi[0].prodring) {
6287 memset(&tpr->rx_std_buffers[0], 0,
6288 TG3_RX_STD_BUFF_RING_SIZE(tp));
6289 if (tpr->rx_jmb_buffers)
6290 memset(&tpr->rx_jmb_buffers[0], 0,
6291 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6295 /* Zero out all descriptors. */
6296 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
/* 5780-class chips use jumbo-sized buffers in the std ring at
 * jumbo MTU instead of a separate jumbo ring. */
6298 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6299 if (tg3_flag(tp, 5780_CLASS) &&
6300 tp->dev->mtu > ETH_DATA_LEN)
6301 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6302 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6304 /* Initialize invariants of the rings, we only set this
6305 * stuff once. This works because the card does not
6306 * write into the rx buffer posting rings.
6308 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6309 struct tg3_rx_buffer_desc *rxd;
6311 rxd = &tpr->rx_std[i];
6312 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6313 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6314 rxd->opaque = (RXD_OPAQUE_RING_STD |
6315 (i << RXD_OPAQUE_INDEX_SHIFT));
6318 /* Now allocate fresh SKBs for each rx ring. */
6319 for (i = 0; i < tp->rx_pending; i++) {
6320 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6321 netdev_warn(tp->dev,
6322 "Using a smaller RX standard ring. Only "
6323 "%d out of %d buffers were allocated "
6324 "successfully\n", i, tp->rx_pending);
/* Jumbo ring setup, skipped when not applicable or not enabled. */
6332 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6335 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6337 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6340 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6341 struct tg3_rx_buffer_desc *rxd;
6343 rxd = &tpr->rx_jmb[i].std;
6344 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6345 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6347 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6348 (i << RXD_OPAQUE_INDEX_SHIFT));
6351 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6352 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6353 netdev_warn(tp->dev,
6354 "Using a smaller RX jumbo ring. Only %d "
6355 "out of %d buffers were allocated "
6356 "successfully\n", i, tp->rx_jumbo_pending);
6359 tp->rx_jumbo_pending = i;
/* Failure path: release anything allocated so far. */
6368 tg3_rx_prodring_free(tp, tpr);
/* Tear down a producer ring set: free the bookkeeping arrays and the
 * DMA-coherent descriptor rings allocated by tg3_rx_prodring_init().
 * Safe to call on a partially-initialized set (kfree(NULL) is a no-op,
 * and the coherent frees are guarded -- guards elided in this view).
 */
6372 static void tg3_rx_prodring_fini(struct tg3 *tp,
6373 struct tg3_rx_prodring_set *tpr)
6375 kfree(tpr->rx_std_buffers);
6376 tpr->rx_std_buffers = NULL;
6377 kfree(tpr->rx_jmb_buffers);
6378 tpr->rx_jmb_buffers = NULL;
6380 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6381 tpr->rx_std, tpr->rx_std_mapping);
6385 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6386 tpr->rx_jmb, tpr->rx_jmb_mapping);
/* Allocate a producer ring set: zeroed bookkeeping arrays plus
 * DMA-coherent descriptor rings for the standard ring and, when the
 * chip is jumbo-capable (and not 5780-class), the jumbo ring.  On any
 * failure everything allocated so far is released via
 * tg3_rx_prodring_fini() and an error is returned.
 */
6391 static int tg3_rx_prodring_init(struct tg3 *tp,
6392 struct tg3_rx_prodring_set *tpr)
6394 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6396 if (!tpr->rx_std_buffers)
6399 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6400 TG3_RX_STD_RING_BYTES(tp),
6401 &tpr->rx_std_mapping,
6406 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6407 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6409 if (!tpr->rx_jmb_buffers)
6412 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6413 TG3_RX_JMB_RING_BYTES(tp),
6414 &tpr->rx_jmb_mapping,
/* Error path: undo partial allocations. */
6423 tg3_rx_prodring_fini(tp, tpr);
6427 /* Free up pending packets in all rx/tx rings.
6429 * The chip has been shut down and the driver detached from
6430 * the networking, so no interrupts or new tx packets will
6431 * end up in the driver. tp->{tx,}lock is not held and we are not
6432 * in an interrupt context and thus may sleep.
/* For every vector: free rx producer ring skbs, then walk the tx ring
 * unmapping the head (pci_unmap_single) and each fragment
 * (pci_unmap_page) before freeing the skb.
 */
6434 static void tg3_free_rings(struct tg3 *tp)
6438 for (j = 0; j < tp->irq_cnt; j++) {
6439 struct tg3_napi *tnapi = &tp->napi[j];
6441 tg3_rx_prodring_free(tp, &tnapi->prodring);
/* Vector may have no tx ring (e.g. RSS-only vectors). */
6443 if (!tnapi->tx_buffers)
6446 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6447 struct ring_info *txp;
6448 struct sk_buff *skb;
6451 txp = &tnapi->tx_buffers[i];
6459 pci_unmap_single(tp->pdev,
6460 dma_unmap_addr(txp, mapping),
6467 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6468 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6469 pci_unmap_page(tp->pdev,
6470 dma_unmap_addr(txp, mapping),
6471 skb_shinfo(skb)->frags[k].size,
6476 dev_kfree_skb_any(skb);
6481 /* Initialize tx/rx rings for packet processing.
6483 * The chip has been shut down and the driver detached from
6484 * the networking, so no interrupts or new tx packets will
6485 * end up in the driver. tp->{tx,}lock are held and thus
/* Frees any lingering skbs, then for each vector resets status-block
 * tags, zeroes the status block and tx/rx descriptor memory, and
 * repopulates the rx producer rings.  Returns nonzero if producer-ring
 * allocation fails -- NOTE(review): the failure cleanup lines are
 * elided in this view.
 */
6488 static int tg3_init_rings(struct tg3 *tp)
6492 /* Free up all the SKBs. */
6495 for (i = 0; i < tp->irq_cnt; i++) {
6496 struct tg3_napi *tnapi = &tp->napi[i];
6498 tnapi->last_tag = 0;
6499 tnapi->last_irq_tag = 0;
6500 tnapi->hw_status->status = 0;
6501 tnapi->hw_status->status_tag = 0;
6502 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6507 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6509 tnapi->rx_rcb_ptr = 0;
6511 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6513 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6523 * Must not be invoked with interrupt sources disabled and
6524 * the hardware shutdown down.
/* Release every DMA-coherent allocation made by tg3_alloc_consistent():
 * per-vector tx rings, tx buffer bookkeeping, rx return rings, producer
 * ring sets, status blocks, and finally the shared hardware statistics
 * block.  Each pointer is NULLed after freeing so the function is safe
 * against partial initialization.
 */
6526 static void tg3_free_consistent(struct tg3 *tp)
6530 for (i = 0; i < tp->irq_cnt; i++) {
6531 struct tg3_napi *tnapi = &tp->napi[i];
6533 if (tnapi->tx_ring) {
6534 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6535 tnapi->tx_ring, tnapi->tx_desc_mapping);
6536 tnapi->tx_ring = NULL;
6539 kfree(tnapi->tx_buffers);
6540 tnapi->tx_buffers = NULL;
6542 if (tnapi->rx_rcb) {
6543 dma_free_coherent(&tp->pdev->dev,
6544 TG3_RX_RCB_RING_BYTES(tp),
6546 tnapi->rx_rcb_mapping);
6547 tnapi->rx_rcb = NULL;
6550 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6552 if (tnapi->hw_status) {
6553 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6555 tnapi->status_mapping);
6556 tnapi->hw_status = NULL;
6561 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6562 tp->hw_stats, tp->stats_mapping);
6563 tp->hw_stats = NULL;
6568  * Must not be invoked with interrupt sources disabled and
6569  * the hardware shutdown down.  Can sleep.
/* Allocate all DMA-coherent memory the driver needs: the hw stats
 * block, and per-vector status blocks, tx rings/buffers and rx return
 * rings. On any failure, falls through to the error path at the bottom
 * which calls tg3_free_consistent() to unwind partial allocations.
 */
6571 static int tg3_alloc_consistent(struct tg3 *tp)
6575 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6576 					  sizeof(struct tg3_hw_stats),
6582 	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6584 	for (i = 0; i < tp->irq_cnt; i++) {
6585 		struct tg3_napi *tnapi = &tp->napi[i];
6586 		struct tg3_hw_status *sblk;
6588 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6590 						      &tnapi->status_mapping,
6592 		if (!tnapi->hw_status)
6595 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6596 		sblk = tnapi->hw_status;
6598 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6601 		/* If multivector TSS is enabled, vector 0 does not handle
6602 		 * tx interrupts.  Don't allocate any resources for it.
/* With TSS: vectors 1..n get tx resources; without TSS only vector 0
 * does. The condition below encodes exactly that.
 */
6604 		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6605 		    (i && tg3_flag(tp, ENABLE_TSS))) {
6606 			tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6609 			if (!tnapi->tx_buffers)
6612 			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6614 							    &tnapi->tx_desc_mapping,
6616 			if (!tnapi->tx_ring)
6621 		 * When RSS is enabled, the status block format changes
6622 		 * slightly.  The "rx_jumbo_consumer", "reserved",
6623 		 * and "rx_mini_consumer" members get mapped to the
6624 		 * other three rx return ring producer indexes.
/* The (elided) switch on the vector index picks which status-block
 * field this vector polls as its rx return ring producer index.
 */
6628 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6631 			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6634 			tnapi->rx_rcb_prod_idx = &sblk->reserved;
6637 			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6642 		 * If multivector RSS is enabled, vector 0 does not handle
6643 		 * rx or tx interrupts.  Don't allocate any resources for it.
6645 		if (!i && tg3_flag(tp, ENABLE_RSS))
6648 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6649 						   TG3_RX_RCB_RING_BYTES(tp),
6650 						   &tnapi->rx_rcb_mapping,
6655 		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
/* Error path: release everything allocated so far. */
6661 	tg3_free_consistent(tp);
6665 #define MAX_WAIT_CNT 1000
6667 /* To stop a block, clear the enable bit and poll till it
6668  * clears.  tp->lock is held.
/* @ofs: register offset of the block's mode register.
 * @enable_bit: the enable bit to clear and poll on.
 * @silent: if non-zero, suppress the timeout error message.
 * Returns 0 on success; a non-zero error on timeout (exact value
 * elided here — presumably -ENODEV, TODO confirm).
 */
6670 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6675 	if (tg3_flag(tp, 5705_PLUS)) {
6682 		/* We can't enable/disable these bits of the
6683 		 * 5705/5750, just say success.
/* Poll up to MAX_WAIT_CNT iterations for the enable bit to clear. */
6696 	for (i = 0; i < MAX_WAIT_CNT; i++) {
6699 		if ((val & enable_bit) == 0)
6703 	if (i == MAX_WAIT_CNT && !silent) {
6704 		dev_err(&tp->pdev->dev,
6705 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6713 /* tp->lock is held. */
/* Quiesce the whole chip: stop the receive path first, then the send
 * path, then DMA and host coalescing, finishing with the buffer
 * manager and memory arbiter. Errors from the individual stop calls
 * are OR-ed together; @silent is forwarded to suppress log spam.
 */
6714 static int tg3_abort_hw(struct tg3 *tp, int silent)
6718 	tg3_disable_ints(tp);
/* Disable the MAC receiver before tearing down rx blocks. */
6720 	tp->rx_mode &= ~RX_MODE_ENABLE;
6721 	tw32_f(MAC_RX_MODE, tp->rx_mode);
6724 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6725 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6726 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6727 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6728 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6729 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6731 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6732 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6733 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6734 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6735 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6736 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6737 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
/* Disable the MAC transmitter, then poll MAC_TX_MODE directly since
 * it is not handled by tg3_stop_block().
 */
6739 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6740 	tw32_f(MAC_MODE, tp->mac_mode);
6743 	tp->tx_mode &= ~TX_MODE_ENABLE;
6744 	tw32_f(MAC_TX_MODE, tp->tx_mode);
6746 	for (i = 0; i < MAX_WAIT_CNT; i++) {
6748 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6751 	if (i >= MAX_WAIT_CNT) {
6752 		dev_err(&tp->pdev->dev,
6753 			"%s timed out, TX_MODE_ENABLE will not clear "
6754 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6758 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6759 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6760 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
/* Pulse FTQ_RESET to flush the flow-through queues. */
6762 	tw32(FTQ_RESET, 0xffffffff);
6763 	tw32(FTQ_RESET, 0x00000000);
6765 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6766 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
/* Clear the in-memory status and statistics blocks now that the
 * hardware can no longer DMA into them.
 */
6768 	for (i = 0; i < tp->irq_cnt; i++) {
6769 		struct tg3_napi *tnapi = &tp->napi[i];
6770 		if (tnapi->hw_status)
6771 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6774 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
/* Post @event to the APE (management) firmware's event status register.
 * Silently returns if the APE firmware is absent, not ready, or NCSI
 * (which does not take APE events). Best-effort: gives the APE up to
 * ~1 ms to drain a previously pending event before giving up.
 */
6779 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6784 	/* NCSI does not support APE events */
6785 	if (tg3_flag(tp, APE_HAS_NCSI))
6788 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6789 	if (apedata != APE_SEG_SIG_MAGIC)
6792 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6793 	if (!(apedata & APE_FW_STATUS_READY))
6796 	/* Wait for up to 1 millisecond for APE to service previous event. */
6797 	for (i = 0; i < 10; i++) {
6798 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6801 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
/* Only write the new event once the previous one has been consumed;
 * the write happens under the APE memory lock.
 */
6803 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6804 			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6805 					event | APE_EVENT_STATUS_EVENT_PENDING);
6807 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6809 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
/* Ring the APE doorbell only if our event was actually queued. */
6815 	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6816 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
/* Tell the APE management firmware about a driver state transition
 * (@kind is one of RESET_KIND_INIT/SHUTDOWN/SUSPEND). Writes the
 * relevant APE shared-memory fields, then posts a matching event via
 * tg3_ape_send_event(). No-op when the APE is not enabled.
 */
6819 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6824 	if (!tg3_flag(tp, ENABLE_APE))
6828 	case RESET_KIND_INIT:
/* Advertise our presence: magic signatures, bumped init count,
 * driver version id, and the STARTED driver state.
 */
6829 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6830 				APE_HOST_SEG_SIG_MAGIC);
6831 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6832 				APE_HOST_SEG_LEN_MAGIC);
6833 		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6834 		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6835 		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6836 			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6837 		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6838 				APE_HOST_BEHAV_NO_PHYLOCK);
6839 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6840 				    TG3_APE_HOST_DRVR_STATE_START);
6842 		event = APE_EVENT_STATUS_STATE_START;
6844 	case RESET_KIND_SHUTDOWN:
6845 		/* With the interface we are currently using,
6846 		 * APE does not track driver state.  Wiping
6847 		 * out the HOST SEGMENT SIGNATURE forces
6848 		 * the APE to assume OS absent status.
6850 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
/* If WoL is armed, leave the APE in the WOL state with auto speed;
 * otherwise report a plain unload.
 */
6852 		if (device_may_wakeup(&tp->pdev->dev) &&
6853 		    tg3_flag(tp, WOL_ENABLE)) {
6854 			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6855 					    TG3_APE_HOST_WOL_SPEED_AUTO);
6856 			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6858 			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6860 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6862 		event = APE_EVENT_STATUS_STATE_UNLOAD;
6864 	case RESET_KIND_SUSPEND:
6865 		event = APE_EVENT_STATUS_STATE_SUSPEND;
6871 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6873 	tg3_ape_send_event(tp, event);
6876 /* tp->lock is held. */
/* Signal the on-chip (ASF) firmware that a reset of @kind is about to
 * happen: write the firmware mailbox magic, then (for the new ASF
 * handshake) the matching DRV_STATE, and finally forward INIT/SUSPEND
 * transitions to the APE firmware as well.
 */
6877 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6879 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6880 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6882 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6884 		case RESET_KIND_INIT:
6885 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6889 		case RESET_KIND_SHUTDOWN:
6890 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6894 		case RESET_KIND_SUSPEND:
6895 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
/* SHUTDOWN is deliberately handled post-reset instead (see
 * tg3_write_sig_post_reset()).
 */
6904 	if (kind == RESET_KIND_INIT ||
6905 	    kind == RESET_KIND_SUSPEND)
6906 		tg3_ape_driver_state_change(tp, kind);
6909 /* tp->lock is held. */
/* Counterpart of tg3_write_sig_pre_reset(): after the reset completes,
 * report the "*_DONE" driver state for @kind to the ASF firmware, and
 * notify the APE on shutdown.
 */
6910 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6912 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6914 		case RESET_KIND_INIT:
6915 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6916 				      DRV_STATE_START_DONE);
6919 		case RESET_KIND_SHUTDOWN:
6920 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6921 				      DRV_STATE_UNLOAD_DONE);
/* APE only cares about the shutdown transition here. */
6929 	if (kind == RESET_KIND_SHUTDOWN)
6930 		tg3_ape_driver_state_change(tp, kind);
6933 /* tp->lock is held. */
/* Legacy (pre-new-handshake) ASF signalling: write the plain DRV_STATE
 * value for @kind to the firmware mailbox. Only applies when ASF is
 * enabled.
 */
6934 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6936 	if (tg3_flag(tp, ENABLE_ASF)) {
6938 		case RESET_KIND_INIT:
6939 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6943 		case RESET_KIND_SHUTDOWN:
6944 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6948 		case RESET_KIND_SUSPEND:
6949 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
/* Wait for the bootcode/firmware to finish initializing after a chip
 * reset. 5906 parts use the VCPU status register; everything else
 * polls the firmware mailbox for the bit-inverted magic value the
 * firmware writes back as its ACK. A timeout is not fatal — some
 * boards legitimately ship without firmware.
 */
6959 static int tg3_poll_fw(struct tg3 *tp)
6964 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6965 		/* Wait up to 20ms for init done. */
6966 		for (i = 0; i < 200; i++) {
6967 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6974 	/* Wait for firmware initialization to complete. */
6975 	for (i = 0; i < 100000; i++) {
6976 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
/* Firmware ACKs by writing the one's complement of the magic. */
6977 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6982 	/* Chip might not be fitted with firmware.  Some Sun onboard
6983 	 * parts are configured like that.  So don't signal the timeout
6984 	 * of the above loop as an error, but do report the lack of
6985 	 * running firmware once.
6987 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
6988 		tg3_flag_set(tp, NO_FWARE_REPORTED);
6990 		netdev_info(tp->dev, "No firmware running\n");
6993 	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6994 		/* The 57765 A0 needs a little more
6995 		 * time to do some important work.
7003 /* Save PCI command register before chip reset */
/* Only PCI_COMMAND is captured here; the rest of the config space is
 * rebuilt from driver shadow state in tg3_restore_pci_state().
 */
7004 static void tg3_save_pci_state(struct tg3 *tp)
7006 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7009 /* Restore PCI state after chip reset */
/* Rebuild PCI config space from the driver's shadow copies: indirect
 * access enable, PCISTATE (retry/APE access bits), the saved
 * PCI_COMMAND word, bus-specific settings (PCIe read request size or
 * PCI cacheline/latency), PCI-X relaxed ordering, and — on 5780-class
 * parts — the MSI enable bit that the core-clock reset wipes out.
 */
7010 static void tg3_restore_pci_state(struct tg3 *tp)
7014 	/* Re-enable indirect register accesses. */
7015 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7016 			       tp->misc_host_ctrl);
7018 	/* Set MAX PCI retry to zero. */
7019 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7020 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7021 	    tg3_flag(tp, PCIX_MODE))
7022 		val |= PCISTATE_RETRY_SAME_DMA;
7023 	/* Allow reads and writes to the APE register and memory space. */
7024 	if (tg3_flag(tp, ENABLE_APE))
7025 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7026 		       PCISTATE_ALLOW_APE_SHMEM_WR |
7027 		       PCISTATE_ALLOW_APE_PSPACE_WR;
7028 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7030 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7032 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7033 		if (tg3_flag(tp, PCI_EXPRESS))
7034 			pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7036 			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7037 					      tp->pci_cacheline_sz);
7038 			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7043 	/* Make sure PCI-X relaxed ordering bit is clear. */
7044 	if (tg3_flag(tp, PCIX_MODE)) {
7047 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7049 		pcix_cmd &= ~PCI_X_CMD_ERO;
7050 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7054 	if (tg3_flag(tp, 5780_CLASS)) {
7056 		/* Chip reset on 5780 will reset MSI enable bit,
7057 		 * so need to restore it.
7059 		if (tg3_flag(tp, USING_MSI)) {
7062 			pci_read_config_word(tp->pdev,
7063 					     tp->msi_cap + PCI_MSI_FLAGS,
7065 			pci_write_config_word(tp->pdev,
7066 					      tp->msi_cap + PCI_MSI_FLAGS,
7067 					      ctrl | PCI_MSI_FLAGS_ENABLE);
7068 			val = tr32(MSGINT_MODE);
7069 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7074 static void tg3_stop_fw(struct tg3 *);
7076 /* tp->lock is held. */
/* Perform a full core-clock reset of the chip and bring it back to a
 * usable state: save PCI config, quiesce irq handlers, issue the
 * GRC_MISC_CFG core reset, restore PCI/PCIe state, re-enable the
 * memory arbiter and MAC mode, wait for firmware, and re-probe ASF.
 * Returns the result of tg3_poll_fw() (0 on success).
 */
7077 static int tg3_chip_reset(struct tg3 *tp)
7080 	void (*write_op)(struct tg3 *, u32, u32);
7085 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7087 	/* No matching tg3_nvram_unlock() after this because
7088 	 * chip reset below will undo the nvram lock.
7090 	tp->nvram_lock_cnt = 0;
7092 	/* GRC_MISC_CFG core clock reset will clear the memory
7093 	 * enable bit in PCI register 4 and the MSI enable bit
7094 	 * on some chips, so we save relevant registers here.
7096 	tg3_save_pci_state(tp);
7098 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7099 	    tg3_flag(tp, 5755_PLUS))
7100 		tw32(GRC_FASTBOOT_PC, 0);
7103 	 * We must avoid the readl() that normally takes place.
7104 	 * It locks machines, causes machine checks, and other
7105 	 * fun things.  So, temporarily disable the 5701
7106 	 * hardware workaround, while we do the reset.
7108 	write_op = tp->write32;
7109 	if (write_op == tg3_write_flush_reg32)
7110 		tp->write32 = tg3_write32;
7112 	/* Prevent the irq handler from reading or writing PCI registers
7113 	 * during chip reset when the memory enable bit in the PCI command
7114 	 * register may be cleared.  The chip does not generate interrupt
7115 	 * at this time, but the irq handler may still be called due to irq
7116 	 * sharing or irqpoll.
7118 	tg3_flag_set(tp, CHIP_RESETTING);
7119 	for (i = 0; i < tp->irq_cnt; i++) {
7120 		struct tg3_napi *tnapi = &tp->napi[i];
7121 		if (tnapi->hw_status) {
7122 			tnapi->hw_status->status = 0;
7123 			tnapi->hw_status->status_tag = 0;
7125 		tnapi->last_tag = 0;
7126 		tnapi->last_irq_tag = 0;
/* Make sure no irq handler is still running before the reset. */
7130 	for (i = 0; i < tp->irq_cnt; i++)
7131 		synchronize_irq(tp->napi[i].irq_vec);
7133 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7134 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7135 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
/* Build the reset value, then issue it via GRC_MISC_CFG below. */
7139 	val = GRC_MISC_CFG_CORECLK_RESET;
7141 	if (tg3_flag(tp, PCI_EXPRESS)) {
7142 		/* Force PCIe 1.0a mode */
7143 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7144 		    !tg3_flag(tp, 57765_PLUS) &&
7145 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
7146 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7147 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7149 		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7150 			tw32(GRC_MISC_CFG, (1 << 29));
7155 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7156 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7157 		tw32(GRC_VCPU_EXT_CTRL,
7158 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7161 	/* Manage gphy power for all CPMU absent PCIe devices. */
7162 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7163 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
/* This write triggers the actual core-clock reset. */
7165 	tw32(GRC_MISC_CFG, val);
7167 	/* restore 5701 hardware bug workaround write method */
7168 	tp->write32 = write_op;
7170 	/* Unfortunately, we have to delay before the PCI read back.
7171 	 * Some 575X chips even will not respond to a PCI cfg access
7172 	 * when the reset command is given to the chip.
7174 	 * How do these hardware designers expect things to work
7175 	 * properly if the PCI write is posted for a long period
7176 	 * of time?  It is always necessary to have some method by
7177 	 * which a register read back can occur to push the write
7178 	 * out which does the reset.
7180 	 * For most tg3 variants the trick below was working.
7185 	/* Flush PCI posted writes.  The normal MMIO registers
7186 	 * are inaccessible at this time so this is the only
7187 	 * way to make this reliably (actually, this is no longer
7188 	 * the case, see above).  I tried to use indirect
7189 	 * register read/write but this upset some 5701 variants.
7191 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7195 	if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
7198 		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7202 			/* Wait for link training to complete.  */
7203 			for (i = 0; i < 5000; i++)
7206 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7207 			pci_write_config_dword(tp->pdev, 0xc4,
7208 					       cfg_val | (1 << 15));
7211 		/* Clear the "no snoop" and "relaxed ordering" bits. */
7212 		pci_read_config_word(tp->pdev,
7213 				     tp->pcie_cap + PCI_EXP_DEVCTL,
7215 		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7216 			   PCI_EXP_DEVCTL_NOSNOOP_EN);
7218 		 * Older PCIe devices only support the 128 byte
7219 		 * MPS setting.  Enforce the restriction.
7221 		if (!tg3_flag(tp, CPMU_PRESENT))
7222 			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7223 		pci_write_config_word(tp->pdev,
7224 				      tp->pcie_cap + PCI_EXP_DEVCTL,
7227 		pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7229 		/* Clear error status */
7230 		pci_write_config_word(tp->pdev,
7231 				      tp->pcie_cap + PCI_EXP_DEVSTA,
7232 				      PCI_EXP_DEVSTA_CED |
7233 				      PCI_EXP_DEVSTA_NFED |
7234 				      PCI_EXP_DEVSTA_FED |
7235 				      PCI_EXP_DEVSTA_URD);
7238 	tg3_restore_pci_state(tp);
7240 	tg3_flag_clear(tp, CHIP_RESETTING);
7241 	tg3_flag_clear(tp, ERROR_PROCESSED);
7244 	if (tg3_flag(tp, 5780_CLASS))
7245 		val = tr32(MEMARB_MODE);
7246 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7248 	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7250 		tw32(0x5000, 0x400);
7253 	tw32(GRC_MODE, tp->grc_mode);
7255 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7258 		tw32(0xc4, val | (1 << 15));
7261 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7262 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7263 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7264 		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7265 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7266 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
/* Re-establish MAC mode: APE passthrough bits first, then the PHY
 * port mode based on the SERDES flavor.
 */
7269 	if (tg3_flag(tp, ENABLE_APE))
7270 		tp->mac_mode = MAC_MODE_APE_TX_EN |
7271 			       MAC_MODE_APE_RX_EN |
7272 			       MAC_MODE_TDE_ENABLE;
7274 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7275 		tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7277 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7278 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7283 	tw32_f(MAC_MODE, val);
7286 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7288 	err = tg3_poll_fw(tp);
7294 	if (tg3_flag(tp, PCI_EXPRESS) &&
7295 	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7296 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7297 	    !tg3_flag(tp, 57765_PLUS)) {
7300 		tw32(0x7c00, val | (1 << 25));
7303 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7304 		val = tr32(TG3_CPMU_CLCK_ORIDE);
7305 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7308 	/* Reprobe ASF enable state.  */
7309 	tg3_flag_clear(tp, ENABLE_ASF);
7310 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7311 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7312 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7315 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7316 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7317 			tg3_flag_set(tp, ENABLE_ASF);
7318 			tp->last_event_jiffies = jiffies;
7319 			if (tg3_flag(tp, 5750_PLUS))
7320 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7327 /* tp->lock is held. */
/* Pause the ASF firmware (when ASF is active and the APE is not
 * managing it): handshake consists of waiting for the RX CPU to ACK
 * the previous event, posting FWCMD_NICDRV_PAUSE_FW, then waiting for
 * the ACK of this new event.
 */
7328 static void tg3_stop_fw(struct tg3 *tp)
7330 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7331 		/* Wait for RX cpu to ACK the previous event. */
7332 		tg3_wait_for_event_ack(tp);
7334 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7336 		tg3_generate_fw_event(tp);
7338 		/* Wait for RX cpu to ACK this event. */
7339 		tg3_wait_for_event_ack(tp);
7343 /* tp->lock is held. */
/* Full shutdown sequence for reset kind @kind: signal firmware
 * (pre-reset), quiesce the hardware, reset the chip, reprogram the
 * MAC address, then signal firmware again (legacy + post-reset).
 * Returns the tg3_chip_reset() result.
 */
7344 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7350 	tg3_write_sig_pre_reset(tp, kind);
7352 	tg3_abort_hw(tp, silent);
7353 	err = tg3_chip_reset(tp);
7355 	__tg3_set_mac_addr(tp, 0);
7357 	tg3_write_sig_legacy(tp, kind);
7358 	tg3_write_sig_post_reset(tp, kind);
7366 #define RX_CPU_SCRATCH_BASE 0x30000
7367 #define RX_CPU_SCRATCH_SIZE 0x04000
7368 #define TX_CPU_SCRATCH_BASE 0x34000
7369 #define TX_CPU_SCRATCH_SIZE 0x04000
7371 /* tp->lock is held. */
/* Halt the RX or TX on-chip CPU at @offset (RX_CPU_BASE/TX_CPU_BASE).
 * 5705+ parts have no TX CPU (BUG_ON enforces that); 5906 halts its
 * VCPU via GRC_VCPU_EXT_CTRL instead. Polls until CPU_MODE_HALT
 * sticks, then clears the firmware's nvram arbitration grant.
 */
7372 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7376 	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7378 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7379 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
7381 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7384 	if (offset == RX_CPU_BASE) {
7385 		for (i = 0; i < 10000; i++) {
7386 			tw32(offset + CPU_STATE, 0xffffffff);
7387 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7388 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
/* Final halt attempt with a flushing write (tw32_f). */
7392 		tw32(offset + CPU_STATE, 0xffffffff);
7393 		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
/* TX CPU path: same polling loop, but no extra flush/delay variant. */
7396 		for (i = 0; i < 10000; i++) {
7397 			tw32(offset + CPU_STATE, 0xffffffff);
7398 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7399 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7405 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
7406 			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7410 	/* Clear firmware's nvram arbitration. */
7411 	if (tg3_flag(tp, NVRAM))
7412 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7417 	unsigned int fw_base;	/* CPU-visible start address of the firmware text */
7418 	unsigned int fw_len;	/* total firmware length in bytes (text..bss) */
7419 	const __be32 *fw_data;	/* big-endian firmware words to load */
7422 /* tp->lock is held. */
/* Copy firmware described by @info into @cpu_scratch_base (size
 * @cpu_scratch_size) for the CPU at @cpu_base. Takes the nvram lock
 * around the CPU halt because bootcode may still be loading, zeroes
 * the scratch area, then writes the firmware words one by one.
 */
7423 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7424 				 int cpu_scratch_size, struct fw_info *info)
7426 	int err, lock_err, i;
7427 	void (*write_op)(struct tg3 *, u32, u32);
7429 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7431 			"%s: Trying to load TX cpu firmware which is 5705\n",
/* 5705+ must use memory writes; older chips use indirect registers. */
7436 	if (tg3_flag(tp, 5705_PLUS))
7437 		write_op = tg3_write_mem;
7439 		write_op = tg3_write_indirect_reg32;
7441 	/* It is possible that bootcode is still loading at this point.
7442 	 * Get the nvram lock first before halting the cpu.
7444 	lock_err = tg3_nvram_lock(tp);
7445 	err = tg3_halt_cpu(tp, cpu_base);
7447 		tg3_nvram_unlock(tp);
/* Clear the scratch area before loading. */
7451 	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7452 		write_op(tp, cpu_scratch_base + i, 0);
7453 	tw32(cpu_base + CPU_STATE, 0xffffffff);
7454 	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7455 	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7456 		write_op(tp, (cpu_scratch_base +
7457 			      (info->fw_base & 0xffff) +
7459 		 be32_to_cpu(info->fw_data[i]));
7467 /* tp->lock is held. */
/* Load the 5701 A0 workaround firmware into both the RX and TX CPU
 * scratch areas, then start only the RX CPU and verify its program
 * counter landed on the firmware entry point (retrying up to 5 times).
 */
7468 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7470 	struct fw_info info;
7471 	const __be32 *fw_data;
7474 	fw_data = (void *)tp->fw->data;
7476 	/* Firmware blob starts with version numbers, followed by
7477 	   start address and length. We are setting complete length.
7478 	   length = end_address_of_bss - start_address_of_text.
7479 	   Remainder is the blob to be loaded contiguously
7480 	   from start address. */
7482 	info.fw_base = be32_to_cpu(fw_data[1]);
/* 12 = three u32 header words (version, base, length). */
7483 	info.fw_len = tp->fw->size - 12;
7484 	info.fw_data = &fw_data[3];
7486 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7487 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7492 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7493 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7498 	/* Now startup only the RX cpu. */
7499 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7500 	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7502 	for (i = 0; i < 5; i++) {
7503 		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7505 		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7506 		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
7507 		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7511 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7512 			   "should be %08x\n", __func__,
7513 			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
/* Release the RX CPU from halt (CPU_MODE = 0 starts execution). */
7516 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7517 	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
7522 /* tp->lock is held. */
/* Load software-TSO firmware. Skipped entirely when the chip has
 * hardware TSO (HW_TSO_1/2/3). 5705 loads into the RX CPU's mbuf pool
 * region; other chips load into the TX CPU scratch area. Startup and
 * PC verification mirror tg3_load_5701_a0_firmware_fix().
 */
7523 static int tg3_load_tso_firmware(struct tg3 *tp)
7525 	struct fw_info info;
7526 	const __be32 *fw_data;
7527 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7530 	if (tg3_flag(tp, HW_TSO_1) ||
7531 	    tg3_flag(tp, HW_TSO_2) ||
7532 	    tg3_flag(tp, HW_TSO_3))
7535 	fw_data = (void *)tp->fw->data;
7537 	/* Firmware blob starts with version numbers, followed by
7538 	   start address and length. We are setting complete length.
7539 	   length = end_address_of_bss - start_address_of_text.
7540 	   Remainder is the blob to be loaded contiguously
7541 	   from start address. */
7543 	info.fw_base = be32_to_cpu(fw_data[1]);
7544 	cpu_scratch_size = tp->fw_len;
/* 12 = three u32 header words (version, base, length). */
7545 	info.fw_len = tp->fw->size - 12;
7546 	info.fw_data = &fw_data[3];
7548 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7549 		cpu_base = RX_CPU_BASE;
7550 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7552 		cpu_base = TX_CPU_BASE;
7553 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7554 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7557 	err = tg3_load_firmware_cpu(tp, cpu_base,
7558 				    cpu_scratch_base, cpu_scratch_size,
7563 	/* Now startup the cpu. */
7564 	tw32(cpu_base + CPU_STATE, 0xffffffff);
7565 	tw32_f(cpu_base + CPU_PC, info.fw_base);
7567 	for (i = 0; i < 5; i++) {
7568 		if (tr32(cpu_base + CPU_PC) == info.fw_base)
7570 		tw32(cpu_base + CPU_STATE, 0xffffffff);
7571 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7572 		tw32_f(cpu_base + CPU_PC, info.fw_base);
7577 			   "%s fails to set CPU PC, is %08x should be %08x\n",
7578 			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
/* Release the CPU from halt to start firmware execution. */
7581 	tw32(cpu_base + CPU_STATE, 0xffffffff);
7582 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
/* ndo_set_mac_address handler. Copies the new address into the netdev,
 * and if the interface is running, programs it into the MAC address
 * registers under tp->lock. When ASF owns MAC address slot 1, that
 * slot is left untouched (skip_mac_1).
 */
7587 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7589 	struct tg3 *tp = netdev_priv(dev);
7590 	struct sockaddr *addr = p;
7591 	int err = 0, skip_mac_1 = 0;
7593 	if (!is_valid_ether_addr(addr->sa_data))
7596 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7598 	if (!netif_running(dev))
7601 	if (tg3_flag(tp, ENABLE_ASF)) {
7602 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
7604 		addr0_high = tr32(MAC_ADDR_0_HIGH);
7605 		addr0_low = tr32(MAC_ADDR_0_LOW);
7606 		addr1_high = tr32(MAC_ADDR_1_HIGH);
7607 		addr1_low = tr32(MAC_ADDR_1_LOW);
7609 		/* Skip MAC addr 1 if ASF is using it. */
7610 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7611 		    !(addr1_high == 0 && addr1_low == 0))
7614 	spin_lock_bh(&tp->lock);
7615 	__tg3_set_mac_addr(tp, skip_mac_1);
7616 	spin_unlock_bh(&tp->lock);
7621 /* tp->lock is held. */
/* Program one BDINFO structure in NIC SRAM at @bdinfo_addr: split the
 * 64-bit host DMA address into HIGH/LOW words, set maxlen/flags, and
 * (pre-5705 only) the NIC-local ring address.
 */
7622 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7623 			   dma_addr_t mapping, u32 maxlen_flags,
7627 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7628 		      ((u64) mapping >> 32));
7630 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7631 		      ((u64) mapping & 0xffffffff));
7633 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7636 	if (!tg3_flag(tp, 5705_PLUS))
7638 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7642 static void __tg3_set_rx_mode(struct net_device *);
/* Program the host coalescing engine from ethtool parameters @ec.
 * Vector 0's tx/rx knobs are only used when multivector TSS/RSS is NOT
 * active (otherwise they are zeroed); per-vector registers for vectors
 * 1..n are spaced 0x18 apart and trailing unused vectors are cleared.
 */
7643 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7647 	if (!tg3_flag(tp, ENABLE_TSS)) {
7648 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7649 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7650 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7652 		tw32(HOSTCC_TXCOL_TICKS, 0);
7653 		tw32(HOSTCC_TXMAX_FRAMES, 0);
7654 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7657 	if (!tg3_flag(tp, ENABLE_RSS)) {
7658 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7659 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7660 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7662 		tw32(HOSTCC_RXCOL_TICKS, 0);
7663 		tw32(HOSTCC_RXMAX_FRAMES, 0);
7664 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7667 	if (!tg3_flag(tp, 5705_PLUS)) {
7668 		u32 val = ec->stats_block_coalesce_usecs;
7670 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7671 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
/* Stats DMA is pointless with no carrier; the (elided) branch
 * presumably zeroes val in that case — TODO confirm.
 */
7673 		if (!netif_carrier_ok(tp->dev))
7676 		tw32(HOSTCC_STAT_COAL_TICKS, val);
/* Per-vector coalescing registers for vectors 1..irq_cnt-1. */
7679 	for (i = 0; i < tp->irq_cnt - 1; i++) {
7682 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7683 		tw32(reg, ec->rx_coalesce_usecs);
7684 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7685 		tw32(reg, ec->rx_max_coalesced_frames);
7686 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7687 		tw32(reg, ec->rx_max_coalesced_frames_irq);
7689 		if (tg3_flag(tp, ENABLE_TSS)) {
7690 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7691 			tw32(reg, ec->tx_coalesce_usecs);
7692 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7693 			tw32(reg, ec->tx_max_coalesced_frames);
7694 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7695 			tw32(reg, ec->tx_max_coalesced_frames_irq);
/* Zero the registers of any vectors beyond irq_cnt, up to irq_max. */
7699 	for (; i < tp->irq_max - 1; i++) {
7700 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7701 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7702 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7704 		if (tg3_flag(tp, ENABLE_TSS)) {
7705 			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7706 			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7707 			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7712 /* tp->lock is held. */
/* Reset all ring control blocks (RCBs) and mailboxes to a clean state:
 * disable every tx and rx-return ring except the first, zero all
 * mailboxes, clear the status blocks, and reprogram the BDINFO
 * structures for each active vector. The per-chip limits below encode
 * how many RCB slots each ASIC family has.
 */
7713 static void tg3_rings_reset(struct tg3 *tp)
7716 	u32 stblk, txrcb, rxrcb, limit;
7717 	struct tg3_napi *tnapi = &tp->napi[0];
7719 	/* Disable all transmit rings but the first. */
7720 	if (!tg3_flag(tp, 5705_PLUS))
7721 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7722 	else if (tg3_flag(tp, 5717_PLUS))
7723 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7724 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7725 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7727 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7729 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7730 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7731 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7732 			      BDINFO_FLAGS_DISABLED);
7735 	/* Disable all receive return rings but the first. */
7736 	if (tg3_flag(tp, 5717_PLUS))
7737 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7738 	else if (!tg3_flag(tp, 5705_PLUS))
7739 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7740 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7741 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7742 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7744 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7746 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7747 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7748 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7749 			      BDINFO_FLAGS_DISABLED);
7751 	/* Disable interrupts */
7752 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7754 	/* Zero mailbox registers. */
7755 	if (tg3_flag(tp, SUPPORT_MSIX)) {
7756 		for (i = 1; i < tp->irq_max; i++) {
7757 			tp->napi[i].tx_prod = 0;
7758 			tp->napi[i].tx_cons = 0;
7759 			if (tg3_flag(tp, ENABLE_TSS))
7760 				tw32_mailbox(tp->napi[i].prodmbox, 0);
7761 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
7762 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
/* Without TSS, only vector 0 has a tx producer mailbox. */
7764 		if (!tg3_flag(tp, ENABLE_TSS))
7765 			tw32_mailbox(tp->napi[0].prodmbox, 0);
7767 		tp->napi[0].tx_prod = 0;
7768 		tp->napi[0].tx_cons = 0;
7769 		tw32_mailbox(tp->napi[0].prodmbox, 0);
7770 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
7773 	/* Make sure the NIC-based send BD rings are disabled. */
7774 	if (!tg3_flag(tp, 5705_PLUS)) {
7775 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7776 		for (i = 0; i < 16; i++)
7777 			tw32_tx_mbox(mbox + i * 8, 0);
7780 	txrcb = NIC_SRAM_SEND_RCB;
7781 	rxrcb = NIC_SRAM_RCV_RET_RCB;
7783 	/* Clear status block in ram. */
7784 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7786 	/* Set status block DMA address */
7787 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7788 	     ((u64) tnapi->status_mapping >> 32));
7789 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7790 	     ((u64) tnapi->status_mapping & 0xffffffff));
/* Program vector 0's tx and rx-return BDINFO entries. */
7792 	if (tnapi->tx_ring) {
7793 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7794 			       (TG3_TX_RING_SIZE <<
7795 				BDINFO_FLAGS_MAXLEN_SHIFT),
7796 			       NIC_SRAM_TX_BUFFER_DESC);
7797 		txrcb += TG3_BDINFO_SIZE;
7800 	if (tnapi->rx_rcb) {
7801 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7802 			       (tp->rx_ret_ring_mask + 1) <<
7803 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7804 		rxrcb += TG3_BDINFO_SIZE;
/* Remaining vectors: status block addresses live at consecutive
 * HOSTCC_STATBLCK_RING1 slots.
 */
7807 	stblk = HOSTCC_STATBLCK_RING1;
7809 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7810 		u64 mapping = (u64)tnapi->status_mapping;
7811 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7812 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7814 		/* Clear status block in ram. */
7815 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7817 		if (tnapi->tx_ring) {
7818 			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7819 				       (TG3_TX_RING_SIZE <<
7820 					BDINFO_FLAGS_MAXLEN_SHIFT),
7821 				       NIC_SRAM_TX_BUFFER_DESC);
7822 			txrcb += TG3_BDINFO_SIZE;
7825 			tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7826 				       ((tp->rx_ret_ring_mask + 1) <<
7827 					BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7830 		rxrcb += TG3_BDINFO_SIZE;
/* Configure rx buffer-descriptor replenish thresholds. The BD cache
 * size depends on the ASIC generation; the threshold programmed is the
 * smaller of half the cache and 1/8 of the configured ring size (at
 * least 1). 57765+ parts also take low-water-mark registers. The jumbo
 * ring is configured the same way when the chip supports it.
 */
7834 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7836 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7838 	if (!tg3_flag(tp, 5750_PLUS) ||
7839 	    tg3_flag(tp, 5780_CLASS) ||
7840 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7841 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7842 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7843 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7844 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7845 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7847 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7849 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7850 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7852 	val = min(nic_rep_thresh, host_rep_thresh);
7853 	tw32(RCVBDI_STD_THRESH, val);
7855 	if (tg3_flag(tp, 57765_PLUS))
7856 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
/* No jumbo ring on non-jumbo-capable or 5780-class devices. */
7858 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7861 	if (!tg3_flag(tp, 5705_PLUS))
7862 		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
7864 		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
7866 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
7868 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
7869 	tw32(RCVBDI_JUMBO_THRESH, val);
7871 	if (tg3_flag(tp, 57765_PLUS))
7872 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
/*
 * tg3_reset_hw() - take the chip through a full reset and bring it
 * back to an operating state: chip reset, per-ASIC CPMU/PCIe
 * workarounds, ring init, buffer-manager and DMA engine setup, host
 * coalescing, MAC/TX/RX mode programming, RSS, receive rules and
 * finally PHY/link setup.
 *
 * @tp:        device private state; tp->lock must be held (see below).
 * @reset_phy: link-setup flag -- presumably forwarded to
 *             tg3_setup_phy(); in this listing the call passes a
 *             literal 0, TODO confirm against pristine source.
 *
 * Returns 0 on success or a negative error from the reset /
 * firmware-load / PHY helpers.
 *
 * NOTE(review): this extracted listing has lost lines relative to the
 * real file -- the declaration of "err", several error-path returns,
 * "else" keywords and closing braces are missing.  The code below is
 * kept byte-identical to the listing; verify against pristine tg3.c.
 */
7875 /* tp->lock is held. */
7876 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7878 u32 val, rdmac_mode;
7880 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7882 tg3_disable_ints(tp);
7886 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7888 if (tg3_flag(tp, INIT_COMPLETE))
7889 tg3_abort_hw(tp, 1);
7891 /* Enable MAC control of LPI */
7892 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7893 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7894 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7895 TG3_CPMU_EEE_LNKIDL_UART_IDL);
7897 tw32_f(TG3_CPMU_EEE_CTRL,
7898 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7900 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7901 TG3_CPMU_EEEMD_LPI_IN_TX |
7902 TG3_CPMU_EEEMD_LPI_IN_RX |
7903 TG3_CPMU_EEEMD_EEE_ENABLE;
7905 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7906 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7908 if (tg3_flag(tp, ENABLE_APE))
7909 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7911 tw32_f(TG3_CPMU_EEE_MODE, val);
7913 tw32_f(TG3_CPMU_EEE_DBTMR1,
7914 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7915 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7917 tw32_f(TG3_CPMU_EEE_DBTMR2,
7918 TG3_CPMU_DBTMR2_APE_TX_2047US |
7919 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
/* Full chip reset; everything below reprograms the device from
 * scratch.  NOTE(review): "err" is used without a visible declaration
 * and the error-check after this call was lost in extraction. */
7925 err = tg3_chip_reset(tp);
7929 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
/* CHIPREV_5784_AX: turn off the CPMU link-aware/link-idle power modes
 * and force the 10MB, link-aware and host-access MAC clocks to the
 * 6.25MHz setting. */
7931 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7932 val = tr32(TG3_CPMU_CTRL);
7933 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7934 tw32(TG3_CPMU_CTRL, val);
7936 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7937 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7938 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7939 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7941 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7942 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7943 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7944 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7946 val = tr32(TG3_CPMU_HST_ACC);
7947 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7948 val |= CPMU_HST_ACC_MACCLK_6_25;
7949 tw32(TG3_CPMU_HST_ACC, val);
/* 57780: raise the ASPM L1 entry threshold to 4ms, stretch the PCIe
 * electrical-idle delay, clear correctable-error status and disable
 * L1 PLL power-down. */
7952 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7953 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7954 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7955 PCIE_PWR_MGMT_L1_THRESH_4MS;
7956 tw32(PCIE_PWR_MGMT_THRESH, val);
7958 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7959 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7961 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7963 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7964 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
/* Conversely, allow the L1 PLL to power down when the flag says so. */
7967 if (tg3_flag(tp, L1PLLPD_EN)) {
7968 u32 grc_mode = tr32(GRC_MODE);
7970 /* Access the lower 1K of PL PCIE block registers. */
7971 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7972 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7974 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7975 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7976 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7978 tw32(GRC_MODE, grc_mode);
/* 57765-specific PCIe PHY workarounds (A0 L2CLKREQ disable, FTS-max
 * adjustment on non-AX steppings, 6.25MHz 10MB clock). */
7981 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7982 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7983 u32 grc_mode = tr32(GRC_MODE);
7985 /* Access the lower 1K of PL PCIE block registers. */
7986 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7987 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7989 val = tr32(TG3_PCIE_TLDLPL_PORT +
7990 TG3_PCIE_PL_LO_PHYCTL5);
7991 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7992 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7994 tw32(GRC_MODE, grc_mode);
7997 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
7998 u32 grc_mode = tr32(GRC_MODE);
8000 /* Access the lower 1K of DL PCIE block registers. */
8001 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8002 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8004 val = tr32(TG3_PCIE_TLDLPL_PORT +
8005 TG3_PCIE_DL_LO_FTSMAX);
8006 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8007 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8008 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8010 tw32(GRC_MODE, grc_mode);
8013 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8014 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8015 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8016 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8019 /* This works around an issue with Athlon chipsets on
8020 * B3 tigon3 silicon. This bit has no effect on any
8021 * other revision. But do not set this on PCI Express
8022 * chips and don't even touch the clocks if the CPMU is present.
8024 if (!tg3_flag(tp, CPMU_PRESENT)) {
8025 if (!tg3_flag(tp, PCI_EXPRESS))
8026 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8027 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
/* 5704 A0 in PCI-X mode: force retries to reuse the same DMA address. */
8030 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8031 tg3_flag(tp, PCIX_MODE)) {
8032 val = tr32(TG3PCI_PCISTATE);
8033 val |= PCISTATE_RETRY_SAME_DMA;
8034 tw32(TG3PCI_PCISTATE, val);
8037 if (tg3_flag(tp, ENABLE_APE)) {
8038 /* Allow reads and writes to the
8039 * APE register and memory space.
8041 val = tr32(TG3PCI_PCISTATE);
8042 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8043 PCISTATE_ALLOW_APE_SHMEM_WR |
8044 PCISTATE_ALLOW_APE_PSPACE_WR;
8045 tw32(TG3PCI_PCISTATE, val);
8048 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8049 /* Enable some hw fixes. */
8050 val = tr32(TG3PCI_MSI_DATA);
8051 val |= (1 << 26) | (1 << 28) | (1 << 29);
8052 tw32(TG3PCI_MSI_DATA, val);
8055 /* Descriptor ring init may make accesses to the
8056 * NIC SRAM area to setup the TX descriptors, so we
8057 * can only do this after the hardware has been
8058 * successfully reset.
8060 err = tg3_init_rings(tp);
/* Program the DMA read/write control register; for 57765_PLUS parts
 * apply the per-stepping quirk bits, otherwise use the value measured
 * at probe time (except on 5784/5761). */
8064 if (tg3_flag(tp, 57765_PLUS)) {
8065 val = tr32(TG3PCI_DMA_RW_CTRL) &
8066 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8067 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8068 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8069 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8070 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8071 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8072 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8073 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8074 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8075 /* This value is determined during the probe time DMA
8076 * engine test, tg3_test_dma.
8078 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8081 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8082 GRC_MODE_4X_NIC_SEND_RINGS |
8083 GRC_MODE_NO_TX_PHDR_CSUM |
8084 GRC_MODE_NO_RX_PHDR_CSUM);
8085 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8087 /* Pseudo-header checksum is done by hardware logic and not
8088 * the offload processers, so make the chip do the pseudo-
8089 * header checksums on receive. For transmit it is more
8090 * convenient to do the pseudo-header checksum in software
8091 * as Linux does that on transmit for us in all cases.
8093 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8097 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8099 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8100 val = tr32(GRC_MISC_CFG);
8102 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8103 tw32(GRC_MISC_CFG, val);
8105 /* Initialize MBUF/DESC pool. */
8106 if (tg3_flag(tp, 5750_PLUS)) {
8108 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8109 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8110 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8111 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8113 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8114 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8115 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8116 } else if (tg3_flag(tp, TSO_CAPABLE)) {
/* Carve the TSO firmware image (rounded up to 0x80) out of the 5705
 * mbuf pool. */
8119 fw_len = tp->fw_len;
8120 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8121 tw32(BUFMGR_MB_POOL_ADDR,
8122 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8123 tw32(BUFMGR_MB_POOL_SIZE,
8124 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
/* Standard-MTU vs jumbo watermark sets from bufmgr_config. */
8127 if (tp->dev->mtu <= ETH_DATA_LEN) {
8128 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8129 tp->bufmgr_config.mbuf_read_dma_low_water);
8130 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8131 tp->bufmgr_config.mbuf_mac_rx_low_water);
8132 tw32(BUFMGR_MB_HIGH_WATER,
8133 tp->bufmgr_config.mbuf_high_water);
8135 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8136 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8137 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8138 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8139 tw32(BUFMGR_MB_HIGH_WATER,
8140 tp->bufmgr_config.mbuf_high_water_jumbo);
8142 tw32(BUFMGR_DMA_LOW_WATER,
8143 tp->bufmgr_config.dma_low_water);
8144 tw32(BUFMGR_DMA_HIGH_WATER,
8145 tp->bufmgr_config.dma_high_water);
/* Enable the buffer manager and poll (up to 2000 iterations) for the
 * enable bit to stick; complain if it never does. */
8147 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8148 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8149 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8150 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8151 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8152 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8153 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8154 tw32(BUFMGR_MODE, val);
8155 for (i = 0; i < 2000; i++) {
8156 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8161 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8165 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8166 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8168 tg3_setup_rxbd_thresholds(tp);
8170 /* Initialize TG3_BDINFO's at:
8171 * RCVDBDI_STD_BD: standard eth size rx ring
8172 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8173 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8176 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8177 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8178 * ring attribute flags
8179 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8181 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8182 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8184 * The size of each ring is fixed in the firmware, but the location is
8187 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8188 ((u64) tpr->rx_std_mapping >> 32));
8189 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8190 ((u64) tpr->rx_std_mapping & 0xffffffff));
8191 if (!tg3_flag(tp, 5717_PLUS))
8192 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8193 NIC_SRAM_RX_BUFFER_DESC);
8195 /* Disable the mini ring */
8196 if (!tg3_flag(tp, 5705_PLUS))
8197 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8198 BDINFO_FLAGS_DISABLED);
8200 /* Program the jumbo buffer descriptor ring control
8201 * blocks on those devices that have them.
8203 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8204 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8206 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8207 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8208 ((u64) tpr->rx_jmb_mapping >> 32));
8209 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8210 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8211 val = TG3_RX_JMB_RING_SIZE(tp) <<
8212 BDINFO_FLAGS_MAXLEN_SHIFT;
8213 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8214 val | BDINFO_FLAGS_USE_EXT_RECV);
8215 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8216 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8217 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8218 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8220 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8221 BDINFO_FLAGS_DISABLED);
/* Standard ring MAXLEN_FLAGS: 57765_PLUS encodes both ring size and
 * the RX DMA buffer size; older chips take only one of the two. */
8224 if (tg3_flag(tp, 57765_PLUS)) {
8225 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8226 val = TG3_RX_STD_MAX_SIZE_5700;
8228 val = TG3_RX_STD_MAX_SIZE_5717;
8229 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8230 val |= (TG3_RX_STD_DMA_SZ << 2);
8232 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8234 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8236 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
/* Publish initial producer indices to the RX mailboxes. */
8238 tpr->rx_std_prod_idx = tp->rx_pending;
8239 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8241 tpr->rx_jmb_prod_idx =
8242 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8243 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8245 tg3_rings_reset(tp);
8247 /* Initialize MAC address and backoff seed. */
8248 __tg3_set_mac_addr(tp, 0);
8250 /* MTU + ethernet header + FCS + optional VLAN tag */
8251 tw32(MAC_RX_MTU_SIZE,
8252 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8254 /* The slot time is changed by tg3_setup_phy if we
8255 * run at gigabit with half duplex.
8257 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8258 (6 << TX_LENGTHS_IPG_SHIFT) |
8259 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8261 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8262 val |= tr32(MAC_TX_LENGTHS) &
8263 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8264 TX_LENGTHS_CNT_DWN_VAL_MSK);
8266 tw32(MAC_TX_LENGTHS, val);
8268 /* Receive rules. */
8269 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8270 tw32(RCVLPC_CONFIG, 0x0181);
8272 /* Calculate RDMAC_MODE setting early, we need it to determine
8273 * the RCVLPC_STATE_ENABLE mask.
8275 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8276 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8277 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8278 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8279 RDMAC_MODE_LNGREAD_ENAB);
8281 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8282 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8284 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8285 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8286 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8287 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8288 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8289 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8291 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8292 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8293 if (tg3_flag(tp, TSO_CAPABLE) &&
8294 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8295 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8296 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8297 !tg3_flag(tp, IS_5788)) {
8298 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8302 if (tg3_flag(tp, PCI_EXPRESS))
8303 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8305 if (tg3_flag(tp, HW_TSO_1) ||
8306 tg3_flag(tp, HW_TSO_2) ||
8307 tg3_flag(tp, HW_TSO_3))
8308 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8310 if (tg3_flag(tp, 57765_PLUS) ||
8311 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8312 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8313 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8315 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8316 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8318 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8319 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8320 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8321 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8322 tg3_flag(tp, 57765_PLUS)) {
8323 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8324 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8325 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8326 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8327 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8328 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8329 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8330 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8331 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8333 tw32(TG3_RDMA_RSRVCTRL_REG,
8334 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8337 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8338 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8339 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8340 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8341 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8342 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8345 /* Receive/send statistics. */
8346 if (tg3_flag(tp, 5750_PLUS)) {
8347 val = tr32(RCVLPC_STATS_ENABLE);
8348 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8349 tw32(RCVLPC_STATS_ENABLE, val);
8350 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8351 tg3_flag(tp, TSO_CAPABLE)) {
8352 val = tr32(RCVLPC_STATS_ENABLE);
8353 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8354 tw32(RCVLPC_STATS_ENABLE, val);
8356 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8358 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8359 tw32(SNDDATAI_STATSENAB, 0xffffff);
8360 tw32(SNDDATAI_STATSCTRL,
8361 (SNDDATAI_SCTRL_ENABLE |
8362 SNDDATAI_SCTRL_FASTUPD));
8364 /* Setup host coalescing engine. */
8365 tw32(HOSTCC_MODE, 0);
8366 for (i = 0; i < 2000; i++) {
8367 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8372 __tg3_set_coalesce(tp, &tp->coal);
8374 if (!tg3_flag(tp, 5705_PLUS)) {
8375 /* Status/statistics block address. See tg3_timer,
8376 * the tg3_periodic_fetch_stats call there, and
8377 * tg3_get_stats to see how this works for 5705/5750 chips.
8379 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8380 ((u64) tp->stats_mapping >> 32));
8381 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8382 ((u64) tp->stats_mapping & 0xffffffff));
8383 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8385 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8387 /* Clear statistics and status block memory areas */
8388 for (i = NIC_SRAM_STATS_BLK;
8389 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8391 tg3_write_mem(tp, i, 0);
8396 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8398 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8399 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8400 if (!tg3_flag(tp, 5705_PLUS))
8401 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8403 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8404 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8405 /* reset to prevent losing 1st rx packet intermittently */
8406 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8410 if (tg3_flag(tp, ENABLE_APE))
8411 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8414 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8415 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8416 if (!tg3_flag(tp, 5705_PLUS) &&
8417 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8418 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8419 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8420 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8423 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8424 * If TG3_FLAG_IS_NIC is zero, we should read the
8425 * register to preserve the GPIO settings for LOMs. The GPIOs,
8426 * whether used as inputs or outputs, are set by boot code after
8429 if (!tg3_flag(tp, IS_NIC)) {
8432 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8433 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8434 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8436 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8437 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8438 GRC_LCLCTRL_GPIO_OUTPUT3;
8440 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8441 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8443 tp->grc_local_ctrl &= ~gpio_mask;
8444 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8446 /* GPIO1 must be driven high for eeprom write protect */
8447 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8448 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8449 GRC_LCLCTRL_GPIO_OUTPUT1);
8451 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
/* Multi-vector MSI-X: enable per-vector interrupt routing. */
8454 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8455 val = tr32(MSGINT_MODE);
8456 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8457 tw32(MSGINT_MODE, val);
8460 if (!tg3_flag(tp, 5705_PLUS)) {
8461 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
/* Build and program the write-DMA engine mode. */
8465 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8466 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8467 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8468 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8469 WDMAC_MODE_LNGREAD_ENAB);
8471 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8472 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8473 if (tg3_flag(tp, TSO_CAPABLE) &&
8474 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8475 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8477 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8478 !tg3_flag(tp, IS_5788)) {
8479 val |= WDMAC_MODE_RX_ACCEL;
8483 /* Enable host coalescing bug fix */
8484 if (tg3_flag(tp, 5755_PLUS))
8485 val |= WDMAC_MODE_STATUS_TAG_FIX;
8487 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8488 val |= WDMAC_MODE_BURST_ALL_DATA;
8490 tw32_f(WDMAC_MODE, val);
/* PCI-X: bump the maximum memory read byte count to 2K. */
8493 if (tg3_flag(tp, PCIX_MODE)) {
8496 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8498 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8499 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8500 pcix_cmd |= PCI_X_CMD_READ_2K;
8501 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8502 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8503 pcix_cmd |= PCI_X_CMD_READ_2K;
8505 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8509 tw32_f(RDMAC_MODE, rdmac_mode);
/* Enable the remaining block engines. */
8512 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8513 if (!tg3_flag(tp, 5705_PLUS))
8514 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8516 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8518 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8520 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8522 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8523 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8524 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8525 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8526 val |= RCVDBDI_MODE_LRG_RING_SZ;
8527 tw32(RCVDBDI_MODE, val);
8528 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8529 if (tg3_flag(tp, HW_TSO_1) ||
8530 tg3_flag(tp, HW_TSO_2) ||
8531 tg3_flag(tp, HW_TSO_3))
8532 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8533 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8534 if (tg3_flag(tp, ENABLE_TSS))
8535 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8536 tw32(SNDBDI_MODE, val);
8537 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
/* Load firmware patches where the silicon needs them. */
8539 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8540 err = tg3_load_5701_a0_firmware_fix(tp);
8545 if (tg3_flag(tp, TSO_CAPABLE)) {
8546 err = tg3_load_tso_firmware(tp);
8551 tp->tx_mode = TX_MODE_ENABLE;
8553 if (tg3_flag(tp, 5755_PLUS) ||
8554 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8555 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8557 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8558 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8559 tp->tx_mode &= ~val;
8560 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8563 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* RSS: fill the indirection table round-robin over the RX interrupt
 * vectors (irq_cnt - 1 of them -- vector 0 is not an RX vector here)
 * and program the fixed hash key. */
8566 if (tg3_flag(tp, ENABLE_RSS)) {
8567 u32 reg = MAC_RSS_INDIR_TBL_0;
8568 u8 *ent = (u8 *)&val;
8570 /* Setup the indirection table */
8571 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8572 int idx = i % sizeof(val);
8574 ent[idx] = i % (tp->irq_cnt - 1);
8575 if (idx == sizeof(val) - 1) {
8581 /* Setup the "secret" hash key. */
8582 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8583 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8584 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8585 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8586 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8587 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8588 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8589 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8590 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8591 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8594 tp->rx_mode = RX_MODE_ENABLE;
8595 if (tg3_flag(tp, 5755_PLUS))
8596 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8598 if (tg3_flag(tp, ENABLE_RSS))
8599 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8600 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8601 RX_MODE_RSS_IPV6_HASH_EN |
8602 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8603 RX_MODE_RSS_IPV4_HASH_EN |
8604 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8606 tw32_f(MAC_RX_MODE, tp->rx_mode);
8609 tw32(MAC_LED_CTRL, tp->led_ctrl);
8611 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8612 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8613 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8616 tw32_f(MAC_RX_MODE, tp->rx_mode);
8619 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8620 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8621 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8622 /* Set drive transmission level to 1.2V */
8623 /* only if the signal pre-emphasis bit is not set */
8624 val = tr32(MAC_SERDES_CFG);
8627 tw32(MAC_SERDES_CFG, val);
8629 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8630 tw32(MAC_SERDES_CFG, 0x616000);
8633 /* Prevent chip from dropping frames when flow control
8636 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8640 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8642 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8643 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8644 /* Use hardware link auto-negotiation */
8645 tg3_flag_set(tp, HW_AUTONEG);
8648 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8649 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
/* 5714 MII-serdes: use the on-chip signal detect. */
8652 tmp = tr32(SERDES_RX_CTRL);
8653 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8654 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8655 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8656 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
/* When phylib is not managing the PHY: restore any speed/duplex/
 * autoneg settings saved across a low-power transition, then rerun
 * link setup. */
8659 if (!tg3_flag(tp, USE_PHYLIB)) {
8660 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8661 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8662 tp->link_config.speed = tp->link_config.orig_speed;
8663 tp->link_config.duplex = tp->link_config.orig_duplex;
8664 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8667 err = tg3_setup_phy(tp, 0);
8671 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8672 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8675 /* Clear CRC stats. */
8676 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8677 tg3_writephy(tp, MII_TG3_TEST1,
8678 tmp | MII_TG3_TEST1_CRC_EN);
8679 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8684 __tg3_set_rx_mode(tp->dev);
8686 /* Initialize receive rules. */
8687 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8688 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8689 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8690 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
/* Zero the unused receive rule slots.  NOTE(review): the limit
 * computation and the switch that selects how many slots to clear was
 * lost in this extraction -- the bare tw32 pairs below are presumably
 * switch fall-through cases; verify against pristine tg3.c. */
8692 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8696 if (tg3_flag(tp, ENABLE_ASF))
8700 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8702 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8704 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8706 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8708 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8710 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8712 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8714 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8716 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8718 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8720 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8722 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8724 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8726 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8734 if (tg3_flag(tp, ENABLE_APE))
8735 /* Write our heartbeat update interval to APE. */
8736 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8737 APE_HOST_HEARTBEAT_INT_DISABLE);
8739 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8744 /* Called at device open time to get the chip ready for
8745 * packet processing. Invoked with tp->lock held.
/*
 * @reset_phy is forwarded untouched to tg3_reset_hw().
 * Returns whatever tg3_reset_hw() returns (0 on success).
 * NOTE(review): the closing brace of this function was lost in this
 * extracted listing.
 */
8747 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
/* Clock selection helper defined elsewhere in this file. */
8749 tg3_switch_clocks(tp);
/* Reset the PCI memory-window base before the full re-init below. */
8751 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8753 return tg3_reset_hw(tp, reset_phy);
/*
 * TG3_STAT_ADD32(PSTAT, REG): read the 32-bit hardware counter at REG
 * and accumulate it into the 64-bit software statistic PSTAT (a
 * low/high pair).  If the addition wraps (low ends up smaller than the
 * value just added), carry into the high word.
 *
 * NOTE(review): the terminating "} while (0)" line of this macro is
 * missing from this extracted listing -- verify against pristine
 * tg3.c.  (No comments are inserted inside the macro body: a comment
 * line without a trailing backslash would truncate the macro.)
 */
8756 #define TG3_STAT_ADD32(PSTAT, REG) \
8757 do { u32 __val = tr32(REG); \
8758 (PSTAT)->low += __val; \
8759 if ((PSTAT)->low < __val) \
8760 (PSTAT)->high += 1; \
/*
 * tg3_periodic_fetch_stats() - fold the MAC's 32-bit hardware TX/RX
 * statistics counters into the 64-bit software copies in tp->hw_stats.
 * Called from the once-per-second section of tg3_timer() (see below),
 * and bails out early when the link carrier is down.
 */
8763 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8765 struct tg3_hw_stats *sp = tp->hw_stats;
/* Nothing to accumulate while the link is down. */
8767 if (!netif_carrier_ok(tp->dev))
8770 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8771 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8772 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8773 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8774 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8775 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8776 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8777 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8778 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8779 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8780 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8781 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8782 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8784 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8785 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8786 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8787 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8788 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8789 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8790 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8791 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8792 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8793 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8794 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8795 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8796 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8797 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8799 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
/* Non-5717: the RCVLPC discard counter is usable directly.  On 5717
 * the code instead counts (and write-1-clears) the mbuf low-watermark
 * attention bit, one event per invocation -- presumably because the
 * RCVLPC counter is unreliable there; TODO confirm. */
8800 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
8801 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8803 u32 val = tr32(HOSTCC_FLOW_ATTN);
8804 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8806 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8807 sp->rx_discards.low += val;
8808 if (sp->rx_discards.low < val)
8809 sp->rx_discards.high += 1;
8811 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8813 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
/*
 * tg3_timer() - the driver's periodic housekeeping timer.
 *
 * Runs under tp->lock.  Responsibilities visible below:
 *  - non-TAGGED_STATUS chips: kick the interrupt / coalescing mailbox
 *    race workaround and, if the write-DMA engine has died, schedule
 *    the reset task and bail out;
 *  - once per second (timer_counter): fetch hardware stats, re-arm
 *    EEE, and poll/handle link state depending on the link-change
 *    reporting mechanism (USE_LINKCHG_REG, POLL_SERDES, or MII-serdes
 *    parallel detect);
 *  - every asf_counter ticks: send the ASF/IPMI firmware heartbeat;
 *  - finally re-arm itself via add_timer().
 */
8816 static void tg3_timer(unsigned long __opaque)
8818 struct tg3 *tp = (struct tg3 *) __opaque;
8823 spin_lock(&tp->lock);
8825 if (!tg3_flag(tp, TAGGED_STATUS)) {
8826 /* All of this garbage is because when using non-tagged
8827 * IRQ status the mailbox/status_block protocol the chip
8828 * uses with the cpu is race prone.
8830 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8831 tw32(GRC_LOCAL_CTRL,
8832 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8834 tw32(HOSTCC_MODE, tp->coalesce_mode |
8835 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
/* Write-DMA engine stopped: hand recovery to the reset task; the
 * lock is dropped before scheduling. */
8838 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8839 tg3_flag_set(tp, RESTART_TIMER);
8840 spin_unlock(&tp->lock);
8841 schedule_work(&tp->reset_task);
8846 /* This part only runs once per second. */
8847 if (!--tp->timer_counter) {
8848 if (tg3_flag(tp, 5705_PLUS))
8849 tg3_periodic_fetch_stats(tp);
8851 if (tp->setlpicnt && !--tp->setlpicnt)
8852 tg3_phy_eee_enable(tp);
8854 if (tg3_flag(tp, USE_LINKCHG_REG)) {
/* Poll MAC_STATUS for a link change (MI interrupt or LNKSTATE bit,
 * depending on phy_flags) and rerun link setup if one occurred. */
8858 mac_stat = tr32(MAC_STATUS);
8861 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8862 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8864 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8868 tg3_setup_phy(tp, 0);
8869 } else if (tg3_flag(tp, POLL_SERDES)) {
8870 u32 mac_stat = tr32(MAC_STATUS);
/* Serdes polling: detect link loss while carrier is up, or signal
 * while carrier is down, and rerun link setup accordingly. */
8873 if (netif_carrier_ok(tp->dev) &&
8874 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8877 if (!netif_carrier_ok(tp->dev) &&
8878 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8879 MAC_STATUS_SIGNAL_DET))) {
8883 if (!tp->serdes_counter) {
8886 ~MAC_MODE_PORT_MODE_MASK));
8888 tw32_f(MAC_MODE, tp->mac_mode);
8891 tg3_setup_phy(tp, 0);
8893 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8894 tg3_flag(tp, 5780_CLASS)) {
8895 tg3_serdes_parallel_detect(tp);
8898 tp->timer_counter = tp->timer_multiplier;
8901 /* Heartbeat is only sent once every 2 seconds.
8903 * The heartbeat is to tell the ASF firmware that the host
8904 * driver is still alive. In the event that the OS crashes,
8905 * ASF needs to reset the hardware to free up the FIFO space
8906 * that may be filled with rx packets destined for the host.
8907 * If the FIFO is full, ASF will no longer function properly.
8909 * Unintended resets have been reported on real time kernels
8910 * where the timer doesn't run on time. Netpoll will also have
8913 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8914 * to check the ring condition when the heartbeat is expiring
8915 * before doing the reset. This will prevent most unintended
8918 if (!--tp->asf_counter) {
8919 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
8920 tg3_wait_for_event_ack(tp);
8922 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8923 FWCMD_NICDRV_ALIVE3);
8924 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8925 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8926 TG3_FW_UPDATE_TIMEOUT_SEC);
8928 tg3_generate_fw_event(tp);
8930 tp->asf_counter = tp->asf_multiplier;
8933 spin_unlock(&tp->lock);
/* Re-arm: the timer is self-perpetuating. */
8936 tp->timer.expires = jiffies + tp->timer_offset;
8937 add_timer(&tp->timer);
/* tg3_request_irq() - pick the ISR, flags, and name for vector
 * @irq_num and register it with request_irq().
 * With one vector the device name is used directly; with several, a
 * per-vector "<dev>-<n>" label is formatted into tnapi->irq_lbl.
 * Returns the request_irq() result (0 on success).
 * NOTE(review): lines are elided in this listing (e.g. the default
 * ISR assignment); comments cover only what is visible.
 */
8940 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8943 unsigned long flags;
8945 struct tg3_napi *tnapi = &tp->napi[irq_num];
8947 if (tp->irq_cnt == 1)
8948 name = tp->dev->name;
8950 name = &tnapi->irq_lbl[0];
8951 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8952 name[IFNAMSIZ-1] = 0;
/* MSI/MSI-X vectors are exclusive; legacy INTx falls through to a
 * shared line with the tagged/untagged handler chosen below.
 */
8955 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
8957 if (tg3_flag(tp, 1SHOT_MSI))
8962 if (tg3_flag(tp, TAGGED_STATUS))
8963 fn = tg3_interrupt_tagged;
8964 flags = IRQF_SHARED;
8967 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
/* tg3_test_interrupt() - verify that interrupt delivery works on
 * vector 0.  Swaps in a test ISR, triggers a coalescing-now event,
 * then polls the interrupt mailbox up to 5 times for evidence of
 * delivery before restoring the normal ISR via tg3_request_irq().
 * NOTE(review): interior lines are elided in this listing.
 */
8970 static int tg3_test_interrupt(struct tg3 *tp)
8972 struct tg3_napi *tnapi = &tp->napi[0];
8973 struct net_device *dev = tp->dev;
8974 int err, i, intr_ok = 0;
8977 if (!netif_running(dev))
8980 tg3_disable_ints(tp);
8982 free_irq(tnapi->irq_vec, tnapi);
8985 * Turn off MSI one shot mode. Otherwise this test has no
8986 * observable way to know whether the interrupt was delivered.
8988 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
8989 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8990 tw32(MSGINT_MODE, val);
8993 err = request_irq(tnapi->irq_vec, tg3_test_isr,
8994 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8998 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8999 tg3_enable_ints(tp);
/* Force an immediate coalescing event so an interrupt fires now. */
9001 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9004 for (i = 0; i < 5; i++) {
9005 u32 int_mbox, misc_host_ctrl;
9007 int_mbox = tr32_mailbox(tnapi->int_mbox);
9008 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
/* A non-zero mailbox or a masked PCI INT both prove delivery. */
9010 if ((int_mbox != 0) ||
9011 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9019 tg3_disable_ints(tp);
9021 free_irq(tnapi->irq_vec, tnapi);
9023 err = tg3_request_irq(tp, 0);
9029 /* Reenable MSI one shot mode. */
9030 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9031 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9032 tw32(MSGINT_MODE, val);
9040 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9041 * successfully restored
/* tg3_test_msi() - run tg3_test_interrupt() under MSI; if no interrupt
 * arrives, fall back to legacy INTx and reset the chip (the failed MSI
 * cycle may have ended with a Master Abort).  SERR reporting is masked
 * for the duration of the test and restored afterwards.
 * NOTE(review): interior lines are elided in this listing.
 */
9043 static int tg3_test_msi(struct tg3 *tp)
9048 if (!tg3_flag(tp, USING_MSI))
9051 /* Turn off SERR reporting in case MSI terminates with Master
9054 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9055 pci_write_config_word(tp->pdev, PCI_COMMAND,
9056 pci_cmd & ~PCI_COMMAND_SERR);
9058 err = tg3_test_interrupt(tp);
9060 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9065 /* other failures */
9069 /* MSI test failed, go back to INTx mode */
9070 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9071 "to INTx mode. Please report this failure to the PCI "
9072 "maintainer and include system chipset information\n");
9074 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9076 pci_disable_msi(tp->pdev);
9078 tg3_flag_clear(tp, USING_MSI);
9079 tp->napi[0].irq_vec = tp->pdev->irq;
9081 err = tg3_request_irq(tp, 0);
9085 /* Need to reset the chip because the MSI cycle may have terminated
9086 * with Master Abort.
9088 tg3_full_lock(tp, 1);
9090 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9091 err = tg3_init_hw(tp, 1);
9093 tg3_full_unlock(tp);
9096 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
/* tg3_request_firmware() - load the firmware blob named by
 * tp->fw_needed via request_firmware() and sanity-check its length
 * header.  On success, tp->fw holds the blob and tp->fw_needed is
 * cleared so we never re-request it.
 * NOTE(review): interior lines are elided in this listing.
 */
9101 static int tg3_request_firmware(struct tg3 *tp)
9103 const __be32 *fw_data;
9105 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9106 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9111 fw_data = (void *)tp->fw->data;
9113 /* Firmware blob starts with version numbers, followed by
9114 * start address and _full_ length including BSS sections
9115 * (which must be longer than the actual data, of course
9118 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
/* Reject a blob whose declared full length is shorter than the data
 * actually present after the 12-byte header -- it would be bogus.
 */
9119 if (tp->fw_len < (tp->fw->size - 12)) {
9120 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9121 tp->fw_len, tp->fw_needed);
9122 release_firmware(tp->fw);
9127 /* We no longer need firmware; we have it. */
9128 tp->fw_needed = NULL;
/* tg3_enable_msix() - try to enable MSI-X with one vector per online
 * CPU plus one (vector 0 handles link interrupts etc.), capped at
 * tp->irq_max.  If the PCI core grants fewer vectors, retry with the
 * granted count.  Also sizes the real tx/rx queue counts and enables
 * RSS (and TSS on 5719/5720) when more than one vector is in use.
 * Returns true on success.  NOTE(review): interior lines (fallback
 * returns) are elided in this listing.
 */
9132 static bool tg3_enable_msix(struct tg3 *tp)
9134 int i, rc, cpus = num_online_cpus();
9135 struct msix_entry msix_ent[tp->irq_max];
9138 /* Just fallback to the simpler MSI mode. */
9142 * We want as many rx rings enabled as there are cpus.
9143 * The first MSIX vector only deals with link interrupts, etc,
9144 * so we add one to the number of vectors we are requesting.
9146 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9148 for (i = 0; i < tp->irq_max; i++) {
9149 msix_ent[i].entry = i;
9150 msix_ent[i].vector = 0;
9153 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
/* Positive rc means "only rc vectors available": retry with that. */
9156 } else if (rc != 0) {
9157 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9159 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9164 for (i = 0; i < tp->irq_max; i++)
9165 tp->napi[i].irq_vec = msix_ent[i].vector;
9167 netif_set_real_num_tx_queues(tp->dev, 1);
9168 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9169 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9170 pci_disable_msix(tp->pdev);
9174 if (tp->irq_cnt > 1) {
9175 tg3_flag_set(tp, ENABLE_RSS);
9177 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9178 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9179 tg3_flag_set(tp, ENABLE_TSS);
9180 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9187 static void tg3_ints_init(struct tg3 *tp)
9189 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9190 !tg3_flag(tp, TAGGED_STATUS)) {
9191 /* All MSI supporting chips should support tagged
9192 * status. Assert that this is the case.
9194 netdev_warn(tp->dev,
9195 "MSI without TAGGED_STATUS? Not using MSI\n");
9199 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9200 tg3_flag_set(tp, USING_MSIX);
9201 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9202 tg3_flag_set(tp, USING_MSI);
9204 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9205 u32 msi_mode = tr32(MSGINT_MODE);
9206 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9207 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9208 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9211 if (!tg3_flag(tp, USING_MSIX)) {
9213 tp->napi[0].irq_vec = tp->pdev->irq;
9214 netif_set_real_num_tx_queues(tp->dev, 1);
9215 netif_set_real_num_rx_queues(tp->dev, 1);
/* tg3_ints_fini() - undo tg3_ints_init(): release MSI-X or MSI from
 * the PCI core and clear all interrupt-mode and RSS/TSS flags.
 */
9219 static void tg3_ints_fini(struct tg3 *tp)
9221 if (tg3_flag(tp, USING_MSIX))
9222 pci_disable_msix(tp->pdev);
9223 else if (tg3_flag(tp, USING_MSI))
9224 pci_disable_msi(tp->pdev);
9225 tg3_flag_clear(tp, USING_MSI);
9226 tg3_flag_clear(tp, USING_MSIX);
9227 tg3_flag_clear(tp, ENABLE_RSS);
9228 tg3_flag_clear(tp, ENABLE_TSS);
/* tg3_open() - .ndo_open handler: load firmware if needed, power up,
 * set up interrupts/NAPI/DMA rings, program the hardware, start the
 * watchdog timer, run the MSI self-test, and finally enable interrupts
 * and wake the tx queues.  Error paths unwind IRQs, NAPI, and DMA
 * memory.  NOTE(review): interior lines (labels, some returns) are
 * elided in this listing; comments cover only the visible code.
 */
9231 static int tg3_open(struct net_device *dev)
9233 struct tg3 *tp = netdev_priv(dev);
9236 if (tp->fw_needed) {
9237 err = tg3_request_firmware(tp);
9238 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9242 netdev_warn(tp->dev, "TSO capability disabled\n");
9243 tg3_flag_clear(tp, TSO_CAPABLE);
9244 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9245 netdev_notice(tp->dev, "TSO capability restored\n");
9246 tg3_flag_set(tp, TSO_CAPABLE);
9250 netif_carrier_off(tp->dev);
9252 err = tg3_power_up(tp);
9256 tg3_full_lock(tp, 0);
9258 tg3_disable_ints(tp);
9259 tg3_flag_clear(tp, INIT_COMPLETE);
9261 tg3_full_unlock(tp);
9264 * Setup interrupts first so we know how
9265 * many NAPI resources to allocate
9269 /* The placement of this call is tied
9270 * to the setup and use of Host TX descriptors.
9272 err = tg3_alloc_consistent(tp);
9278 tg3_napi_enable(tp);
9280 for (i = 0; i < tp->irq_cnt; i++) {
9281 struct tg3_napi *tnapi = &tp->napi[i];
9282 err = tg3_request_irq(tp, i);
/* NOTE(review): this unwind frees the same tnapi (the failing
 * iteration's) on every pass instead of re-indexing &tp->napi[i];
 * looks like a bug fixed in later upstream tg3 -- confirm.
 */
9284 for (i--; i >= 0; i--)
9285 free_irq(tnapi->irq_vec, tnapi);
9293 tg3_full_lock(tp, 0);
9295 err = tg3_init_hw(tp, 1);
9297 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
/* Tagged status allows a slow 1 Hz watchdog; untagged needs 10 Hz. */
9300 if (tg3_flag(tp, TAGGED_STATUS))
9301 tp->timer_offset = HZ;
9303 tp->timer_offset = HZ / 10;
9305 BUG_ON(tp->timer_offset > HZ);
9306 tp->timer_counter = tp->timer_multiplier =
9307 (HZ / tp->timer_offset);
9308 tp->asf_counter = tp->asf_multiplier =
9309 ((HZ / tp->timer_offset) * 2);
9311 init_timer(&tp->timer);
9312 tp->timer.expires = jiffies + tp->timer_offset;
9313 tp->timer.data = (unsigned long) tp;
9314 tp->timer.function = tg3_timer;
9317 tg3_full_unlock(tp);
9322 if (tg3_flag(tp, USING_MSI)) {
9323 err = tg3_test_msi(tp);
9326 tg3_full_lock(tp, 0);
9327 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9329 tg3_full_unlock(tp);
9334 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9335 u32 val = tr32(PCIE_TRANSACTION_CFG);
9337 tw32(PCIE_TRANSACTION_CFG,
9338 val | PCIE_TRANS_CFG_1SHOT_MSI);
9344 tg3_full_lock(tp, 0);
9346 add_timer(&tp->timer);
9347 tg3_flag_set(tp, INIT_COMPLETE);
9348 tg3_enable_ints(tp);
9350 tg3_full_unlock(tp);
9352 netif_tx_start_all_queues(dev);
9355 * Reset loopback feature if it was turned on while the device was down
9356 * make sure that it's installed properly now.
9358 if (dev->features & NETIF_F_LOOPBACK)
9359 tg3_set_loopback(dev, dev->features);
/* Error unwind: release IRQs, NAPI contexts, and DMA memory. */
9364 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9365 struct tg3_napi *tnapi = &tp->napi[i];
9366 free_irq(tnapi->irq_vec, tnapi);
9370 tg3_napi_disable(tp);
9372 tg3_free_consistent(tp);
9379 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9380 struct rtnl_link_stats64 *);
9381 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
/* tg3_close() - .ndo_close handler: stop NAPI, the reset task, tx
 * queues and the watchdog timer; halt the chip; free per-vector IRQs;
 * snapshot final statistics into the *_prev accumulators; then release
 * DMA memory and drop carrier.
 * NOTE(review): interior lines are elided in this listing.
 */
9383 static int tg3_close(struct net_device *dev)
9386 struct tg3 *tp = netdev_priv(dev);
9388 tg3_napi_disable(tp);
9389 cancel_work_sync(&tp->reset_task);
9391 netif_tx_stop_all_queues(dev);
9393 del_timer_sync(&tp->timer);
9397 tg3_full_lock(tp, 1);
9399 tg3_disable_ints(tp);
9401 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9403 tg3_flag_clear(tp, INIT_COMPLETE);
9405 tg3_full_unlock(tp);
9407 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9408 struct tg3_napi *tnapi = &tp->napi[i];
9409 free_irq(tnapi->irq_vec, tnapi);
/* Preserve cumulative stats across down/up cycles. */
9414 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9416 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9417 sizeof(tp->estats_prev));
9421 tg3_free_consistent(tp);
9425 netif_carrier_off(tp->dev);
/* get_stat64() - combine the high/low 32-bit halves of a hardware
 * statistics counter into a single u64 value.
 */
9430 static inline u64 get_stat64(tg3_stat64_t *val)
9432 return ((u64)val->high << 32) | ((u64)val->low);
/* calc_crc_errors() - return the cumulative rx CRC error count.
 * On copper 5700/5701 the count is read from the PHY's CRC counter
 * register (under tp->lock, bottom halves disabled) and accumulated
 * in tp->phy_crc_errors; all other chips use the hardware FCS-error
 * statistic.  NOTE(review): interior lines are elided in this listing.
 */
9435 static u64 calc_crc_errors(struct tg3 *tp)
9437 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9439 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9440 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9444 spin_lock_bh(&tp->lock);
9445 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9446 tg3_writephy(tp, MII_TG3_TEST1,
9447 val | MII_TG3_TEST1_CRC_EN);
9448 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9451 spin_unlock_bh(&tp->lock);
9453 tp->phy_crc_errors += val;
9455 return tp->phy_crc_errors;
9458 return get_stat64(&hw_stats->rx_fcs_errors);
/* ESTAT_ADD() - helper for tg3_get_estats(): set estats->member to the
 * saved pre-down total plus the current hardware counter value.
 * Relies on locals estats/old_estats/hw_stats in the expanding scope.
 */
9461 #define ESTAT_ADD(member) \
9462 estats->member = old_estats->member + \
9463 get_stat64(&hw_stats->member)
/* tg3_get_estats() - refresh tp->estats: for every ethtool statistic,
 * add the live hardware counter to the total saved in tp->estats_prev
 * (snapshotted at the last device close), and return &tp->estats.
 * NOTE(review): interior lines are elided in this listing.
 */
9465 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9467 struct tg3_ethtool_stats *estats = &tp->estats;
9468 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9469 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9474 ESTAT_ADD(rx_octets);
9475 ESTAT_ADD(rx_fragments);
9476 ESTAT_ADD(rx_ucast_packets);
9477 ESTAT_ADD(rx_mcast_packets);
9478 ESTAT_ADD(rx_bcast_packets);
9479 ESTAT_ADD(rx_fcs_errors);
9480 ESTAT_ADD(rx_align_errors);
9481 ESTAT_ADD(rx_xon_pause_rcvd);
9482 ESTAT_ADD(rx_xoff_pause_rcvd);
9483 ESTAT_ADD(rx_mac_ctrl_rcvd);
9484 ESTAT_ADD(rx_xoff_entered);
9485 ESTAT_ADD(rx_frame_too_long_errors);
9486 ESTAT_ADD(rx_jabbers);
9487 ESTAT_ADD(rx_undersize_packets);
9488 ESTAT_ADD(rx_in_length_errors);
9489 ESTAT_ADD(rx_out_length_errors);
9490 ESTAT_ADD(rx_64_or_less_octet_packets);
9491 ESTAT_ADD(rx_65_to_127_octet_packets);
9492 ESTAT_ADD(rx_128_to_255_octet_packets);
9493 ESTAT_ADD(rx_256_to_511_octet_packets);
9494 ESTAT_ADD(rx_512_to_1023_octet_packets);
9495 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9496 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9497 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9498 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9499 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9501 ESTAT_ADD(tx_octets);
9502 ESTAT_ADD(tx_collisions);
9503 ESTAT_ADD(tx_xon_sent);
9504 ESTAT_ADD(tx_xoff_sent);
9505 ESTAT_ADD(tx_flow_control);
9506 ESTAT_ADD(tx_mac_errors);
9507 ESTAT_ADD(tx_single_collisions);
9508 ESTAT_ADD(tx_mult_collisions);
9509 ESTAT_ADD(tx_deferred);
9510 ESTAT_ADD(tx_excessive_collisions);
9511 ESTAT_ADD(tx_late_collisions);
9512 ESTAT_ADD(tx_collide_2times);
9513 ESTAT_ADD(tx_collide_3times);
9514 ESTAT_ADD(tx_collide_4times);
9515 ESTAT_ADD(tx_collide_5times);
9516 ESTAT_ADD(tx_collide_6times);
9517 ESTAT_ADD(tx_collide_7times);
9518 ESTAT_ADD(tx_collide_8times);
9519 ESTAT_ADD(tx_collide_9times);
9520 ESTAT_ADD(tx_collide_10times);
9521 ESTAT_ADD(tx_collide_11times);
9522 ESTAT_ADD(tx_collide_12times);
9523 ESTAT_ADD(tx_collide_13times);
9524 ESTAT_ADD(tx_collide_14times);
9525 ESTAT_ADD(tx_collide_15times);
9526 ESTAT_ADD(tx_ucast_packets);
9527 ESTAT_ADD(tx_mcast_packets);
9528 ESTAT_ADD(tx_bcast_packets);
9529 ESTAT_ADD(tx_carrier_sense_errors);
9530 ESTAT_ADD(tx_discards);
9531 ESTAT_ADD(tx_errors);
9533 ESTAT_ADD(dma_writeq_full);
9534 ESTAT_ADD(dma_write_prioq_full);
9535 ESTAT_ADD(rxbds_empty);
9536 ESTAT_ADD(rx_discards);
9537 ESTAT_ADD(rx_errors);
9538 ESTAT_ADD(rx_threshold_hit);
9540 ESTAT_ADD(dma_readq_full);
9541 ESTAT_ADD(dma_read_prioq_full);
9542 ESTAT_ADD(tx_comp_queue_full);
9544 ESTAT_ADD(ring_set_send_prod_index);
9545 ESTAT_ADD(ring_status_update);
9546 ESTAT_ADD(nic_irqs);
9547 ESTAT_ADD(nic_avoided_irqs);
9548 ESTAT_ADD(nic_tx_threshold_hit);
9550 ESTAT_ADD(mbuf_lwm_thresh_hit);
/* tg3_get_stats64() - fill *stats with cumulative link statistics:
 * each field is the total saved at the last close (tp->net_stats_prev)
 * plus the live hardware counters, mapping the chip's MAC statistics
 * onto the rtnl_link_stats64 fields.
 * NOTE(review): interior lines are elided in this listing.
 */
9555 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9556 struct rtnl_link_stats64 *stats)
9558 struct tg3 *tp = netdev_priv(dev);
9559 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9560 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9565 stats->rx_packets = old_stats->rx_packets +
9566 get_stat64(&hw_stats->rx_ucast_packets) +
9567 get_stat64(&hw_stats->rx_mcast_packets) +
9568 get_stat64(&hw_stats->rx_bcast_packets);
9570 stats->tx_packets = old_stats->tx_packets +
9571 get_stat64(&hw_stats->tx_ucast_packets) +
9572 get_stat64(&hw_stats->tx_mcast_packets) +
9573 get_stat64(&hw_stats->tx_bcast_packets);
9575 stats->rx_bytes = old_stats->rx_bytes +
9576 get_stat64(&hw_stats->rx_octets);
9577 stats->tx_bytes = old_stats->tx_bytes +
9578 get_stat64(&hw_stats->tx_octets);
9580 stats->rx_errors = old_stats->rx_errors +
9581 get_stat64(&hw_stats->rx_errors);
9582 stats->tx_errors = old_stats->tx_errors +
9583 get_stat64(&hw_stats->tx_errors) +
9584 get_stat64(&hw_stats->tx_mac_errors) +
9585 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9586 get_stat64(&hw_stats->tx_discards);
9588 stats->multicast = old_stats->multicast +
9589 get_stat64(&hw_stats->rx_mcast_packets);
9590 stats->collisions = old_stats->collisions +
9591 get_stat64(&hw_stats->tx_collisions);
9593 stats->rx_length_errors = old_stats->rx_length_errors +
9594 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9595 get_stat64(&hw_stats->rx_undersize_packets);
9597 stats->rx_over_errors = old_stats->rx_over_errors +
9598 get_stat64(&hw_stats->rxbds_empty);
9599 stats->rx_frame_errors = old_stats->rx_frame_errors +
9600 get_stat64(&hw_stats->rx_align_errors);
9601 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9602 get_stat64(&hw_stats->tx_discards);
9603 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9604 get_stat64(&hw_stats->tx_carrier_sense_errors);
/* CRC errors may come from the PHY counter on 5700/5701 copper. */
9606 stats->rx_crc_errors = old_stats->rx_crc_errors +
9607 calc_crc_errors(tp);
9609 stats->rx_missed_errors = old_stats->rx_missed_errors +
9610 get_stat64(&hw_stats->rx_discards);
9612 stats->rx_dropped = tp->rx_dropped;
/* calc_crc() - compute a 32-bit CRC over buf[0..len-1], processing one
 * byte per outer iteration and one bit per inner iteration.
 * NOTE(review): the loop bodies are elided in this listing; presumably
 * this is the standard Ethernet CRC-32 used for the multicast hash --
 * confirm against the full source.
 */
9617 static inline u32 calc_crc(unsigned char *buf, int len)
9625 for (j = 0; j < len; j++) {
9628 for (k = 0; k < 8; k++) {
/* tg3_set_multi() - program all four MAC multicast hash registers to
 * all-ones (accept every multicast frame) or all-zeros (reject all),
 * depending on @accept_all.
 */
9641 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9643 /* accept or reject all multicast frames */
9644 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9645 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9646 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9647 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/* __tg3_set_rx_mode() - apply the device's rx filtering policy
 * (promiscuous / all-multi / multicast hash) to the MAC.  Caller must
 * hold the appropriate tg3 locks (see tg3_set_rx_mode()).
 * NOTE(review): interior lines are elided in this listing.
 */
9650 static void __tg3_set_rx_mode(struct net_device *dev)
9652 struct tg3 *tp = netdev_priv(dev);
9655 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9656 RX_MODE_KEEP_VLAN_TAG);
9658 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9659 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9662 if (!tg3_flag(tp, ENABLE_ASF))
9663 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9666 if (dev->flags & IFF_PROMISC) {
9667 /* Promiscuous mode. */
9668 rx_mode |= RX_MODE_PROMISC;
9669 } else if (dev->flags & IFF_ALLMULTI) {
9670 /* Accept all multicast. */
9671 tg3_set_multi(tp, 1);
9672 } else if (netdev_mc_empty(dev)) {
9673 /* Reject all multicast. */
9674 tg3_set_multi(tp, 0);
9676 /* Accept one or more multicast(s). */
9677 struct netdev_hw_addr *ha;
9678 u32 mc_filter[4] = { 0, };
/* Hash each multicast address into one bit of the 128-bit filter. */
9683 netdev_for_each_mc_addr(ha, dev) {
9684 crc = calc_crc(ha->addr, ETH_ALEN);
9686 regidx = (bit & 0x60) >> 5;
9688 mc_filter[regidx] |= (1 << bit);
9691 tw32(MAC_HASH_REG_0, mc_filter[0]);
9692 tw32(MAC_HASH_REG_1, mc_filter[1]);
9693 tw32(MAC_HASH_REG_2, mc_filter[2]);
9694 tw32(MAC_HASH_REG_3, mc_filter[3]);
/* Only touch the MAC_RX_MODE register when the mode changed. */
9697 if (rx_mode != tp->rx_mode) {
9698 tp->rx_mode = rx_mode;
9699 tw32_f(MAC_RX_MODE, rx_mode);
/* tg3_set_rx_mode() - .ndo_set_rx_mode handler: take the full tg3
 * lock and apply the rx filtering policy; a no-op when the interface
 * is down.
 */
9704 static void tg3_set_rx_mode(struct net_device *dev)
9706 struct tg3 *tp = netdev_priv(dev);
9708 if (!netif_running(dev))
9711 tg3_full_lock(tp, 0);
9712 __tg3_set_rx_mode(dev);
9713 tg3_full_unlock(tp);
/* tg3_get_regs_len() - ethtool: size of the register dump buffer. */
9716 static int tg3_get_regs_len(struct net_device *dev)
9718 return TG3_REG_BLK_SIZE;
/* tg3_get_regs() - ethtool register dump: zero the buffer, then copy
 * the legacy register block under the full lock.  Skipped (buffer left
 * zeroed) while the PHY is in low-power mode.
 */
9721 static void tg3_get_regs(struct net_device *dev,
9722 struct ethtool_regs *regs, void *_p)
9724 struct tg3 *tp = netdev_priv(dev);
9728 memset(_p, 0, TG3_REG_BLK_SIZE);
9730 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9733 tg3_full_lock(tp, 0);
9735 tg3_dump_legacy_regs(tp, (u32 *)_p);
9737 tg3_full_unlock(tp);
/* tg3_get_eeprom_len() - ethtool: size of the NVRAM in bytes. */
9740 static int tg3_get_eeprom_len(struct net_device *dev)
9742 struct tg3 *tp = netdev_priv(dev);
9744 return tp->nvram_size;
/* tg3_get_eeprom() - ethtool NVRAM read.  NVRAM is read in 32-bit
 * big-endian words, so the request is split into: a leading partial
 * word (unaligned start), whole aligned words, and a trailing partial
 * word.  Fails when there is no NVRAM or the PHY is in low power.
 * NOTE(review): interior lines are elided in this listing.
 */
9747 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9749 struct tg3 *tp = netdev_priv(dev);
9752 u32 i, offset, len, b_offset, b_count;
9755 if (tg3_flag(tp, NO_NVRAM))
9758 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9761 offset = eeprom->offset;
9765 eeprom->magic = TG3_EEPROM_MAGIC;
9768 /* adjustments to start on required 4 byte boundary */
9769 b_offset = offset & 3;
9770 b_count = 4 - b_offset;
9771 if (b_count > len) {
9772 /* i.e. offset=1 len=2 */
9775 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9778 memcpy(data, ((char *)&val) + b_offset, b_count);
9781 eeprom->len += b_count;
9784 /* read bytes up to the last 4 byte boundary */
9785 pd = &data[eeprom->len];
9786 for (i = 0; i < (len - (len & 3)); i += 4) {
9787 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9792 memcpy(pd + i, &val, 4);
9797 /* read last bytes not ending on 4 byte boundary */
9798 pd = &data[eeprom->len];
9800 b_offset = offset + len - b_count;
9801 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9804 memcpy(pd, &val, b_count);
9805 eeprom->len += b_count;
9810 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
/* tg3_set_eeprom() - ethtool NVRAM write.  NVRAM writes must be
 * 32-bit aligned, so an unaligned request is widened: the bordering
 * words are read first, merged with the caller's data in a temporary
 * kmalloc'd buffer, and the whole aligned span is written back.
 * Requires a valid magic and available NVRAM; refused in low power.
 * NOTE(review): interior lines are elided in this listing.
 */
9812 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9814 struct tg3 *tp = netdev_priv(dev);
9816 u32 offset, len, b_offset, odd_len;
9820 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9823 if (tg3_flag(tp, NO_NVRAM) ||
9824 eeprom->magic != TG3_EEPROM_MAGIC)
9827 offset = eeprom->offset;
9830 if ((b_offset = (offset & 3))) {
9831 /* adjustments to start on required 4 byte boundary */
9832 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9843 /* adjustments to end on required 4 byte boundary */
9845 len = (len + 3) & ~3;
9846 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9852 if (b_offset || odd_len) {
9853 buf = kmalloc(len, GFP_KERNEL);
9857 memcpy(buf, &start, 4);
9859 memcpy(buf+len-4, &end, 4);
9860 memcpy(buf + b_offset, data, eeprom->len);
9863 ret = tg3_nvram_write_block(tp, offset, len, buf);
/* tg3_get_settings() - ethtool get_settings: when phylib drives the
 * PHY, delegate to phy_ethtool_gset(); otherwise build supported/
 * advertised masks from the phy_flags (10/100-only, serdes vs copper)
 * and report the active speed/duplex only while the link is running.
 * NOTE(review): interior lines are elided in this listing.
 */
9871 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9873 struct tg3 *tp = netdev_priv(dev);
9875 if (tg3_flag(tp, USE_PHYLIB)) {
9876 struct phy_device *phydev;
9877 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9879 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9880 return phy_ethtool_gset(phydev, cmd);
9883 cmd->supported = (SUPPORTED_Autoneg);
9885 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9886 cmd->supported |= (SUPPORTED_1000baseT_Half |
9887 SUPPORTED_1000baseT_Full);
9889 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9890 cmd->supported |= (SUPPORTED_100baseT_Half |
9891 SUPPORTED_100baseT_Full |
9892 SUPPORTED_10baseT_Half |
9893 SUPPORTED_10baseT_Full |
9895 cmd->port = PORT_TP;
9897 cmd->supported |= SUPPORTED_FIBRE;
9898 cmd->port = PORT_FIBRE;
9901 cmd->advertising = tp->link_config.advertising;
9902 if (netif_running(dev)) {
9903 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
9904 cmd->duplex = tp->link_config.active_duplex;
9906 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
9907 cmd->duplex = DUPLEX_INVALID;
9909 cmd->phy_address = tp->phy_addr;
9910 cmd->transceiver = XCVR_INTERNAL;
9911 cmd->autoneg = tp->link_config.autoneg;
/* tg3_set_settings() - ethtool set_settings: delegate to phylib when
 * in use; otherwise validate the requested autoneg/speed/duplex and
 * advertisement mask against the PHY's capabilities, store the new
 * link_config (and orig_* copies), and re-run tg3_setup_phy() if the
 * interface is up.  NOTE(review): interior lines are elided.
 */
9917 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9919 struct tg3 *tp = netdev_priv(dev);
9920 u32 speed = ethtool_cmd_speed(cmd);
9922 if (tg3_flag(tp, USE_PHYLIB)) {
9923 struct phy_device *phydev;
9924 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9926 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9927 return phy_ethtool_sset(phydev, cmd);
9930 if (cmd->autoneg != AUTONEG_ENABLE &&
9931 cmd->autoneg != AUTONEG_DISABLE)
9934 if (cmd->autoneg == AUTONEG_DISABLE &&
9935 cmd->duplex != DUPLEX_FULL &&
9936 cmd->duplex != DUPLEX_HALF)
/* Build the set of advertisement bits this PHY may legally use and
 * reject any request outside it.
 */
9939 if (cmd->autoneg == AUTONEG_ENABLE) {
9940 u32 mask = ADVERTISED_Autoneg |
9942 ADVERTISED_Asym_Pause;
9944 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9945 mask |= ADVERTISED_1000baseT_Half |
9946 ADVERTISED_1000baseT_Full;
9948 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9949 mask |= ADVERTISED_100baseT_Half |
9950 ADVERTISED_100baseT_Full |
9951 ADVERTISED_10baseT_Half |
9952 ADVERTISED_10baseT_Full |
9955 mask |= ADVERTISED_FIBRE;
9957 if (cmd->advertising & ~mask)
9960 mask &= (ADVERTISED_1000baseT_Half |
9961 ADVERTISED_1000baseT_Full |
9962 ADVERTISED_100baseT_Half |
9963 ADVERTISED_100baseT_Full |
9964 ADVERTISED_10baseT_Half |
9965 ADVERTISED_10baseT_Full);
9967 cmd->advertising &= mask;
/* Forced-speed constraints: serdes links only do 1000/full. */
9969 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
9970 if (speed != SPEED_1000)
9973 if (cmd->duplex != DUPLEX_FULL)
9976 if (speed != SPEED_100 &&
9982 tg3_full_lock(tp, 0);
9984 tp->link_config.autoneg = cmd->autoneg;
9985 if (cmd->autoneg == AUTONEG_ENABLE) {
9986 tp->link_config.advertising = (cmd->advertising |
9987 ADVERTISED_Autoneg);
9988 tp->link_config.speed = SPEED_INVALID;
9989 tp->link_config.duplex = DUPLEX_INVALID;
9991 tp->link_config.advertising = 0;
9992 tp->link_config.speed = speed;
9993 tp->link_config.duplex = cmd->duplex;
9996 tp->link_config.orig_speed = tp->link_config.speed;
9997 tp->link_config.orig_duplex = tp->link_config.duplex;
9998 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10000 if (netif_running(dev))
10001 tg3_setup_phy(tp, 1);
10003 tg3_full_unlock(tp);
/* tg3_get_drvinfo() - ethtool driver identification strings.
 * NOTE(review): these strcpy() calls are unbounded; later upstream
 * kernels switched ethtool drvinfo fills to strlcpy() -- confirm the
 * source strings fit the fixed-size ethtool_drvinfo fields.
 */
10008 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10010 struct tg3 *tp = netdev_priv(dev);
10012 strcpy(info->driver, DRV_MODULE_NAME);
10013 strcpy(info->version, DRV_MODULE_VERSION);
10014 strcpy(info->fw_version, tp->fw_ver);
10015 strcpy(info->bus_info, pci_name(tp->pdev));
/* tg3_get_wol() - ethtool WoL query: magic-packet wake is supported
 * (and possibly enabled) only when the chip's WOL capability flag is
 * set and the PCI device can wake the system.
 */
10018 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10020 struct tg3 *tp = netdev_priv(dev);
10022 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10023 wol->supported = WAKE_MAGIC;
10025 wol->supported = 0;
10027 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10028 wol->wolopts = WAKE_MAGIC;
10029 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* tg3_set_wol() - ethtool WoL configuration: only WAKE_MAGIC is
 * accepted, and only when the hardware is WoL-capable.  Records the
 * choice in the device-wakeup state and mirrors it into the driver's
 * WOL_ENABLE flag under tp->lock.
 */
10032 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10034 struct tg3 *tp = netdev_priv(dev);
10035 struct device *dp = &tp->pdev->dev;
10037 if (wol->wolopts & ~WAKE_MAGIC)
10039 if ((wol->wolopts & WAKE_MAGIC) &&
10040 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10043 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10045 spin_lock_bh(&tp->lock);
10046 if (device_may_wakeup(dp))
10047 tg3_flag_set(tp, WOL_ENABLE);
10049 tg3_flag_clear(tp, WOL_ENABLE);
10050 spin_unlock_bh(&tp->lock);
/* tg3_get_msglevel() - ethtool: report the netif message-level mask. */
10055 static u32 tg3_get_msglevel(struct net_device *dev)
10057 struct tg3 *tp = netdev_priv(dev);
10058 return tp->msg_enable;
/* tg3_set_msglevel() - ethtool: set the netif message-level mask. */
10061 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10063 struct tg3 *tp = netdev_priv(dev);
10064 tp->msg_enable = value;
/* tg3_nway_reset() - ethtool autonegotiation restart.  Delegated to
 * phylib when in use; otherwise restarts aneg via BMCR provided aneg
 * is enabled (or parallel detect is active).  Not supported on serdes
 * PHYs or while the interface is down.
 * NOTE(review): interior lines are elided in this listing.
 */
10067 static int tg3_nway_reset(struct net_device *dev)
10069 struct tg3 *tp = netdev_priv(dev);
10072 if (!netif_running(dev))
10075 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10078 if (tg3_flag(tp, USE_PHYLIB)) {
10079 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10081 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10085 spin_lock_bh(&tp->lock);
/* Double read: the first flushes a latched value. */
10087 tg3_readphy(tp, MII_BMCR, &bmcr);
10088 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10089 ((bmcr & BMCR_ANENABLE) ||
10090 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10091 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10095 spin_unlock_bh(&tp->lock);
/* tg3_get_ringparam() - ethtool ring sizes: report the standard and
 * jumbo rx ring limits/settings (jumbo only when enabled) and the tx
 * ring limit/setting.  The mini ring is never exposed.
 */
10101 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10103 struct tg3 *tp = netdev_priv(dev);
10105 ering->rx_max_pending = tp->rx_std_ring_mask;
10106 ering->rx_mini_max_pending = 0;
10107 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10108 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10110 ering->rx_jumbo_max_pending = 0;
10112 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10114 ering->rx_pending = tp->rx_pending;
10115 ering->rx_mini_pending = 0;
10116 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10117 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10119 ering->rx_jumbo_pending = 0;
10121 ering->tx_pending = tp->napi[0].tx_pending;
/* tg3_set_ringparam() - ethtool ring resize: validate the requested
 * sizes against hardware limits (tx must exceed MAX_SKB_FRAGS, 3x on
 * TSO_BUG chips), stop traffic if running, apply the new sizes to all
 * vectors, then halt/restart the hardware and resume.
 * NOTE(review): interior lines are elided in this listing.
 */
10124 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10126 struct tg3 *tp = netdev_priv(dev);
10127 int i, irq_sync = 0, err = 0;
10129 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10130 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10131 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10132 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10133 (tg3_flag(tp, TSO_BUG) &&
10134 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10137 if (netif_running(dev)) {
10139 tg3_netif_stop(tp);
10143 tg3_full_lock(tp, irq_sync);
10145 tp->rx_pending = ering->rx_pending;
/* Some chips cap the standard rx ring at 64 entries. */
10147 if (tg3_flag(tp, MAX_RXPEND_64) &&
10148 tp->rx_pending > 63)
10149 tp->rx_pending = 63;
10150 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10152 for (i = 0; i < tp->irq_max; i++)
10153 tp->napi[i].tx_pending = ering->tx_pending;
10155 if (netif_running(dev)) {
10156 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10157 err = tg3_restart_hw(tp, 1);
10159 tg3_netif_start(tp);
10162 tg3_full_unlock(tp);
10164 if (irq_sync && !err)
/* tg3_get_pauseparam() - ethtool flow-control query: report pause
 * autoneg and the active rx/tx pause state.
 */
10170 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10172 struct tg3 *tp = netdev_priv(dev);
10174 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10176 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10177 epause->rx_pause = 1;
10179 epause->rx_pause = 0;
10181 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10182 epause->tx_pause = 1;
10184 epause->tx_pause = 0;
/* tg3_set_pauseparam() - ethtool flow-control configuration.
 * Phylib path: translate rx/tx pause into Pause/Asym_Pause
 * advertisement bits, update flowctrl and PAUSE_AUTONEG, and
 * renegotiate (or force via tg3_setup_flow_control()) as needed.
 * Non-phylib path: stop traffic, update the flags/flowctrl, and
 * halt/restart the hardware if running.
 * NOTE(review): interior lines are elided in this listing.
 */
10187 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10189 struct tg3 *tp = netdev_priv(dev);
10192 if (tg3_flag(tp, USE_PHYLIB)) {
10194 struct phy_device *phydev;
10196 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
/* Asymmetric pause requires PHY support for Asym_Pause. */
10198 if (!(phydev->supported & SUPPORTED_Pause) ||
10199 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10200 (epause->rx_pause != epause->tx_pause)))
10203 tp->link_config.flowctrl = 0;
10204 if (epause->rx_pause) {
10205 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10207 if (epause->tx_pause) {
10208 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10209 newadv = ADVERTISED_Pause;
10211 newadv = ADVERTISED_Pause |
10212 ADVERTISED_Asym_Pause;
10213 } else if (epause->tx_pause) {
10214 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10215 newadv = ADVERTISED_Asym_Pause;
10219 if (epause->autoneg)
10220 tg3_flag_set(tp, PAUSE_AUTONEG);
10222 tg3_flag_clear(tp, PAUSE_AUTONEG);
10224 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10225 u32 oldadv = phydev->advertising &
10226 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10227 if (oldadv != newadv) {
10228 phydev->advertising &=
10229 ~(ADVERTISED_Pause |
10230 ADVERTISED_Asym_Pause);
10231 phydev->advertising |= newadv;
10232 if (phydev->autoneg) {
10234 * Always renegotiate the link to
10235 * inform our link partner of our
10236 * flow control settings, even if the
10237 * flow control is forced. Let
10238 * tg3_adjust_link() do the final
10239 * flow control setup.
10241 return phy_start_aneg(phydev);
10245 if (!epause->autoneg)
10246 tg3_setup_flow_control(tp, 0, 0);
10248 tp->link_config.orig_advertising &=
10249 ~(ADVERTISED_Pause |
10250 ADVERTISED_Asym_Pause);
10251 tp->link_config.orig_advertising |= newadv;
10256 if (netif_running(dev)) {
10257 tg3_netif_stop(tp);
10261 tg3_full_lock(tp, irq_sync);
10263 if (epause->autoneg)
10264 tg3_flag_set(tp, PAUSE_AUTONEG);
10266 tg3_flag_clear(tp, PAUSE_AUTONEG);
10267 if (epause->rx_pause)
10268 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10270 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10271 if (epause->tx_pause)
10272 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10274 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10276 if (netif_running(dev)) {
10277 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10278 err = tg3_restart_hw(tp, 1);
10280 tg3_netif_start(tp);
10283 tg3_full_unlock(tp);
/* tg3_get_sset_count() - ethtool string-set sizes: number of self-test
 * entries or statistics entries; -EOPNOTSUPP for other sets.
 * NOTE(review): the case labels are elided in this listing.
 */
10289 static int tg3_get_sset_count(struct net_device *dev, int sset)
10293 return TG3_NUM_TEST;
10295 return TG3_NUM_STATS;
10297 return -EOPNOTSUPP;
/* ethtool .get_strings: copy the requested key-string table into 'buf'.
 * Fix: the address-of expressions had been corrupted by HTML-entity
 * mojibake ("&eth" rendered as the character 'ð'); restore the literal
 * '&ethtool_stats_keys' / '&ethtool_test_keys'.
 * NOTE(review): the ETH_SS_STATS / ETH_SS_TEST case labels and breaks
 * are elided in this excerpt.
 */
10301 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10303 switch (stringset) {
10305 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10308 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10311 WARN_ON(1); /* we need a WARN() */
/* ethtool .set_phys_id: drive the port LED for physical identification.
 * Returns 1 for ETHTOOL_ID_ACTIVE to request a 1 Hz on/off cycle from the
 * ethtool core. NOTE(review): the surrounding switch statement, returns
 * and closing braces are partially elided in this excerpt.
 */
10316 static int tg3_set_phys_id(struct net_device *dev,
10317 enum ethtool_phys_id_state state)
10319 struct tg3 *tp = netdev_priv(dev);
/* LED register access requires a running interface */
10321 if (!netif_running(tp->dev))
10325 case ETHTOOL_ID_ACTIVE:
10326 return 1; /* cycle on/off once per second */
/* force every speed/traffic LED on */
10328 case ETHTOOL_ID_ON:
10329 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10330 LED_CTRL_1000MBPS_ON |
10331 LED_CTRL_100MBPS_ON |
10332 LED_CTRL_10MBPS_ON |
10333 LED_CTRL_TRAFFIC_OVERRIDE |
10334 LED_CTRL_TRAFFIC_BLINK |
10335 LED_CTRL_TRAFFIC_LED);
/* override with everything off */
10338 case ETHTOOL_ID_OFF:
10339 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10340 LED_CTRL_TRAFFIC_OVERRIDE);
/* restore the saved LED configuration */
10343 case ETHTOOL_ID_INACTIVE:
10344 tw32(MAC_LED_CTRL, tp->led_ctrl);
/* ethtool .get_ethtool_stats: snapshot the driver's accumulated hardware
 * statistics into the caller-provided u64 array. tg3_get_estats() is
 * expected to return a buffer of sizeof(tp->estats) bytes.
 */
10351 static void tg3_get_ethtool_stats(struct net_device *dev,
10352 struct ethtool_stats *estats, u64 *tmp_stats)
10354 struct tg3 *tp = netdev_priv(dev);
10355 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
/* Read the device's Vital Product Data block, either out of NVRAM (when
 * the EEPROM magic matches) or via PCI config-space VPD registers.
 * Returns a kmalloc'd buffer the caller must free, or (presumably) NULL
 * on failure — error paths are elided in this excerpt.
 */
10358 static __be32 * tg3_vpd_readblock(struct tg3 *tp)
10362 u32 offset = 0, len = 0;
/* bail out early when no NVRAM or the magic word is unreadable */
10365 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
/* scan the NVRAM directory for an extended-VPD entry */
10368 if (magic == TG3_EEPROM_MAGIC) {
10369 for (offset = TG3_NVM_DIR_START;
10370 offset < TG3_NVM_DIR_END;
10371 offset += TG3_NVM_DIRENT_SIZE) {
10372 if (tg3_nvram_read(tp, offset, &val))
10375 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10376 TG3_NVM_DIRTYPE_EXTVPD)
/* found an EXTVPD dirent: decode its length and physical offset */
10380 if (offset != TG3_NVM_DIR_END) {
10381 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10382 if (tg3_nvram_read(tp, offset + 4, &offset))
10385 offset = tg3_nvram_logical_addr(tp, offset);
/* no directory entry: fall back to the fixed VPD window */
10389 if (!offset || !len) {
10390 offset = TG3_NVM_VPD_OFF;
10391 len = TG3_NVM_VPD_LEN;
10394 buf = kmalloc(len, GFP_KERNEL);
10398 if (magic == TG3_EEPROM_MAGIC) {
10399 for (i = 0; i < len; i += 4) {
10400 /* The data is in little-endian format in NVRAM.
10401 * Use the big-endian read routines to preserve
10402 * the byte order as it exists in NVRAM.
10404 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
/* non-EEPROM path: pull the VPD through PCI config space, in up to
 * three chunks; abort on timeout/interrupt errors from pci_read_vpd()
 */
10410 unsigned int pos = 0;
10412 ptr = (u8 *)&buf[0];
10413 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10414 cnt = pci_read_vpd(tp->pdev, pos,
10416 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10432 #define NVRAM_TEST_SIZE 0x100
10433 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10434 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10435 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10436 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10437 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
/* ethtool self-test #1: validate NVRAM contents.
 * Determines the image format from the magic word, reads the image, then
 * verifies the format-specific checksum/parity: simple csum for selfboot
 * FW images, per-byte parity for selfboot HW images, CRCs for the legacy
 * EEPROM layout, and finally the VPD RO-section checksum.
 * NOTE(review): many returns/braces are elided in this excerpt.
 */
10439 static int tg3_test_nvram(struct tg3 *tp)
10443 int i, j, k, err = 0, size;
10445 if (tg3_flag(tp, NO_NVRAM))
10448 if (tg3_nvram_read(tp, 0, &magic) != 0)
/* pick the number of bytes to read based on the image format/revision */
10451 if (magic == TG3_EEPROM_MAGIC)
10452 size = NVRAM_TEST_SIZE;
10453 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10454 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10455 TG3_EEPROM_SB_FORMAT_1) {
10456 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10457 case TG3_EEPROM_SB_REVISION_0:
10458 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10460 case TG3_EEPROM_SB_REVISION_2:
10461 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10463 case TG3_EEPROM_SB_REVISION_3:
10464 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10471 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10472 size = NVRAM_SELFBOOT_HW_SIZE;
10476 buf = kmalloc(size, GFP_KERNEL);
/* read the image as big-endian words to preserve NVRAM byte order */
10481 for (i = 0, j = 0; i < size; i += 4, j++) {
10482 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10489 /* Selfboot format */
10490 magic = be32_to_cpu(buf[0]);
10491 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10492 TG3_EEPROM_MAGIC_FW) {
10493 u8 *buf8 = (u8 *) buf, csum8 = 0;
10495 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10496 TG3_EEPROM_SB_REVISION_2) {
10497 /* For rev 2, the csum doesn't include the MBA. */
10498 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10500 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10503 for (i = 0; i < size; i++)
/* selfboot HW format: each data byte carries a parity bit elsewhere */
10516 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10517 TG3_EEPROM_MAGIC_HW) {
10518 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10519 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10520 u8 *buf8 = (u8 *) buf;
10522 /* Separate the parity bits and the data bytes. */
10523 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10524 if ((i == 0) || (i == 8)) {
10528 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10529 parity[k++] = buf8[i] & msk;
10531 } else if (i == 16) {
10535 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10536 parity[k++] = buf8[i] & msk;
10539 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10540 parity[k++] = buf8[i] & msk;
10543 data[j++] = buf8[i];
/* check odd parity: popcount of each data byte vs its parity bit */
10547 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10548 u8 hw8 = hweight8(data[i]);
10550 if ((hw8 & 0x1) && parity[i])
10552 else if (!(hw8 & 0x1) && !parity[i])
10561 /* Bootstrap checksum at offset 0x10 */
10562 csum = calc_crc((unsigned char *) buf, 0x10);
10563 if (csum != le32_to_cpu(buf[0x10/4]))
10566 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10567 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10568 if (csum != le32_to_cpu(buf[0xfc/4]))
/* finally, verify the VPD read-only section checksum keyword */
10573 buf = tg3_vpd_readblock(tp);
10577 i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10578 PCI_VPD_LRDT_RO_DATA);
10580 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10584 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10587 i += PCI_VPD_LRDT_TAG_SIZE;
10588 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10589 PCI_VPD_RO_KEYWORD_CHKSUM)
10593 j += PCI_VPD_INFO_FLD_HDR_SIZE;
/* RV checksum: all bytes up to and including the checksum byte sum to 0 */
10595 for (i = 0; i <= j; i++)
10596 csum8 += ((u8 *)buf)[i];
10610 #define TG3_SERDES_TIMEOUT_SEC 2
10611 #define TG3_COPPER_TIMEOUT_SEC 6
/* ethtool self-test: wait for link-up, polling once per second.
 * SerDes parts get 2 seconds, copper 6 (the slower autoneg case).
 * NOTE(review): the returns/braces for success, timeout and the
 * !netif_running early-exit are elided in this excerpt.
 */
10613 static int tg3_test_link(struct tg3 *tp)
10617 if (!netif_running(tp->dev))
10620 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10621 max = TG3_SERDES_TIMEOUT_SEC;
10623 max = TG3_COPPER_TIMEOUT_SEC;
10625 for (i = 0; i < max; i++) {
10626 if (netif_carrier_ok(tp->dev))
/* sleep 1s between polls; abort early if interrupted by a signal */
10629 if (msleep_interruptible(1000))
10636 /* Only test the commonly used registers */
/* ethtool offline self-test: for each table entry, write 0 then
 * (read_mask | write_mask) and verify that read-only bits never change
 * and read/write bits take the written value. The original register
 * content is saved and restored around each probe. Table entries are
 * filtered by ASIC family via the TG3_FL_* flags.
 */
10637 static int tg3_test_registers(struct tg3 *tp)
10639 int i, is_5705, is_5750;
10640 u32 offset, read_mask, write_mask, val, save_val, read_val;
/* entry-selection flags: which chip families an entry applies to */
10644 #define TG3_FL_5705 0x1
10645 #define TG3_FL_NOT_5705 0x2
10646 #define TG3_FL_NOT_5788 0x4
10647 #define TG3_FL_NOT_5750 0x8
10651 /* MAC Control Registers */
10652 { MAC_MODE, TG3_FL_NOT_5705,
10653 0x00000000, 0x00ef6f8c },
10654 { MAC_MODE, TG3_FL_5705,
10655 0x00000000, 0x01ef6b8c },
10656 { MAC_STATUS, TG3_FL_NOT_5705,
10657 0x03800107, 0x00000000 },
10658 { MAC_STATUS, TG3_FL_5705,
10659 0x03800100, 0x00000000 },
10660 { MAC_ADDR_0_HIGH, 0x0000,
10661 0x00000000, 0x0000ffff },
10662 { MAC_ADDR_0_LOW, 0x0000,
10663 0x00000000, 0xffffffff },
10664 { MAC_RX_MTU_SIZE, 0x0000,
10665 0x00000000, 0x0000ffff },
10666 { MAC_TX_MODE, 0x0000,
10667 0x00000000, 0x00000070 },
10668 { MAC_TX_LENGTHS, 0x0000,
10669 0x00000000, 0x00003fff },
10670 { MAC_RX_MODE, TG3_FL_NOT_5705,
10671 0x00000000, 0x000007fc },
10672 { MAC_RX_MODE, TG3_FL_5705,
10673 0x00000000, 0x000007dc },
10674 { MAC_HASH_REG_0, 0x0000,
10675 0x00000000, 0xffffffff },
10676 { MAC_HASH_REG_1, 0x0000,
10677 0x00000000, 0xffffffff },
10678 { MAC_HASH_REG_2, 0x0000,
10679 0x00000000, 0xffffffff },
10680 { MAC_HASH_REG_3, 0x0000,
10681 0x00000000, 0xffffffff },
10683 /* Receive Data and Receive BD Initiator Control Registers. */
10684 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10685 0x00000000, 0xffffffff },
10686 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10687 0x00000000, 0xffffffff },
10688 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10689 0x00000000, 0x00000003 },
10690 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10691 0x00000000, 0xffffffff },
10692 { RCVDBDI_STD_BD+0, 0x0000,
10693 0x00000000, 0xffffffff },
10694 { RCVDBDI_STD_BD+4, 0x0000,
10695 0x00000000, 0xffffffff },
10696 { RCVDBDI_STD_BD+8, 0x0000,
10697 0x00000000, 0xffff0002 },
10698 { RCVDBDI_STD_BD+0xc, 0x0000,
10699 0x00000000, 0xffffffff },
10701 /* Receive BD Initiator Control Registers. */
10702 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10703 0x00000000, 0xffffffff },
10704 { RCVBDI_STD_THRESH, TG3_FL_5705,
10705 0x00000000, 0x000003ff },
10706 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10707 0x00000000, 0xffffffff },
10709 /* Host Coalescing Control Registers. */
10710 { HOSTCC_MODE, TG3_FL_NOT_5705,
10711 0x00000000, 0x00000004 },
10712 { HOSTCC_MODE, TG3_FL_5705,
10713 0x00000000, 0x000000f6 },
10714 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10715 0x00000000, 0xffffffff },
10716 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10717 0x00000000, 0x000003ff },
10718 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10719 0x00000000, 0xffffffff },
10720 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10721 0x00000000, 0x000003ff },
10722 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10723 0x00000000, 0xffffffff },
10724 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10725 0x00000000, 0x000000ff },
10726 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10727 0x00000000, 0xffffffff },
10728 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10729 0x00000000, 0x000000ff },
10730 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10731 0x00000000, 0xffffffff },
10732 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10733 0x00000000, 0xffffffff },
10734 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10735 0x00000000, 0xffffffff },
10736 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10737 0x00000000, 0x000000ff },
10738 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10739 0x00000000, 0xffffffff },
10740 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10741 0x00000000, 0x000000ff },
10742 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10743 0x00000000, 0xffffffff },
10744 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10745 0x00000000, 0xffffffff },
10746 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10747 0x00000000, 0xffffffff },
10748 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10749 0x00000000, 0xffffffff },
10750 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10751 0x00000000, 0xffffffff },
10752 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10753 0xffffffff, 0x00000000 },
10754 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10755 0xffffffff, 0x00000000 },
10757 /* Buffer Manager Control Registers. */
10758 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10759 0x00000000, 0x007fff80 },
10760 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10761 0x00000000, 0x007fffff },
10762 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10763 0x00000000, 0x0000003f },
10764 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10765 0x00000000, 0x000001ff },
10766 { BUFMGR_MB_HIGH_WATER, 0x0000,
10767 0x00000000, 0x000001ff },
10768 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10769 0xffffffff, 0x00000000 },
10770 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10771 0xffffffff, 0x00000000 },
10773 /* Mailbox Registers */
10774 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10775 0x00000000, 0x000001ff },
10776 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10777 0x00000000, 0x000001ff },
10778 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10779 0x00000000, 0x000007ff },
10780 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10781 0x00000000, 0x000001ff },
/* sentinel terminating the table */
10783 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10786 is_5705 = is_5750 = 0;
10787 if (tg3_flag(tp, 5705_PLUS)) {
10789 if (tg3_flag(tp, 5750_PLUS))
10793 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
/* skip entries that don't apply to this chip family */
10794 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10797 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10800 if (tg3_flag(tp, IS_5788) &&
10801 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10804 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10807 offset = (u32) reg_tbl[i].offset;
10808 read_mask = reg_tbl[i].read_mask;
10809 write_mask = reg_tbl[i].write_mask;
10811 /* Save the original register content */
10812 save_val = tr32(offset);
10814 /* Determine the read-only value. */
10815 read_val = save_val & read_mask;
10817 /* Write zero to the register, then make sure the read-only bits
10818 * are not changed and the read/write bits are all zeros.
10822 val = tr32(offset);
10824 /* Test the read-only and read/write bits. */
10825 if (((val & read_mask) != read_val) || (val & write_mask))
10828 /* Write ones to all the bits defined by RdMask and WrMask, then
10829 * make sure the read-only bits are not changed and the
10830 * read/write bits are all ones.
10832 tw32(offset, read_mask | write_mask);
10834 val = tr32(offset);
10836 /* Test the read-only bits. */
10837 if ((val & read_mask) != read_val)
10840 /* Test the read/write bits. */
10841 if ((val & write_mask) != write_mask)
/* restore the register before moving on */
10844 tw32(offset, save_val);
/* failure path: log the offending offset, restore, and return error */
10850 if (netif_msg_hw(tp))
10851 netdev_err(tp->dev,
10852 "Register test failed at offset %x\n", offset);
10853 tw32(offset, save_val);
/* Write each test pattern across [offset, offset+len) in 4-byte steps and
 * read it back; a mismatch fails the test. NOTE(review): the error return
 * and success return are elided in this excerpt.
 */
10857 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10859 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10863 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10864 for (j = 0; j < len; j += 4) {
10867 tg3_write_mem(tp, offset + j, test_pattern[i]);
10868 tg3_read_mem(tp, offset + j, &val);
10869 if (val != test_pattern[i])
/* ethtool offline self-test: pattern-test the chip's internal memory.
 * Picks the {offset, length} region table matching the ASIC generation,
 * then runs tg3_do_mem_test() over each region until the 0xffffffff
 * sentinel entry.
 */
10876 static int tg3_test_memory(struct tg3 *tp)
10878 static struct mem_entry {
10881 } mem_tbl_570x[] = {
10882 { 0x00000000, 0x00b50},
10883 { 0x00002000, 0x1c000},
10884 { 0xffffffff, 0x00000}
10885 }, mem_tbl_5705[] = {
10886 { 0x00000100, 0x0000c},
10887 { 0x00000200, 0x00008},
10888 { 0x00004000, 0x00800},
10889 { 0x00006000, 0x01000},
10890 { 0x00008000, 0x02000},
10891 { 0x00010000, 0x0e000},
10892 { 0xffffffff, 0x00000}
10893 }, mem_tbl_5755[] = {
10894 { 0x00000200, 0x00008},
10895 { 0x00004000, 0x00800},
10896 { 0x00006000, 0x00800},
10897 { 0x00008000, 0x02000},
10898 { 0x00010000, 0x0c000},
10899 { 0xffffffff, 0x00000}
10900 }, mem_tbl_5906[] = {
10901 { 0x00000200, 0x00008},
10902 { 0x00004000, 0x00400},
10903 { 0x00006000, 0x00400},
10904 { 0x00008000, 0x01000},
10905 { 0x00010000, 0x01000},
10906 { 0xffffffff, 0x00000}
10907 }, mem_tbl_5717[] = {
10908 { 0x00000200, 0x00008},
10909 { 0x00010000, 0x0a000},
10910 { 0x00020000, 0x13c00},
10911 { 0xffffffff, 0x00000}
10912 }, mem_tbl_57765[] = {
10913 { 0x00000200, 0x00008},
10914 { 0x00004000, 0x00800},
10915 { 0x00006000, 0x09800},
10916 { 0x00010000, 0x0a000},
10917 { 0xffffffff, 0x00000}
10919 struct mem_entry *mem_tbl;
/* select the region table for this ASIC, newest families first */
10923 if (tg3_flag(tp, 5717_PLUS))
10924 mem_tbl = mem_tbl_5717;
10925 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10926 mem_tbl = mem_tbl_57765;
10927 else if (tg3_flag(tp, 5755_PLUS))
10928 mem_tbl = mem_tbl_5755;
10929 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10930 mem_tbl = mem_tbl_5906;
10931 else if (tg3_flag(tp, 5705_PLUS))
10932 mem_tbl = mem_tbl_5705;
10934 mem_tbl = mem_tbl_570x;
10936 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10937 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
10945 #define TG3_MAC_LOOPBACK 0
10946 #define TG3_PHY_LOOPBACK 1
10947 #define TG3_TSO_LOOPBACK 2
10949 #define TG3_TSO_MSS 500
10951 #define TG3_TSO_IP_HDR_LEN 20
10952 #define TG3_TSO_TCP_HDR_LEN 20
10953 #define TG3_TSO_TCP_OPT_LEN 12
/* Canned packet header template used by the TSO loopback test.
 * NOTE(review): byte layout consistent with an IPv4 header followed by a
 * TCP header with options (TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
 * TG3_TSO_TCP_OPT_LEN bytes); checksum fields left zero. Confirm against
 * the full upstream source.
 */
10955 static const u8 tg3_tso_header[] = {
10957 0x45, 0x00, 0x00, 0x00,
10958 0x00, 0x00, 0x40, 0x00,
10959 0x40, 0x06, 0x00, 0x00,
10960 0x0a, 0x00, 0x00, 0x01,
10961 0x0a, 0x00, 0x00, 0x02,
10962 0x0d, 0x00, 0xe0, 0x00,
10963 0x00, 0x00, 0x01, 0x00,
10964 0x00, 0x00, 0x02, 0x00,
10965 0x80, 0x10, 0x10, 0x00,
10966 0x14, 0x09, 0x00, 0x00,
10967 0x01, 0x01, 0x08, 0x0a,
10968 0x11, 0x11, 0x11, 0x11,
10969 0x11, 0x11, 0x11, 0x11,
/* Core loopback engine for the ethtool self-test.
 * Configures MAC- or PHY-internal loopback (or reuses PHY loopback for a
 * TSO variant), transmits one crafted frame of 'pktsz' bytes, then polls
 * the rx return ring and byte-compares the received payload.
 * NOTE(review): many returns, else-branches and closing braces are elided
 * in this excerpt; statement order here is hardware-sensitive.
 */
10972 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
10974 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10975 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
10976 struct sk_buff *skb, *rx_skb;
10979 int num_pkts, tx_len, rx_len, i, err;
10980 struct tg3_rx_buffer_desc *desc;
10981 struct tg3_napi *tnapi, *rnapi;
10982 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
/* with RSS/TSS enabled, queue 1 carries the test traffic */
10984 tnapi = &tp->napi[0];
10985 rnapi = &tp->napi[0];
10986 if (tp->irq_cnt > 1) {
10987 if (tg3_flag(tp, ENABLE_RSS))
10988 rnapi = &tp->napi[1];
10989 if (tg3_flag(tp, ENABLE_TSS))
10990 tnapi = &tp->napi[1];
10992 coal_now = tnapi->coal_now | rnapi->coal_now;
10994 if (loopback_mode == TG3_MAC_LOOPBACK) {
10995 /* HW errata - mac loopback fails in some cases on 5780.
10996 * Normal traffic and PHY loopback are not affected by
10997 * errata. Also, the MAC loopback test is deprecated for
10998 * all newer ASIC revisions.
11000 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11001 tg3_flag(tp, CPMU_PRESENT))
/* route tx straight back to rx inside the MAC */
11004 mac_mode = tp->mac_mode &
11005 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11006 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11007 if (!tg3_flag(tp, 5705_PLUS))
11008 mac_mode |= MAC_MODE_LINK_POLARITY;
11009 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11010 mac_mode |= MAC_MODE_PORT_MODE_MII;
11012 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11013 tw32(MAC_MODE, mac_mode);
/* PHY loopback: force BMCR loopback at the best supported speed */
11015 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11016 tg3_phy_fet_toggle_apd(tp, false);
11017 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11019 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11021 tg3_phy_toggle_automdix(tp, 0);
11023 tg3_writephy(tp, MII_BMCR, val);
11026 mac_mode = tp->mac_mode &
11027 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11028 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11029 tg3_writephy(tp, MII_TG3_FET_PTEST,
11030 MII_TG3_FET_PTEST_FRC_TX_LINK |
11031 MII_TG3_FET_PTEST_FRC_TX_LOCK);
11032 /* The write needs to be flushed for the AC131 */
11033 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11034 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11035 mac_mode |= MAC_MODE_PORT_MODE_MII;
11037 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11039 /* reset to prevent losing 1st rx packet intermittently */
11040 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11041 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11043 tw32_f(MAC_RX_MODE, tp->rx_mode);
/* BCM5401/5411 on 5700 need link-polarity quirks */
11045 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11046 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11047 if (masked_phy_id == TG3_PHY_ID_BCM5401)
11048 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11049 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11050 mac_mode |= MAC_MODE_LINK_POLARITY;
11051 tg3_writephy(tp, MII_TG3_EXT_CTRL,
11052 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11054 tw32(MAC_MODE, mac_mode);
11056 /* Wait for link */
11057 for (i = 0; i < 100; i++) {
11058 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
/* build the test frame: dst MAC = own address, then a byte ramp */
11067 skb = netdev_alloc_skb(tp->dev, tx_len);
11071 tx_data = skb_put(skb, tx_len);
11072 memcpy(tx_data, tp->dev->dev_addr, 6);
11073 memset(tx_data + 6, 0x0, 8);
11075 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11077 if (loopback_mode == TG3_TSO_LOOPBACK) {
11078 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11080 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11081 TG3_TSO_TCP_OPT_LEN;
11083 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11084 sizeof(tg3_tso_header));
11087 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11088 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11090 /* Set the total length field in the IP header */
11091 iph->tot_len = htons((u16)(mss + hdr_len));
11093 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11094 TXD_FLAG_CPU_POST_DMA);
11096 if (tg3_flag(tp, HW_TSO_1) ||
11097 tg3_flag(tp, HW_TSO_2) ||
11098 tg3_flag(tp, HW_TSO_3)) {
11100 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11101 th = (struct tcphdr *)&tx_data[val];
11104 base_flags |= TXD_FLAG_TCPUDP_CSUM;
/* encode the header length into mss/base_flags per TSO generation */
11106 if (tg3_flag(tp, HW_TSO_3)) {
11107 mss |= (hdr_len & 0xc) << 12;
11108 if (hdr_len & 0x10)
11109 base_flags |= 0x00000010;
11110 base_flags |= (hdr_len & 0x3e0) << 5;
11111 } else if (tg3_flag(tp, HW_TSO_2))
11112 mss |= hdr_len << 9;
11113 else if (tg3_flag(tp, HW_TSO_1) ||
11114 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11115 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11117 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11120 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11123 data_off = ETH_HLEN;
11126 for (i = data_off; i < tx_len; i++)
11127 tx_data[i] = (u8) (i & 0xff);
11129 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11130 if (pci_dma_mapping_error(tp->pdev, map)) {
11131 dev_kfree_skb(skb);
11135 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
/* remember where the rx producer was before we transmit */
11140 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11142 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11143 base_flags, (mss << 1) | 1);
11147 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11148 tr32_mailbox(tnapi->prodmbox);
11152 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11153 for (i = 0; i < 35; i++) {
11154 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11159 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11160 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11161 if ((tx_idx == tnapi->tx_prod) &&
11162 (rx_idx == (rx_start_idx + num_pkts)))
11166 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11167 dev_kfree_skb(skb);
11169 if (tx_idx != tnapi->tx_prod)
11172 if (rx_idx != rx_start_idx + num_pkts)
/* walk each returned descriptor and validate ring, length and payload */
11176 while (rx_idx != rx_start_idx) {
11177 desc = &rnapi->rx_rcb[rx_start_idx++];
11178 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11179 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11181 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11182 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11185 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11188 if (loopback_mode != TG3_TSO_LOOPBACK) {
11189 if (rx_len != tx_len)
11192 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11193 if (opaque_key != RXD_OPAQUE_RING_STD)
11196 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11199 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11200 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11201 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11205 if (opaque_key == RXD_OPAQUE_RING_STD) {
11206 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11207 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11209 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11210 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11211 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11216 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11217 PCI_DMA_FROMDEVICE);
/* compare the received ramp against what was transmitted */
11219 for (i = data_off; i < rx_len; i++, val++) {
11220 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11227 /* tg3_free_rings will unmap and free the rx_skb */
11232 #define TG3_STD_LOOPBACK_FAILED 1
11233 #define TG3_JMB_LOOPBACK_FAILED 2
11234 #define TG3_TSO_LOOPBACK_FAILED 4
11236 #define TG3_MAC_LOOPBACK_SHIFT 0
11237 #define TG3_PHY_LOOPBACK_SHIFT 4
11238 #define TG3_LOOPBACK_FAILED 0x00000077
/* ethtool self-test driver for the loopback modes. Resets the hardware,
 * temporarily disables EEE/APD and (on CPMU parts) link-based power
 * management under the CPMU mutex, then runs standard/jumbo MAC loopback
 * and standard/TSO/jumbo PHY loopback, accumulating TG3_*_FAILED bits
 * shifted per mode into the return value.
 */
11240 static int tg3_test_loopback(struct tg3 *tp)
11243 u32 eee_cap, cpmuctrl = 0;
11245 if (!netif_running(tp->dev))
11246 return TG3_LOOPBACK_FAILED;
/* EEE interferes with loopback; save the cap bit and clear it */
11248 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11249 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11251 err = tg3_reset_hw(tp, 1);
11253 err = TG3_LOOPBACK_FAILED;
11257 if (tg3_flag(tp, ENABLE_RSS)) {
11260 /* Reroute all rx packets to the 1st queue */
11261 for (i = MAC_RSS_INDIR_TBL_0;
11262 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11266 /* Turn off gphy autopowerdown. */
11267 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11268 tg3_phy_toggle_apd(tp, false);
11270 if (tg3_flag(tp, CPMU_PRESENT)) {
/* acquire the driver/firmware CPMU mutex before touching CPMU_CTRL */
11274 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11276 /* Wait for up to 40 microseconds to acquire lock. */
11277 for (i = 0; i < 4; i++) {
11278 status = tr32(TG3_CPMU_MUTEX_GNT);
11279 if (status == CPMU_MUTEX_GNT_DRIVER)
11284 if (status != CPMU_MUTEX_GNT_DRIVER) {
11285 err = TG3_LOOPBACK_FAILED;
11289 /* Turn off link-based power management. */
11290 cpmuctrl = tr32(TG3_CPMU_CTRL);
11291 tw32(TG3_CPMU_CTRL,
11292 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11293 CPMU_CTRL_LINK_AWARE_MODE));
11296 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11297 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11299 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11300 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11301 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11303 if (tg3_flag(tp, CPMU_PRESENT)) {
11304 tw32(TG3_CPMU_CTRL, cpmuctrl)
11306 /* Release the mutex */
11307 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
/* PHY loopback only makes sense for copper PHYs driven by this driver */
11310 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11311 !tg3_flag(tp, USE_PHYLIB)) {
11312 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11313 err |= TG3_STD_LOOPBACK_FAILED <<
11314 TG3_PHY_LOOPBACK_SHIFT;
11315 if (tg3_flag(tp, TSO_CAPABLE) &&
11316 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11317 err |= TG3_TSO_LOOPBACK_FAILED <<
11318 TG3_PHY_LOOPBACK_SHIFT;
11319 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11320 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11321 err |= TG3_JMB_LOOPBACK_FAILED <<
11322 TG3_PHY_LOOPBACK_SHIFT;
11325 /* Re-enable gphy autopowerdown. */
11326 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11327 tg3_phy_toggle_apd(tp, true);
/* restore the saved EEE capability bit */
11330 tp->phy_flags |= eee_cap;
/* ethtool .self_test entry point. Runs NVRAM and link tests always; for
 * an offline test additionally halts the device, runs register, memory,
 * loopback and interrupt tests, then restarts the hardware. Per-test
 * results go into data[], and any failure sets ETH_TEST_FL_FAILED.
 * NOTE(review): individual data[] index assignments other than data[4]
 * (loopback) are elided in this excerpt.
 */
11335 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11338 struct tg3 *tp = netdev_priv(dev);
11340 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11343 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11345 if (tg3_test_nvram(tp) != 0) {
11346 etest->flags |= ETH_TEST_FL_FAILED;
11349 if (tg3_test_link(tp) != 0) {
11350 etest->flags |= ETH_TEST_FL_FAILED;
11353 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11354 int err, err2 = 0, irq_sync = 0;
11356 if (netif_running(dev)) {
11358 tg3_netif_stop(tp);
11362 tg3_full_lock(tp, irq_sync);
/* quiesce the chip: halt, stop on-chip CPUs under the NVRAM lock */
11364 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11365 err = tg3_nvram_lock(tp);
11366 tg3_halt_cpu(tp, RX_CPU_BASE);
11367 if (!tg3_flag(tp, 5705_PLUS))
11368 tg3_halt_cpu(tp, TX_CPU_BASE);
11370 tg3_nvram_unlock(tp);
11372 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11375 if (tg3_test_registers(tp) != 0) {
11376 etest->flags |= ETH_TEST_FL_FAILED;
11379 if (tg3_test_memory(tp) != 0) {
11380 etest->flags |= ETH_TEST_FL_FAILED;
11383 if ((data[4] = tg3_test_loopback(tp)) != 0)
11384 etest->flags |= ETH_TEST_FL_FAILED;
11386 tg3_full_unlock(tp);
/* interrupt test must run unlocked */
11388 if (tg3_test_interrupt(tp) != 0) {
11389 etest->flags |= ETH_TEST_FL_FAILED;
11393 tg3_full_lock(tp, 0);
/* bring the device back up after the destructive tests */
11395 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11396 if (netif_running(dev)) {
11397 tg3_flag_set(tp, INIT_COMPLETE);
11398 err2 = tg3_restart_hw(tp, 1);
11400 tg3_netif_start(tp);
11403 tg3_full_unlock(tp);
11405 if (irq_sync && !err2)
11408 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11409 tg3_power_down(tp);
/* net_device ioctl handler: MII register access (SIOCGMIIPHY/
 * SIOCGMIIREG/SIOCSMIIREG). When phylib manages the PHY, the request is
 * forwarded to phy_mii_ioctl(). NOTE(review): the switch(cmd) line, some
 * case labels, returns and closing braces are elided in this excerpt.
 */
11413 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11415 struct mii_ioctl_data *data = if_mii(ifr);
11416 struct tg3 *tp = netdev_priv(dev);
11419 if (tg3_flag(tp, USE_PHYLIB)) {
11420 struct phy_device *phydev;
11421 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11423 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11424 return phy_mii_ioctl(phydev, ifr, cmd);
/* SIOCGMIIPHY (case label elided): report the PHY address */
11429 data->phy_id = tp->phy_addr;
11432 case SIOCGMIIREG: {
11435 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11436 break; /* We have no PHY */
11438 if (!netif_running(dev))
/* serialize PHY access against the rest of the driver */
11441 spin_lock_bh(&tp->lock);
11442 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11443 spin_unlock_bh(&tp->lock);
11445 data->val_out = mii_regval;
/* SIOCSMIIREG (case label elided): write a PHY register */
11451 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11452 break; /* We have no PHY */
11454 if (!netif_running(dev))
11457 spin_lock_bh(&tp->lock);
11458 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11459 spin_unlock_bh(&tp->lock);
11467 return -EOPNOTSUPP;
/* ethtool .get_coalesce: return the cached coalescing parameters. */
11470 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11472 struct tg3 *tp = netdev_priv(dev);
11474 memcpy(ec, &tp->coal, sizeof(*ec));
/* ethtool .set_coalesce: validate requested interrupt-coalescing values
 * against chip limits (the _irq and stats-tick limits are 0 on 5705+
 * parts, i.e. unsupported there), store the supported subset in tp->coal
 * and program the hardware if the interface is up.
 */
11478 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11480 struct tg3 *tp = netdev_priv(dev);
11481 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11482 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11484 if (!tg3_flag(tp, 5705_PLUS)) {
11485 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11486 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11487 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11488 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
/* reject any out-of-range parameter (return elided in this excerpt) */
11491 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11492 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11493 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11494 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11495 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11496 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11497 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11498 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11499 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11500 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11503 /* No rx interrupts will be generated if both are zero */
11504 if ((ec->rx_coalesce_usecs == 0) &&
11505 (ec->rx_max_coalesced_frames == 0))
11508 /* No tx interrupts will be generated if both are zero */
11509 if ((ec->tx_coalesce_usecs == 0) &&
11510 (ec->tx_max_coalesced_frames == 0))
11513 /* Only copy relevant parameters, ignore all others. */
11514 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11515 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11516 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11517 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11518 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11519 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11520 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11521 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11522 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
/* apply immediately when the interface is running */
11524 if (netif_running(dev)) {
11525 tg3_full_lock(tp, 0);
11526 __tg3_set_coalesce(tp, &tp->coal);
11527 tg3_full_unlock(tp);
/* ethtool operations table wiring the handlers above into the ethtool
 * core. NOTE(review): the closing '};' is elided in this excerpt.
 */
11532 static const struct ethtool_ops tg3_ethtool_ops = {
11533 .get_settings = tg3_get_settings,
11534 .set_settings = tg3_set_settings,
11535 .get_drvinfo = tg3_get_drvinfo,
11536 .get_regs_len = tg3_get_regs_len,
11537 .get_regs = tg3_get_regs,
11538 .get_wol = tg3_get_wol,
11539 .set_wol = tg3_set_wol,
11540 .get_msglevel = tg3_get_msglevel,
11541 .set_msglevel = tg3_set_msglevel,
11542 .nway_reset = tg3_nway_reset,
11543 .get_link = ethtool_op_get_link,
11544 .get_eeprom_len = tg3_get_eeprom_len,
11545 .get_eeprom = tg3_get_eeprom,
11546 .set_eeprom = tg3_set_eeprom,
11547 .get_ringparam = tg3_get_ringparam,
11548 .set_ringparam = tg3_set_ringparam,
11549 .get_pauseparam = tg3_get_pauseparam,
11550 .set_pauseparam = tg3_set_pauseparam,
11551 .self_test = tg3_self_test,
11552 .get_strings = tg3_get_strings,
11553 .set_phys_id = tg3_set_phys_id,
11554 .get_ethtool_stats = tg3_get_ethtool_stats,
11555 .get_coalesce = tg3_get_coalesce,
11556 .set_coalesce = tg3_set_coalesce,
11557 .get_sset_count = tg3_get_sset_count,
/* Probe-time helper: determine the EEPROM size by reading at doubling
 * offsets until the address space wraps and the magic signature reappears
 * (body of the doubling step elided in this excerpt). Falls back to the
 * default EEPROM_CHIP_SIZE when the magic is not recognized.
 */
11560 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11562 u32 cursize, val, magic;
11564 tp->nvram_size = EEPROM_CHIP_SIZE;
11566 if (tg3_nvram_read(tp, 0, &magic) != 0)
11569 if ((magic != TG3_EEPROM_MAGIC) &&
11570 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11571 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11575 * Size the chip by reading offsets at increasing powers of two.
11576 * When we encounter our validation signature, we know the addressing
11577 * has wrapped around, and thus have our chip size.
11581 while (cursize < tp->nvram_size) {
11582 if (tg3_nvram_read(tp, cursize, &val) != 0)
11591 tp->nvram_size = cursize;
/* Probe-time helper: determine NVRAM size. Non-EEPROM-magic images are
 * delegated to tg3_get_eeprom_size(); otherwise the size is read from
 * the 16-bit field at offset 0xf2 (in KiB), defaulting to 512KB when
 * that field is zero/unreadable.
 */
11594 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11598 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11601 /* Selfboot format */
11602 if (val != TG3_EEPROM_MAGIC) {
11603 tg3_get_eeprom_size(tp);
11607 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11609 /* This is confusing. We want to operate on the
11610 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11611 * call will read from NVRAM and byteswap the data
11612 * according to the byteswapping settings for all
11613 * other register accesses. This ensures the data we
11614 * want will always reside in the lower 16-bits.
11615 * However, the data in NVRAM is in LE format, which
11616 * means the data from the NVRAM read will always be
11617 * opposite the endianness of the CPU. The 16-bit
11618 * byteswap then brings the data to CPU endianness.
11620 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
/* field was zero or unreadable: assume the largest supported part */
11624 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
/* Decode NVRAM_CFG1 on 5750/5780-class chips: identify the flash/EEPROM
 * vendor, set the JEDEC id and page size, and mark whether the part is
 * buffered flash.  Other chips default to a buffered Atmel AT45DB0X1B.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
		/* No flash interface: force direct (non-bypass) access mode. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;

	/* Non-5750/5780-class default: buffered Atmel AT45DB0X1B. */
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
	tg3_flag_set(tp, NVRAM_BUFFERED);
/* Translate the 5752-family page-size field of NVRAM_CFG1 into a byte
 * count stored in tp->nvram_pagesize.  The 264/528-byte sizes are the
 * Atmel DataFlash "power of two plus spare" page geometries.
 */
static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
/* Probe NVRAM layout on 5752 ASICs: detect TPM write-protection, identify
 * the vendor/part from NVRAM_CFG1, then pick the page size (flash) or the
 * full-chip "page" size (EEPROM) and force non-bypass access for EEPROMs.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
/* Probe NVRAM layout on 5755 ASICs.  In addition to vendor/page-size
 * detection, the part size is derived directly from the vendor strap; when
 * the TPM write-protect bit is set, the usable size is reduced (the odd
 * 0x3e200/0x1f200 values carve out the protected region).
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
/* Probe NVRAM layout on 5787/5784/5785 ASICs: EEPROM parts get the full
 * chip size as page size and non-bypass access; Atmel DataFlash uses
 * 264-byte pages, ST M45PEx0 parts use 256-byte pages.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
/* Probe NVRAM layout on 5761 ASICs.  All supported parts here are 256-byte
 * page flash; Atmel parts additionally need NO_NVRAM_ADDR_TRANS.  When the
 * TPM protect bit is set, the usable size comes from NVRAM_ADDR_LOCKOUT;
 * otherwise the part size is taken from the vendor strap.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;

	/* Protected parts: hardware lockout register gives usable size. */
	tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);

	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_size = TG3_NVRAM_SIZE_2MB;
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE80:
		tp->nvram_size = TG3_NVRAM_SIZE_1MB;
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE40:
		tp->nvram_size = TG3_NVRAM_SIZE_512KB;
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE20:
		tp->nvram_size = TG3_NVRAM_SIZE_256KB;
/* 5906 ASICs always use a buffered Atmel EEPROM; no strap decoding needed.
 * Page size is the full chip size, as for other EEPROM (non-flash) parts.
 */
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
/* Probe NVRAM layout on 57780/57765 ASICs.  Unrecognized straps mean no
 * NVRAM at all (NO_NVRAM).  Flash parts get their page size from
 * tg3_nvram_get_pagesize(); only the Atmel 264/528-byte geometries use
 * address translation, so other page sizes set NO_NVRAM_ADDR_TRANS.
 */
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Nested switch: map the specific Atmel part to its size. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Nested switch: map the specific ST part to its size. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;

	/* Unknown strap: no usable NVRAM behind the interface. */
	tg3_flag_set(tp, NO_NVRAM);

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/* Probe NVRAM layout on 5717/5719 ASICs.  Same structure as the 57780
 * probe: EEPROM parts force non-bypass access; flash parts are sized from
 * the vendor strap (some straps defer sizing to tg3_nvram_get_size());
 * unknown straps mean NO_NVRAM.
 */
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;

	/* Unknown strap: no usable NVRAM behind the interface. */
	tg3_flag_set(tp, NO_NVRAM);

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/* Probe NVRAM layout on 5720 ASICs.  The pin-strap value (low bits of
 * NVRAM_CFG1) selects EEPROM (HD = larger AT24C512-class, LD = AT24C02-
 * class), Atmel flash, or ST flash; flash sizes come from a nested strap
 * decode.  Unknown straps mean NO_NVRAM.
 */
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Nested strap decode: Atmel part -> size. */
		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Nested strap decode: ST part -> size. */
		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;

	/* Unknown strap: no usable NVRAM behind the interface. */
	tg3_flag_set(tp, NO_NVRAM);

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Reset the SEEPROM state machine, enable auto-SEEPROM access, then (for
 * NVRAM-capable chips) take the NVRAM lock, enable access, dispatch to the
 * per-ASIC probe routine, size the part if the probe did not, and release
 * access/lock.  5700/5701 fall back to raw SEEPROM sizing.
 */
static void __devinit tg3_nvram_init(struct tg3 *tp)
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
		tg3_enable_nvram_access(tp);

		/* Probe routines below set this; 0 means "size it ourselves". */
		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

		/* 5700/5701: no NVRAM interface, probe the SEEPROM directly. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
/* Write @len bytes from @buf at @offset via the slow GRC SEEPROM engine,
 * one 32-bit word at a time.  Each word write is started through
 * GRC_EEPROM_ADDR and polled (up to ~1000 iterations) for
 * EEPROM_ADDR_COMPLETE.  Returns 0 on success, negative errno on timeout.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
					      u32 offset, u32 len, u8 *buf)
	for (i = 0; i < len; i += 4) {
		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format. We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Ack any stale completion before starting the write. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
		tw32(GRC_EEPROM_ADDR, val |
		     (0 << EEPROM_ADDR_DEVID_SHIFT) |
		     (addr & EEPROM_ADDR_ADDR_MASK) |
		     EEPROM_ADDR_START |
		     EEPROM_ADDR_WRITE);

		/* Poll for completion of this word's write. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
		if (!(val & EEPROM_ADDR_COMPLETE)) {
/* offset and length are dword aligned */
/* Write to unbuffered flash via read-modify-write of whole pages: read the
 * page containing @offset into a temporary buffer, merge the new data,
 * issue WREN + page ERASE, then another WREN and program the page word by
 * word (FIRST on the first word, LAST on the final one).  Finishes with a
 * WRDI (write-disable).  Caller owns NVRAM access/locking.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;	/* pagesize is a power of two here */

	tmp = kmalloc(pagesize, GFP_KERNEL);

		u32 phy_addr, page_off, size;

		/* Start of the page containing this offset. */
		phy_addr = offset & ~pagemask;

		/* Read the entire page so unmodified bytes survive the erase. */
		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
		page_off = offset & pagemask;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))

		for (j = 0; j < pagesize; j += 4) {
			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))

	/* Disable further writes (best effort, result ignored). */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);
12398 /* offset and length are dword aligned */
12399 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12404 for (i = 0; i < len; i += 4, offset += 4) {
12405 u32 page_off, phy_addr, nvram_cmd;
12408 memcpy(&data, buf + i, 4);
12409 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12411 page_off = offset % tp->nvram_pagesize;
12413 phy_addr = tg3_nvram_phys_addr(tp, offset);
12415 tw32(NVRAM_ADDR, phy_addr);
12417 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12419 if (page_off == 0 || i == 0)
12420 nvram_cmd |= NVRAM_CMD_FIRST;
12421 if (page_off == (tp->nvram_pagesize - 4))
12422 nvram_cmd |= NVRAM_CMD_LAST;
12424 if (i == (len - 4))
12425 nvram_cmd |= NVRAM_CMD_LAST;
12427 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12428 !tg3_flag(tp, 5755_PLUS) &&
12429 (tp->nvram_jedecnum == JEDEC_ST) &&
12430 (nvram_cmd & NVRAM_CMD_FIRST)) {
12432 if ((ret = tg3_nvram_exec_cmd(tp,
12433 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12438 if (!tg3_flag(tp, FLASH)) {
12439 /* We always do complete word writes to eeprom. */
12440 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12443 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
/* offset and length are dword aligned */
/* Top-level NVRAM write dispatcher: drops the write-protect GPIO if the
 * board uses it, routes to the SEEPROM path (no NVRAM interface) or to the
 * buffered/unbuffered flash writer under the NVRAM lock with write access
 * enabled via GRC_MODE, then restores write-protect.  Returns 0 or a
 * negative errno from the underlying writer.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		/* De-assert the WP GPIO so the part accepts writes. */
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);

		ret = tg3_nvram_lock(tp);

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		/* Re-assert write protect. */
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
/* One entry of the subsystem-id -> PHY-id lookup table below. */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;	/* PCI subsystem vendor/device */
/* Board-specific PHY id table, keyed by PCI subsystem vendor/device.
 * A phy-id of 0 means the board uses a SerDes interface rather than a
 * known copper PHY (assumption from usage pattern — confirm at caller).
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
/* Linear search of subsys_id_to_phy_id[] for this device's PCI subsystem
 * vendor/device pair; returns the matching entry.
 */
static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
/* Parse the hardware configuration the bootcode left in NIC SRAM: PHY id,
 * SerDes vs copper, LED mode, write-protect/onboard status, ASF/APE
 * enablement, Wake-on-LAN capability, and various per-ASIC workaround
 * flags.  Must run with the device in D0 and the memory arbiter enabled,
 * which this function forces first.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed. Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		/* 5906 keeps its config in the VCPU shadow register. */
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			/* Reassemble the MII PHY id from the two SRAM words. */
			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
					  SHASTA_EXT_LED_MODE_MASK);
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

		/* Dell 5700/5701 boards wire the LEDs differently. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Specific Arima boards set WP but are writable. */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* Fiber/SerDes boards only wake if bootcode says so. */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

	/* Publish final WOL state to the PM core. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
		device_set_wakeup_capable(&tp->pdev->dev, false);
/* Issue one command to the chip's OTP (one-time-programmable) memory
 * controller and poll OTP_STATUS for completion.
 *
 * The command is written twice: first with the CMD_START trigger bit
 * OR'd in, then bare.  Returns 0 when OTP_STATUS_CMD_DONE is observed,
 * -EBUSY if the command has not completed after the poll loop
 * (~1 ms total; loop body between iterations not visible in this view —
 * presumably a short udelay, TODO confirm).
 */
12805 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12810 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12811 	tw32(OTP_CTRL, cmd);
12813 	/* Wait for up to 1 ms for command to execute. */
12814 	for (i = 0; i < 100; i++) {
12815 		val = tr32(OTP_STATUS);
12816 		if (val & OTP_STATUS_CMD_DONE)
12821 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12824 /* Read the gphy configuration from the OTP region of the chip. The gphy
12825 * configuration is a 32-bit value that straddles the alignment boundary.
12826 * We do two 32-bit reads and then shift and merge the results.
12828 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12830 u32 bhalf_otp, thalf_otp;
/* Route OTP accesses through the GRC register window. */
12832 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12834 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
/* First read at MAGIC1 supplies the top half of the config word. */
12837 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12839 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12842 thalf_otp = tr32(OTP_READ_DATA);
/* Second read at MAGIC2 supplies the bottom half. */
12844 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12846 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12849 bhalf_otp = tr32(OTP_READ_DATA);
/* Merge: low 16 bits of the first read become the high half of the
 * result, high 16 bits of the second read become the low half.
 * (Failure paths of the OTP commands are not visible here —
 * presumably they return 0; TODO confirm.)
 */
12851 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
/* Initialize tp->link_config to "autonegotiate everything the PHY
 * supports": build the advertisement mask from the PHY capability
 * flags, enable autoneg, and mark all current/original speed and
 * duplex fields as invalid (no link yet).
 */
12854 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12856 u32 adv = ADVERTISED_Autoneg |
/* Gigabit modes only if the PHY is not restricted to 10/100. */
12859 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12860 adv |= ADVERTISED_1000baseT_Half |
12861 ADVERTISED_1000baseT_Full;
/* Copper 10/100 modes only for non-SerDes PHYs; SerDes links
 * advertise FIBRE instead.
 */
12863 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12864 adv |= ADVERTISED_100baseT_Half |
12865 ADVERTISED_100baseT_Full |
12866 ADVERTISED_10baseT_Half |
12867 ADVERTISED_10baseT_Full |
12870 adv |= ADVERTISED_FIBRE;
12872 tp->link_config.advertising = adv;
12873 tp->link_config.speed = SPEED_INVALID;
12874 tp->link_config.duplex = DUPLEX_INVALID;
12875 tp->link_config.autoneg = AUTONEG_ENABLE;
12876 tp->link_config.active_speed = SPEED_INVALID;
12877 tp->link_config.active_duplex = DUPLEX_INVALID;
12878 tp->link_config.orig_speed = SPEED_INVALID;
12879 tp->link_config.orig_duplex = DUPLEX_INVALID;
12880 tp->link_config.orig_autoneg = AUTONEG_INVALID;
/* Probe and identify the PHY attached to this MAC, establish the
 * default link configuration, and perform the initial PHY reset /
 * autonegotiation setup.  Returns 0 on success or a negative errno.
 *
 * Identification order: (1) if ASF/APE firmware owns the PHY, skip the
 * hardware read; (2) otherwise read MII_PHYSID1/2 and accept the ID if
 * it is known; (3) fall back to the ID already placed in tp->phy_id by
 * tg3_get_eeprom_hw_cfg(); (4) finally fall back to the hard-coded
 * subsystem-ID table.
 */
12883 static int __devinit tg3_phy_probe(struct tg3 *tp)
12885 u32 hw_phy_id_1, hw_phy_id_2;
12886 u32 hw_phy_id, hw_phy_id_masked;
12889 /* flow control autonegotiation is default behavior */
12890 tg3_flag_set(tp, PAUSE_AUTONEG);
12891 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
/* When the phylib framework manages the PHY, delegate entirely. */
12893 if (tg3_flag(tp, USE_PHYLIB))
12894 return tg3_phy_init(tp);
12896 /* Reading the PHY ID register can conflict with ASF
12897 * firmware access to the PHY hardware.
12900 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
12901 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12903 /* Now read the physical PHY_ID from the chip and verify
12904 * that it is sane. If it doesn't look good, we fall back
12905 * to either the hard-coded table based PHY_ID and failing
12906 * that the value found in the eeprom area.
12908 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12909 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
/* Pack the two 16-bit MII ID registers into the driver's internal
 * 32-bit PHY ID layout.
 */
12911 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12912 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12913 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
12915 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12918 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12919 tp->phy_id = hw_phy_id;
12920 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12921 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12923 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12925 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12926 /* Do nothing, phy ID already set up in
12927 * tg3_get_eeprom_hw_cfg().
12930 struct subsys_tbl_ent *p;
12932 /* No eeprom signature? Try the hardcoded
12933 * subsys device table.
12935 p = tg3_lookup_by_subsys(tp);
12939 tp->phy_id = p->phy_id;
12941 tp->phy_id == TG3_PHY_ID_BCM8002)
12942 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
/* EEE capability: copper PHYs on 5718 (post-A0) and 57765 (post-A0). */
12946 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12947 ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
12948 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
12949 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12950 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12951 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12953 tg3_phy_init_link_config(tp);
/* Only reset/reconfigure the PHY ourselves when no management
 * firmware (ASF/APE) is using it and it is a copper PHY.
 */
12955 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12956 !tg3_flag(tp, ENABLE_APE) &&
12957 !tg3_flag(tp, ENABLE_ASF)) {
/* BMSR is read twice: link status is latched-low, so the first
 * read clears a stale latch and the second reflects reality.
 */
12960 tg3_readphy(tp, MII_BMSR, &bmsr);
12961 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12962 (bmsr & BMSR_LSTATUS))
12963 goto skip_phy_reset;
12965 err = tg3_phy_reset(tp);
12969 tg3_phy_set_wirespeed(tp);
12971 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12972 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12973 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
/* If the PHY is not already advertising everything we want,
 * rewrite the advertisement and restart autonegotiation.
 */
12974 if (!tg3_copper_is_advertising_all(tp, mask)) {
12975 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
12976 tp->link_config.flowctrl);
12978 tg3_writephy(tp, MII_BMCR,
12979 BMCR_ANENABLE | BMCR_ANRESTART);
/* BCM5401 needs its DSP coefficients loaded after reset. */
12984 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
12985 err = tg3_init_5401phy_dsp(tp);
12989 err = tg3_init_5401phy_dsp(tp);
/* Parse the device's PCI Vital Product Data: extract a vendor firmware
 * version string into tp->fw_ver (Dell-branded boards, MFR_ID "1028")
 * and the board part number into tp->board_part_number.  If no usable
 * VPD part number is found, fall back to hard-coded names keyed off
 * the ASIC revision and PCI device ID, and finally to "none".
 */
12995 static void __devinit tg3_read_vpd(struct tg3 *tp)
12998 unsigned int block_end, rosize, len;
13001 vpd_data = (u8 *)tg3_vpd_readblock(tp);
/* Locate the large-resource read-only data block within the VPD. */
13005 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13006 PCI_VPD_LRDT_RO_DATA);
13008 goto out_not_found;
13010 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13011 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13012 i += PCI_VPD_LRDT_TAG_SIZE;
13014 if (block_end > TG3_NVM_VPD_LEN)
13015 goto out_not_found;
/* Dell OEM boards: MFR_ID keyword must be exactly "1028" before the
 * VENDOR0 keyword is trusted as a firmware version.
 */
13017 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13018 PCI_VPD_RO_KEYWORD_MFR_ID);
13020 len = pci_vpd_info_field_size(&vpd_data[j]);
13022 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13023 if (j + len > block_end || len != 4 ||
13024 memcmp(&vpd_data[j], "1028", 4))
13027 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13028 PCI_VPD_RO_KEYWORD_VENDOR0);
13032 len = pci_vpd_info_field_size(&vpd_data[j]);
13034 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13035 if (j + len > block_end)
/* Copy the vendor version, then append " bc " so the bootcode
 * version read later can be concatenated after it.
 */
13038 memcpy(tp->fw_ver, &vpd_data[j], len);
13039 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
/* Board part number (PN keyword), bounds-checked against both the
 * destination buffer and the VPD image.
 */
13043 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13044 PCI_VPD_RO_KEYWORD_PARTNO);
13046 goto out_not_found;
13048 len = pci_vpd_info_field_size(&vpd_data[i]);
13050 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13051 if (len > TG3_BPN_SIZE ||
13052 (len + i) > TG3_NVM_VPD_LEN)
13053 goto out_not_found;
13055 memcpy(tp->board_part_number, &vpd_data[i], len);
13059 if (tp->board_part_number[0])
/* Fallback: derive a part-number string from the silicon. */
13063 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13064 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13065 strcpy(tp->board_part_number, "BCM5717");
13066 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13067 strcpy(tp->board_part_number, "BCM5718");
13070 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13071 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13072 strcpy(tp->board_part_number, "BCM57780");
13073 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13074 strcpy(tp->board_part_number, "BCM57760");
13075 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13076 strcpy(tp->board_part_number, "BCM57790");
13077 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13078 strcpy(tp->board_part_number, "BCM57788");
13081 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13082 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13083 strcpy(tp->board_part_number, "BCM57761");
13084 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13085 strcpy(tp->board_part_number, "BCM57765");
13086 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13087 strcpy(tp->board_part_number, "BCM57781");
13088 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13089 strcpy(tp->board_part_number, "BCM57785");
13090 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13091 strcpy(tp->board_part_number, "BCM57791");
13092 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13093 strcpy(tp->board_part_number, "BCM57795");
13096 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13097 strcpy(tp->board_part_number, "BCM95906");
13100 strcpy(tp->board_part_number, "none");
/* Validate a firmware image header in NVRAM at 'offset': the first
 * word must carry the 0x0c000000 signature in its top bits, and the
 * second word is also read and checked (check not visible in this
 * view).  NVRAM read failures count as invalid.
 */
13104 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13108 if (tg3_nvram_read(tp, offset, &val) ||
13109 (val & 0xfc000000) != 0x0c000000 ||
13110 tg3_nvram_read(tp, offset + 4, &val) ||
/* Append the bootcode version to tp->fw_ver.
 *
 * Two formats exist: "new" images (0x0c000000 signature at the image
 * start) embed a 16-byte version string whose NVRAM offset is stored
 * at image+8; older images expose only a packed major/minor word at
 * TG3_NVM_PTREV_BCVER, formatted as "vM.mm".
 */
13117 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13119 u32 val, offset, start, ver_offset;
13121 bool newver = false;
/* NVRAM word 0xc = image pointer, word 0x4 = image start address. */
13123 if (tg3_nvram_read(tp, 0xc, &offset) ||
13124 tg3_nvram_read(tp, 0x4, &start))
13127 offset = tg3_nvram_logical_addr(tp, offset);
13129 if (tg3_nvram_read(tp, offset, &val))
13132 if ((val & 0xfc000000) == 0x0c000000) {
13133 if (tg3_nvram_read(tp, offset + 4, &val))
/* Append after whatever tg3_read_vpd() already placed here. */
13140 dst_off = strlen(tp->fw_ver);
/* Need room for the 16-byte embedded version string. */
13143 if (TG3_VER_SIZE - dst_off < 16 ||
13144 tg3_nvram_read(tp, offset + 8, &ver_offset))
13147 offset = offset + ver_offset - start;
13148 for (i = 0; i < 16; i += 4) {
13150 if (tg3_nvram_read_be32(tp, offset + i, &v))
13153 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
/* Legacy path: packed major/minor at TG3_NVM_PTREV_BCVER. */
13158 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13161 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13162 TG3_NVM_BCVER_MAJSFT;
13163 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13164 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13165 "v%d.%02d", major, minor);
13169 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13171 u32 val, major, minor;
13173 /* Use native endian representation */
13174 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13177 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13178 TG3_NVM_HWSB_CFG1_MAJSFT;
13179 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13180 TG3_NVM_HWSB_CFG1_MINSFT;
13182 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
/* Append the self-boot firmware version ("sb vM.mm" plus an optional
 * build-letter suffix 'a'..'z') to tp->fw_ver.  'val' is the format
 * word already read from NVRAM offset 0; the per-revision EDH offset
 * selects where the encoded build/major/minor word lives.
 */
13185 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13187 u32 offset, major, minor, build;
13189 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
/* Only format-1 images carry a decodable version. */
13191 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
/* Each self-boot image revision stores its EDH word elsewhere. */
13194 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13195 case TG3_EEPROM_SB_REVISION_0:
13196 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13198 case TG3_EEPROM_SB_REVISION_2:
13199 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13201 case TG3_EEPROM_SB_REVISION_3:
13202 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13204 case TG3_EEPROM_SB_REVISION_4:
13205 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13207 case TG3_EEPROM_SB_REVISION_5:
13208 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13210 case TG3_EEPROM_SB_REVISION_6:
13211 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13217 if (tg3_nvram_read(tp, offset, &val))
13220 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13221 TG3_EEPROM_SB_EDH_BLD_SHFT;
13222 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13223 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13224 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
/* Sanity-limit: two-digit minor, build letter must fit 'a'..'z'. */
13226 if (minor > 99 || build > 26)
13229 offset = strlen(tp->fw_ver);
13230 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13231 " v%d.%02d", major, minor);
/* Non-zero build numbers append a single letter: 1 -> 'a', etc. */
13234 offset = strlen(tp->fw_ver);
13235 if (offset < TG3_VER_SIZE - 1)
13236 tp->fw_ver[offset] = 'a' + build - 1;
/* Append the management (ASF) firmware version to tp->fw_ver.
 *
 * Walks the NVRAM directory for an ASFINI entry, validates the image
 * it points at with tg3_fw_img_is_valid(), then copies up to 16 bytes
 * of version text (4 big-endian words) after a ", " separator,
 * truncating at TG3_VER_SIZE.
 */
13240 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13242 u32 val, offset, start;
/* Scan the fixed-size NVRAM directory for the ASF init entry. */
13245 for (offset = TG3_NVM_DIR_START;
13246 offset < TG3_NVM_DIR_END;
13247 offset += TG3_NVM_DIRENT_SIZE) {
13248 if (tg3_nvram_read(tp, offset, &val))
13251 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13255 if (offset == TG3_NVM_DIR_END)
/* Pre-5705 parts use a fixed load address; later parts store it
 * in the directory entry just before this one.
 */
13258 if (!tg3_flag(tp, 5705_PLUS))
13259 start = 0x08000000;
13260 else if (tg3_nvram_read(tp, offset - 4, &start))
13263 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13264 !tg3_fw_img_is_valid(tp, offset) ||
13265 tg3_nvram_read(tp, offset + 8, &val))
/* Translate the load-address-relative version pointer to an NVRAM
 * offset within the image.
 */
13268 offset += val - start;
13270 vlen = strlen(tp->fw_ver);
13272 tp->fw_ver[vlen++] = ',';
13273 tp->fw_ver[vlen++] = ' ';
13275 for (i = 0; i < 4; i++) {
13277 if (tg3_nvram_read_be32(tp, offset, &v))
13280 offset += sizeof(v);
/* Partial copy when the remaining buffer is smaller than a word. */
13282 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13283 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13287 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
/* Append the APE (DASH/NCSI management processor) firmware version to
 * tp->fw_ver, e.g. " DASH v1.2.3.4".  Bails out unless both APE and
 * ASF are enabled, the APE shared-memory signature is valid, and the
 * APE firmware reports ready.  Also latches APE_HAS_NCSI when the
 * firmware advertises the NCSI feature.
 */
13292 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13298 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
/* Shared-memory segment signature must match before trusting
 * anything else in APE space.
 */
13301 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13302 if (apedata != APE_SEG_SIG_MAGIC)
13305 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13306 if (!(apedata & APE_FW_STATUS_READY))
13309 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
/* NCSI-capable firmware gets a different name in the version
 * string (selection of the name not visible in this view).
 */
13311 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13312 tg3_flag_set(tp, APE_HAS_NCSI);
13318 vlen = strlen(tp->fw_ver);
13320 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13322 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13323 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13324 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13325 (apedata & APE_FW_VERSION_BLDMSK));
/* Top-level firmware version assembly: dispatch to the per-format
 * readers (bootcode / self-boot / hardware self-boot) based on the
 * NVRAM magic, optionally append the management firmware version, and
 * guarantee tp->fw_ver is NUL-terminated.  Does nothing if a version
 * string was already populated (e.g. by tg3_read_vpd()).
 */
13328 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13331 bool vpd_vers = false;
13333 if (tp->fw_ver[0] != 0)
/* No NVRAM at all: the only possible firmware is self-boot ROM. */
13336 if (tg3_flag(tp, NO_NVRAM)) {
13337 strcat(tp->fw_ver, "sb");
13341 if (tg3_nvram_read(tp, 0, &val))
/* NVRAM word 0 identifies the image format. */
13344 if (val == TG3_EEPROM_MAGIC)
13345 tg3_read_bc_ver(tp);
13346 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13347 tg3_read_sb_ver(tp, val);
13348 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13349 tg3_read_hwsb_ver(tp);
/* Management firmware version only when ASF runs without APE and
 * no VPD-provided version took precedence.
 */
13353 if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
13356 tg3_read_mgmtfw_ver(tp);
/* Defensive: strcat/memcpy paths above may fill the buffer. */
13359 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13362 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13364 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13366 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13367 return TG3_RX_RET_MAX_SIZE_5717;
13368 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13369 return TG3_RX_RET_MAX_SIZE_5700;
13371 return TG3_RX_RET_MAX_SIZE_5705;
/* Host bridges known to reorder posted writes to the mailbox
 * registers; matched via pci_dev_present() in tg3_get_invariants() to
 * decide whether MBOX_WRITE_REORDER (read-back after every mailbox
 * write) must be enabled.
 */
13374 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13375 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13376 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13377 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13381 static int __devinit tg3_get_invariants(struct tg3 *tp)
13384 u32 pci_state_reg, grc_misc_cfg;
13389 /* Force memory write invalidate off. If we leave it on,
13390 * then on 5700_BX chips we have to enable a workaround.
13391 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13392 * to match the cacheline size. The Broadcom driver have this
13393 * workaround but turns MWI off all the times so never uses
13394 * it. This seems to suggest that the workaround is insufficient.
13396 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13397 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13398 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13400 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13401 * has the register indirect write enable bit set before
13402 * we try to access any of the MMIO registers. It is also
13403 * critical that the PCI-X hw workaround situation is decided
13404 * before that as well.
13406 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13409 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13410 MISC_HOST_CTRL_CHIPREV_SHIFT);
13411 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13412 u32 prod_id_asic_rev;
13414 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13415 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13416 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13417 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13418 pci_read_config_dword(tp->pdev,
13419 TG3PCI_GEN2_PRODID_ASICREV,
13420 &prod_id_asic_rev);
13421 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13422 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13423 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13424 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13425 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13426 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13427 pci_read_config_dword(tp->pdev,
13428 TG3PCI_GEN15_PRODID_ASICREV,
13429 &prod_id_asic_rev);
13431 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13432 &prod_id_asic_rev);
13434 tp->pci_chip_rev_id = prod_id_asic_rev;
13437 /* Wrong chip ID in 5752 A0. This code can be removed later
13438 * as A0 is not in production.
13440 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13441 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13443 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13444 * we need to disable memory and use config. cycles
13445 * only to access all registers. The 5702/03 chips
13446 * can mistakenly decode the special cycles from the
13447 * ICH chipsets as memory write cycles, causing corruption
13448 * of register and memory space. Only certain ICH bridges
13449 * will drive special cycles with non-zero data during the
13450 * address phase which can fall within the 5703's address
13451 * range. This is not an ICH bug as the PCI spec allows
13452 * non-zero address during special cycles. However, only
13453 * these ICH bridges are known to drive non-zero addresses
13454 * during special cycles.
13456 * Since special cycles do not cross PCI bridges, we only
13457 * enable this workaround if the 5703 is on the secondary
13458 * bus of these ICH bridges.
13460 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13461 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13462 static struct tg3_dev_id {
13466 } ich_chipsets[] = {
13467 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13469 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13471 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13473 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13477 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13478 struct pci_dev *bridge = NULL;
13480 while (pci_id->vendor != 0) {
13481 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13487 if (pci_id->rev != PCI_ANY_ID) {
13488 if (bridge->revision > pci_id->rev)
13491 if (bridge->subordinate &&
13492 (bridge->subordinate->number ==
13493 tp->pdev->bus->number)) {
13494 tg3_flag_set(tp, ICH_WORKAROUND);
13495 pci_dev_put(bridge);
13501 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
13502 static struct tg3_dev_id {
13505 } bridge_chipsets[] = {
13506 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13507 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13510 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13511 struct pci_dev *bridge = NULL;
13513 while (pci_id->vendor != 0) {
13514 bridge = pci_get_device(pci_id->vendor,
13521 if (bridge->subordinate &&
13522 (bridge->subordinate->number <=
13523 tp->pdev->bus->number) &&
13524 (bridge->subordinate->subordinate >=
13525 tp->pdev->bus->number)) {
13526 tg3_flag_set(tp, 5701_DMA_BUG);
13527 pci_dev_put(bridge);
13533 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13534 * DMA addresses > 40-bit. This bridge may have other additional
13535 * 57xx devices behind it in some 4-port NIC designs for example.
13536 * Any tg3 device found behind the bridge will also need the 40-bit
13539 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13540 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13541 tg3_flag_set(tp, 5780_CLASS);
13542 tg3_flag_set(tp, 40BIT_DMA_BUG);
13543 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13545 struct pci_dev *bridge = NULL;
13548 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13549 PCI_DEVICE_ID_SERVERWORKS_EPB,
13551 if (bridge && bridge->subordinate &&
13552 (bridge->subordinate->number <=
13553 tp->pdev->bus->number) &&
13554 (bridge->subordinate->subordinate >=
13555 tp->pdev->bus->number)) {
13556 tg3_flag_set(tp, 40BIT_DMA_BUG);
13557 pci_dev_put(bridge);
13563 /* Initialize misc host control in PCI block. */
13564 tp->misc_host_ctrl |= (misc_ctrl_reg &
13565 MISC_HOST_CTRL_CHIPREV);
13566 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13567 tp->misc_host_ctrl);
13569 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13570 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13571 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13572 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13573 tp->pdev_peer = tg3_find_peer(tp);
13575 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13576 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13577 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13578 tg3_flag_set(tp, 5717_PLUS);
13580 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13581 tg3_flag(tp, 5717_PLUS))
13582 tg3_flag_set(tp, 57765_PLUS);
13584 /* Intentionally exclude ASIC_REV_5906 */
13585 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13586 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13587 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13588 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13589 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13590 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13591 tg3_flag(tp, 57765_PLUS))
13592 tg3_flag_set(tp, 5755_PLUS);
13594 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13595 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13596 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13597 tg3_flag(tp, 5755_PLUS) ||
13598 tg3_flag(tp, 5780_CLASS))
13599 tg3_flag_set(tp, 5750_PLUS);
13601 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13602 tg3_flag(tp, 5750_PLUS))
13603 tg3_flag_set(tp, 5705_PLUS);
13605 /* Determine TSO capabilities */
13606 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13607 ; /* Do nothing. HW bug. */
13608 else if (tg3_flag(tp, 57765_PLUS))
13609 tg3_flag_set(tp, HW_TSO_3);
13610 else if (tg3_flag(tp, 5755_PLUS) ||
13611 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13612 tg3_flag_set(tp, HW_TSO_2);
13613 else if (tg3_flag(tp, 5750_PLUS)) {
13614 tg3_flag_set(tp, HW_TSO_1);
13615 tg3_flag_set(tp, TSO_BUG);
13616 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13617 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13618 tg3_flag_clear(tp, TSO_BUG);
13619 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13620 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13621 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13622 tg3_flag_set(tp, TSO_BUG);
13623 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13624 tp->fw_needed = FIRMWARE_TG3TSO5;
13626 tp->fw_needed = FIRMWARE_TG3TSO;
13629 /* Selectively allow TSO based on operating conditions */
13630 if ((tg3_flag(tp, HW_TSO_1) ||
13631 tg3_flag(tp, HW_TSO_2) ||
13632 tg3_flag(tp, HW_TSO_3)) ||
13633 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13634 tg3_flag_set(tp, TSO_CAPABLE);
13636 tg3_flag_clear(tp, TSO_CAPABLE);
13637 tg3_flag_clear(tp, TSO_BUG);
13638 tp->fw_needed = NULL;
13641 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13642 tp->fw_needed = FIRMWARE_TG3;
13646 if (tg3_flag(tp, 5750_PLUS)) {
13647 tg3_flag_set(tp, SUPPORT_MSI);
13648 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13649 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13650 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13651 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13652 tp->pdev_peer == tp->pdev))
13653 tg3_flag_clear(tp, SUPPORT_MSI);
13655 if (tg3_flag(tp, 5755_PLUS) ||
13656 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13657 tg3_flag_set(tp, 1SHOT_MSI);
13660 if (tg3_flag(tp, 57765_PLUS)) {
13661 tg3_flag_set(tp, SUPPORT_MSIX);
13662 tp->irq_max = TG3_IRQ_MAX_VECS;
13666 /* All chips can get confused if TX buffers
13667 * straddle the 4GB address boundary.
13669 tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
13671 if (tg3_flag(tp, 5755_PLUS))
13672 tg3_flag_set(tp, SHORT_DMA_BUG);
13674 tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
13676 if (tg3_flag(tp, 5717_PLUS))
13677 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13679 if (tg3_flag(tp, 57765_PLUS) &&
13680 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13681 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13683 if (!tg3_flag(tp, 5705_PLUS) ||
13684 tg3_flag(tp, 5780_CLASS) ||
13685 tg3_flag(tp, USE_JUMBO_BDFLAG))
13686 tg3_flag_set(tp, JUMBO_CAPABLE);
13688 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13691 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13692 if (tp->pcie_cap != 0) {
13695 tg3_flag_set(tp, PCI_EXPRESS);
13697 tp->pcie_readrq = 4096;
13698 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13699 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13700 tp->pcie_readrq = 2048;
13702 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13704 pci_read_config_word(tp->pdev,
13705 tp->pcie_cap + PCI_EXP_LNKCTL,
13707 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13708 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13709 tg3_flag_clear(tp, HW_TSO_2);
13710 tg3_flag_clear(tp, TSO_CAPABLE);
13711 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13712 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13713 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13714 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13715 tg3_flag_set(tp, CLKREQ_BUG);
13716 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13717 tg3_flag_set(tp, L1PLLPD_EN);
13719 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13720 tg3_flag_set(tp, PCI_EXPRESS);
13721 } else if (!tg3_flag(tp, 5705_PLUS) ||
13722 tg3_flag(tp, 5780_CLASS)) {
13723 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13724 if (!tp->pcix_cap) {
13725 dev_err(&tp->pdev->dev,
13726 "Cannot find PCI-X capability, aborting\n");
13730 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13731 tg3_flag_set(tp, PCIX_MODE);
13734 /* If we have an AMD 762 or VIA K8T800 chipset, write
13735 * reordering to the mailbox registers done by the host
13736 * controller can cause major troubles. We read back from
13737 * every mailbox register write to force the writes to be
13738 * posted to the chip in order.
13740 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13741 !tg3_flag(tp, PCI_EXPRESS))
13742 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13744 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13745 &tp->pci_cacheline_sz);
13746 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13747 &tp->pci_lat_timer);
13748 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13749 tp->pci_lat_timer < 64) {
13750 tp->pci_lat_timer = 64;
13751 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13752 tp->pci_lat_timer);
13755 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13756 /* 5700 BX chips need to have their TX producer index
13757 * mailboxes written twice to workaround a bug.
13759 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13761 /* If we are in PCI-X mode, enable register write workaround.
13763 * The workaround is to use indirect register accesses
13764 * for all chip writes not to mailbox registers.
13766 if (tg3_flag(tp, PCIX_MODE)) {
13769 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13771 /* The chip can have it's power management PCI config
13772 * space registers clobbered due to this bug.
13773 * So explicitly force the chip into D0 here.
13775 pci_read_config_dword(tp->pdev,
13776 tp->pm_cap + PCI_PM_CTRL,
13778 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13779 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13780 pci_write_config_dword(tp->pdev,
13781 tp->pm_cap + PCI_PM_CTRL,
13784 /* Also, force SERR#/PERR# in PCI command. */
13785 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13786 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13787 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13791 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13792 tg3_flag_set(tp, PCI_HIGH_SPEED);
13793 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13794 tg3_flag_set(tp, PCI_32BIT);
13796 /* Chip-specific fixup from Broadcom driver */
13797 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13798 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13799 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13800 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13803 /* Default fast path register access methods */
13804 tp->read32 = tg3_read32;
13805 tp->write32 = tg3_write32;
13806 tp->read32_mbox = tg3_read32;
13807 tp->write32_mbox = tg3_write32;
13808 tp->write32_tx_mbox = tg3_write32;
13809 tp->write32_rx_mbox = tg3_write32;
13811 /* Various workaround register access methods */
13812 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13813 tp->write32 = tg3_write_indirect_reg32;
13814 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13815 (tg3_flag(tp, PCI_EXPRESS) &&
13816 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13818 * Back to back register writes can cause problems on these
13819 * chips, the workaround is to read back all reg writes
13820 * except those to mailbox regs.
13822 * See tg3_write_indirect_reg32().
13824 tp->write32 = tg3_write_flush_reg32;
13827 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13828 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13829 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13830 tp->write32_rx_mbox = tg3_write_flush_reg32;
13833 if (tg3_flag(tp, ICH_WORKAROUND)) {
13834 tp->read32 = tg3_read_indirect_reg32;
13835 tp->write32 = tg3_write_indirect_reg32;
13836 tp->read32_mbox = tg3_read_indirect_mbox;
13837 tp->write32_mbox = tg3_write_indirect_mbox;
13838 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13839 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13844 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13845 pci_cmd &= ~PCI_COMMAND_MEMORY;
13846 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13848 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13849 tp->read32_mbox = tg3_read32_mbox_5906;
13850 tp->write32_mbox = tg3_write32_mbox_5906;
13851 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13852 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13855 if (tp->write32 == tg3_write_indirect_reg32 ||
13856 (tg3_flag(tp, PCIX_MODE) &&
13857 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13858 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13859 tg3_flag_set(tp, SRAM_USE_CONFIG);
13861 /* Get eeprom hw config before calling tg3_set_power_state().
13862 * In particular, the TG3_FLAG_IS_NIC flag must be
13863 * determined before calling tg3_set_power_state() so that
13864 * we know whether or not to switch out of Vaux power.
13865 * When the flag is set, it means that GPIO1 is used for eeprom
13866 * write protect and also implies that it is a LOM where GPIOs
13867 * are not used to switch power.
13869 tg3_get_eeprom_hw_cfg(tp);
13871 if (tg3_flag(tp, ENABLE_APE)) {
13872 /* Allow reads and writes to the
13873 * APE register and memory space.
13875 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13876 PCISTATE_ALLOW_APE_SHMEM_WR |
13877 PCISTATE_ALLOW_APE_PSPACE_WR;
13878 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13882 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13883 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13884 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13885 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13886 tg3_flag(tp, 57765_PLUS))
13887 tg3_flag_set(tp, CPMU_PRESENT);
13889 /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13890 * GPIO1 driven high will bring 5700's external PHY out of reset.
13891 * It is also used as eeprom write protect on LOMs.
13893 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13894 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13895 tg3_flag(tp, EEPROM_WRITE_PROT))
13896 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13897 GRC_LCLCTRL_GPIO_OUTPUT1);
13898 /* Unused GPIO3 must be driven as output on 5752 because there
13899 * are no pull-up resistors on unused GPIO pins.
13901 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13902 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13904 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13905 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13906 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13907 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13909 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13910 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13911 /* Turn off the debug UART. */
13912 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13913 if (tg3_flag(tp, IS_NIC))
13914 /* Keep VMain power. */
13915 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13916 GRC_LCLCTRL_GPIO_OUTPUT0;
13919 /* Force the chip into D0. */
13920 err = tg3_power_up(tp);
13922 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13926 /* Derive initial jumbo mode from MTU assigned in
13927 * ether_setup() via the alloc_etherdev() call
13929 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
13930 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13932 /* Determine WakeOnLan speed to use. */
13933 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13934 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13935 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13936 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13937 tg3_flag_clear(tp, WOL_SPEED_100MB);
13939 tg3_flag_set(tp, WOL_SPEED_100MB);
13942 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13943 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13945 /* A few boards don't want Ethernet@WireSpeed phy feature */
13946 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13947 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
13948 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13949 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13950 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13951 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13952 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13954 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13955 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13956 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13957 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13958 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13960 if (tg3_flag(tp, 5705_PLUS) &&
13961 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13962 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13963 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13964 !tg3_flag(tp, 57765_PLUS)) {
13965 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13966 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13967 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13968 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13969 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13970 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13971 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
13972 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13973 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
13975 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
13978 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13979 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13980 tp->phy_otp = tg3_read_otp_phycfg(tp);
13981 if (tp->phy_otp == 0)
13982 tp->phy_otp = TG3_OTP_DEFAULT;
13985 if (tg3_flag(tp, CPMU_PRESENT))
13986 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13988 tp->mi_mode = MAC_MI_MODE_BASE;
13990 tp->coalesce_mode = 0;
13991 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
13992 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13993 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
13995 /* Set these bits to enable statistics workaround. */
13996 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13997 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
13998 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
13999 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14000 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14003 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14004 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14005 tg3_flag_set(tp, USE_PHYLIB);
14007 err = tg3_mdio_init(tp);
14011 /* Initialize data/descriptor byte/word swapping. */
14012 val = tr32(GRC_MODE);
14013 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14014 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14015 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14016 GRC_MODE_B2HRX_ENABLE |
14017 GRC_MODE_HTX2B_ENABLE |
14018 GRC_MODE_HOST_STACKUP);
14020 val &= GRC_MODE_HOST_STACKUP;
14022 tw32(GRC_MODE, val | tp->grc_mode);
14024 tg3_switch_clocks(tp);
14026 /* Clear this out for sanity. */
14027 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14029 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14031 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14032 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14033 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14035 if (chiprevid == CHIPREV_ID_5701_A0 ||
14036 chiprevid == CHIPREV_ID_5701_B0 ||
14037 chiprevid == CHIPREV_ID_5701_B2 ||
14038 chiprevid == CHIPREV_ID_5701_B5) {
14039 void __iomem *sram_base;
14041 /* Write some dummy words into the SRAM status block
14042 * area, see if it reads back correctly. If the return
14043 * value is bad, force enable the PCIX workaround.
14045 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14047 writel(0x00000000, sram_base);
14048 writel(0x00000000, sram_base + 4);
14049 writel(0xffffffff, sram_base + 4);
14050 if (readl(sram_base) != 0x00000000)
14051 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14056 tg3_nvram_init(tp);
14058 grc_misc_cfg = tr32(GRC_MISC_CFG);
14059 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14061 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14062 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14063 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14064 tg3_flag_set(tp, IS_5788);
14066 if (!tg3_flag(tp, IS_5788) &&
14067 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
14068 tg3_flag_set(tp, TAGGED_STATUS);
14069 if (tg3_flag(tp, TAGGED_STATUS)) {
14070 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14071 HOSTCC_MODE_CLRTICK_TXBD);
14073 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14074 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14075 tp->misc_host_ctrl);
14078 /* Preserve the APE MAC_MODE bits */
14079 if (tg3_flag(tp, ENABLE_APE))
14080 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14082 tp->mac_mode = TG3_DEF_MAC_MODE;
14084 /* these are limited to 10/100 only */
14085 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14086 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14087 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14088 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14089 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14090 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14091 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14092 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14093 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14094 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14095 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14096 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14097 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14098 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14099 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14100 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14102 err = tg3_phy_probe(tp);
14104 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14105 /* ... but do not return immediately ... */
14110 tg3_read_fw_ver(tp);
14112 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14113 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14115 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14116 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14118 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14121 /* 5700 {AX,BX} chips have a broken status block link
14122 * change bit implementation, so we must use the
14123 * status register in those cases.
14125 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14126 tg3_flag_set(tp, USE_LINKCHG_REG);
14128 tg3_flag_clear(tp, USE_LINKCHG_REG);
14130 /* The led_ctrl is set during tg3_phy_probe, here we might
14131 * have to force the link status polling mechanism based
14132 * upon subsystem IDs.
14134 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14135 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14136 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14137 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14138 tg3_flag_set(tp, USE_LINKCHG_REG);
14141 /* For all SERDES we poll the MAC status register. */
14142 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14143 tg3_flag_set(tp, POLL_SERDES);
14145 tg3_flag_clear(tp, POLL_SERDES);
14147 tp->rx_offset = NET_IP_ALIGN;
14148 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14149 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14150 tg3_flag(tp, PCIX_MODE)) {
14152 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14153 tp->rx_copy_thresh = ~(u16)0;
14157 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14158 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14159 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14161 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14163 /* Increment the rx prod index on the rx std ring by at most
14164 * 8 for these chips to workaround hw errata.
14166 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14167 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14168 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14169 tp->rx_std_max_post = 8;
14171 if (tg3_flag(tp, ASPM_WORKAROUND))
14172 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14173 PCIE_PWR_MGMT_L1_THRESH_MSK;
14178 #ifdef CONFIG_SPARC
/* Fetch the factory MAC address from the Open Firmware device tree on
 * SPARC: read the "local-mac-address" property of this PCI device's OF
 * node and, when it is present and exactly 6 bytes long, copy it into
 * both dev->dev_addr and dev->perm_addr.
 * NOTE(review): this listing has gaps in the embedded line numbering
 * (14185-14186 and 14191+ are absent), so the declaration of 'len' and
 * the function's return statements are not visible here -- confirm
 * against the complete source.
 */
14179 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14181 struct net_device *dev = tp->dev;
14182 struct pci_dev *pdev = tp->pdev;
14183 struct device_node *dp = pci_device_to_OF_node(pdev);
14184 const unsigned char *addr;
/* 'len' receives the property length in bytes from of_get_property(). */
14187 addr = of_get_property(dp, "local-mac-address", &len);
14188 if (addr && len == 6) {
14189 memcpy(dev->dev_addr, addr, 6);
/* Record the same bytes as the permanent (factory) address. */
14190 memcpy(dev->perm_addr, dev->dev_addr, 6);
/* SPARC fallback: populate dev->dev_addr and dev->perm_addr from the
 * machine-wide IDPROM ethernet address when no per-device MAC address
 * could be found by other means.
 * NOTE(review): closing brace / return are outside this sampled listing.
 */
14196 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14198 struct net_device *dev = tp->dev;
14200 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14201 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
/* Determine the device MAC address, trying sources in priority order:
 *   1. SPARC Open Firmware property (CONFIG_SPARC only),
 *   2. the SRAM MAC-address mailbox written by bootcode,
 *   3. NVRAM at a chip-dependent 'mac_offset',
 *   4. the live MAC_ADDR_0_HIGH/LOW hardware registers,
 *   5. the SPARC IDPROM default as a last resort.
 * The accepted address is finally mirrored into dev->perm_addr.
 * NOTE(review): this sampled listing omits many interior lines (e.g. the
 * initial mac_offset assignment, several 'return'/brace lines and the
 * bodies of some if-arms) -- verify control flow against the full source.
 */
14206 static int __devinit tg3_get_device_address(struct tg3 *tp)
14208 struct net_device *dev = tp->dev;
14209 u32 hi, lo, mac_offset;
14212 #ifdef CONFIG_SPARC
14213 if (!tg3_get_macaddr_sparc(tp))
/* Dual-MAC parts (5704 / 5780 class): the second MAC's address lives at
 * a different NVRAM offset; an NVRAM reset is issued under the lock. */
14218 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
14219 tg3_flag(tp, 5780_CLASS)) {
14220 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14222 if (tg3_nvram_lock(tp))
14223 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14225 tg3_nvram_unlock(tp);
/* 5717+ devices: per-PCI-function NVRAM offsets. */
14226 } else if (tg3_flag(tp, 5717_PLUS)) {
14227 if (PCI_FUNC(tp->pdev->devfn) & 1)
14229 if (PCI_FUNC(tp->pdev->devfn) > 1)
14230 mac_offset += 0x18c;
14231 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
/* 0x484b is ASCII "HK", the bootcode's signature marking a valid
 * MAC-address mailbox entry. */
14234 /* First try to get it from MAC address mailbox. */
14235 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14236 if ((hi >> 16) == 0x484b) {
14237 dev->dev_addr[0] = (hi >> 8) & 0xff;
14238 dev->dev_addr[1] = (hi >> 0) & 0xff;
14240 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14241 dev->dev_addr[2] = (lo >> 24) & 0xff;
14242 dev->dev_addr[3] = (lo >> 16) & 0xff;
14243 dev->dev_addr[4] = (lo >> 8) & 0xff;
14244 dev->dev_addr[5] = (lo >> 0) & 0xff;
14246 /* Some old bootcode may report a 0 MAC address in SRAM */
14247 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14250 /* Next, try NVRAM. */
14251 if (!tg3_flag(tp, NO_NVRAM) &&
14252 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14253 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
/* NVRAM words are big-endian; the high word carries the first two
 * address bytes in its low half. */
14254 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14255 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14257 /* Finally just fetch it out of the MAC control regs. */
14259 hi = tr32(MAC_ADDR_0_HIGH);
14260 lo = tr32(MAC_ADDR_0_LOW);
14262 dev->dev_addr[5] = lo & 0xff;
14263 dev->dev_addr[4] = (lo >> 8) & 0xff;
14264 dev->dev_addr[3] = (lo >> 16) & 0xff;
14265 dev->dev_addr[2] = (lo >> 24) & 0xff;
14266 dev->dev_addr[1] = hi & 0xff;
14267 dev->dev_addr[0] = (hi >> 8) & 0xff;
14271 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14272 #ifdef CONFIG_SPARC
14273 if (!tg3_get_default_macaddr_sparc(tp))
14278 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* DMA boundary policy selectors used by tg3_calc_dma_bndry(): force DMA
 * bursts to break at every cache line, or allow multi-cacheline bursts. */
14282 #define BOUNDARY_SINGLE_CACHELINE 1
14283 #define BOUNDARY_MULTI_CACHELINE 2
/* Compute the DMA read/write boundary bits to fold into the DMA_RW_CTRL
 * value 'val', based on the PCI cache line size, the bus type
 * (conventional PCI / PCI-X / PCI Express) and per-architecture policy.
 * Returns the updated register value.
 * NOTE(review): several 'goal = 0' defaults, 'case' labels, 'break's and
 * closing braces fall in the gaps of this sampled listing -- confirm the
 * switch structure against the full source.
 */
14285 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14287 int cacheline_size;
14291 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
/* A zero cache-line-size register is treated as the 1024-byte worst
 * case; otherwise the register counts 4-byte units. */
14293 cacheline_size = 1024;
14295 cacheline_size = (int) byte * 4;
14297 /* On 5703 and later chips, the boundary bits have no
14300 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14301 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14302 !tg3_flag(tp, PCI_EXPRESS))
/* Architecture policy: RISC hosts with costly disconnects prefer
 * cacheline-aligned bursts. */
14305 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14306 goal = BOUNDARY_MULTI_CACHELINE;
14308 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14309 goal = BOUNDARY_SINGLE_CACHELINE;
/* 57765+ parts use a single enable/disable bit instead of encoded
 * boundary fields. */
14315 if (tg3_flag(tp, 57765_PLUS)) {
14316 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14323 /* PCI controllers on most RISC systems tend to disconnect
14324 * when a device tries to burst across a cache-line boundary.
14325 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14327 * Unfortunately, for PCI-E there are only limited
14328 * write-side controls for this, and thus for reads
14329 * we will still get the disconnects. We'll also waste
14330 * these PCI cycles for both read and write for chips
14331 * other than 5700 and 5701 which do not implement the
/* PCI-X: boundary fields keyed off the cache line size. */
14334 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14335 switch (cacheline_size) {
14340 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14341 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14342 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14344 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14345 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14350 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14351 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14355 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14356 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
/* PCI Express: only write-side boundary control exists. */
14359 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14360 switch (cacheline_size) {
14364 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14365 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14366 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14372 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14373 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
/* Conventional PCI: pick the boundary matching the cache line size. */
14377 switch (cacheline_size) {
14379 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14380 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14381 DMA_RWCTRL_WRITE_BNDRY_16);
14386 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14387 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14388 DMA_RWCTRL_WRITE_BNDRY_32);
14393 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14394 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14395 DMA_RWCTRL_WRITE_BNDRY_64);
14400 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14401 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14402 DMA_RWCTRL_WRITE_BNDRY_128);
14407 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14408 DMA_RWCTRL_WRITE_BNDRY_256);
14411 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14412 DMA_RWCTRL_WRITE_BNDRY_512);
14416 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14417 DMA_RWCTRL_WRITE_BNDRY_1024);
/* Run a single host<->NIC DMA transfer of 'size' bytes through the
 * chip's internal DMA engine, using a hand-built descriptor placed in
 * NIC SRAM. 'to_device' selects direction: nonzero = host buffer read
 * by the read-DMA engine, zero = NIC writes into the host buffer.
 * Completion is detected by polling the completion FIFO for the
 * descriptor address.
 * NOTE(review): this sampled listing is missing several lines (the
 * completion-FIFO poll's exit/timeout handling, final return, and some
 * direction-specific FIFO writes) -- confirm against the full source.
 */
14426 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14428 struct tg3_internal_buffer_desc test_desc;
14429 u32 sram_dma_descs;
14432 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
/* Quiesce the DMA engines and clear their status/FIFOs first. */
14434 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14435 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14436 tw32(RDMAC_STATUS, 0);
14437 tw32(WDMAC_STATUS, 0);
14439 tw32(BUFMGR_MODE, 0);
14440 tw32(FTQ_RESET, 0);
/* Build the test descriptor: 64-bit host address split across two
 * 32-bit fields, with the NIC-side buffer at SRAM offset 0x2100. */
14442 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14443 test_desc.addr_lo = buf_dma & 0xffffffff;
14444 test_desc.nic_mbuf = 0x00002100;
14445 test_desc.len = size;
14448 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
14449 * the *second* time the tg3 driver was getting loaded after an
14452 * Broadcom tells me:
14453 * ...the DMA engine is connected to the GRC block and a DMA
14454 * reset may affect the GRC block in some unpredictable way...
14455 * The behavior of resets to individual blocks has not been tested.
14457 * Broadcom noted the GRC reset will also reset all sub-components.
/* Direction-specific completion queue / source queue ids and engine
 * enables (read-DMA for host->device, write-DMA for device->host). */
14460 test_desc.cqid_sqid = (13 << 8) | 2;
14462 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14465 test_desc.cqid_sqid = (16 << 8) | 7;
14467 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14470 test_desc.flags = 0x00000005;
/* Copy the descriptor word-by-word into NIC SRAM via the PCI
 * memory-window config registers. */
14472 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14475 val = *(((u32 *)&test_desc) + i);
14476 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14477 sram_dma_descs + (i * sizeof(u32)));
14478 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14480 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* Kick the transfer by enqueuing the descriptor address. */
14483 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14485 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
/* Poll (up to 40 iterations) for the descriptor to appear on the
 * completion FIFO. */
14488 for (i = 0; i < 40; i++) {
14492 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14494 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14495 if ((val & 0xffff) == sram_dma_descs) {
/* Size of the coherent DMA buffer used by tg3_test_dma() (8 KiB). */
14506 #define TEST_BUFFER_SIZE 0x2000
/* Host bridges known to expose the 5700/5701 write-DMA bug even when
 * the DMA self-test below passes; force the 16-byte write boundary
 * workaround when one is present. */
14508 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14509 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
/* Configure tp->dma_rwctrl (watermarks and burst boundaries) for the
 * detected bus/chip, then, on 5700/5701 only, run a write/read DMA
 * loopback self-test with maximum burst size to detect the write-DMA
 * hardware bug; on corruption, fall back to a 16-byte write boundary.
 * Returns 0 on success or a negative errno.
 * NOTE(review): this sampled listing is missing lines throughout
 * (error-path gotos, the ENOMEM check after dma_alloc_coherent, loop
 * bodies that fill/compare the pattern buffer, and the final return) --
 * verify control flow against the full source.
 */
14513 static int __devinit tg3_test_dma(struct tg3 *tp)
14515 dma_addr_t buf_dma;
14516 u32 *buf, saved_dma_rwctrl;
14519 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14520 &buf_dma, GFP_KERNEL);
/* Base PCI command codes, then bus-specific boundary bits. */
14526 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14527 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14529 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14531 if (tg3_flag(tp, 57765_PLUS))
14534 if (tg3_flag(tp, PCI_EXPRESS)) {
14535 /* DMA read watermark not used on PCIE */
14536 tp->dma_rwctrl |= 0x00180000;
14537 } else if (!tg3_flag(tp, PCIX_MODE)) {
14538 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14539 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14540 tp->dma_rwctrl |= 0x003f0000;
14542 tp->dma_rwctrl |= 0x003f000f;
14544 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14545 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14546 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14547 u32 read_water = 0x7;
14549 /* If the 5704 is behind the EPB bridge, we can
14550 * do the less restrictive ONE_DMA workaround for
14551 * better performance.
14553 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14554 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14555 tp->dma_rwctrl |= 0x8000;
14556 else if (ccval == 0x6 || ccval == 0x7)
14557 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14559 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14561 /* Set bit 23 to enable PCIX hw bug fix */
14563 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14564 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14566 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14567 /* 5780 always in PCIX mode */
14568 tp->dma_rwctrl |= 0x00144000;
14569 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14570 /* 5714 always in PCIX mode */
14571 tp->dma_rwctrl |= 0x00148000;
14573 tp->dma_rwctrl |= 0x001b000f;
/* 5703/5704: clear the low nibble (boundary bits repurposed there). */
14577 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14578 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14579 tp->dma_rwctrl &= 0xfffffff0;
14581 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14582 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14583 /* Remove this if it causes problems for some boards. */
14584 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14586 /* On 5700/5701 chips, we need to set this bit.
14587 * Otherwise the chip will issue cacheline transactions
14588 * to streamable DMA memory with not all the byte
14589 * enables turned on. This is an error on several
14590 * RISC PCI controllers, in particular sparc64.
14592 * On 5703/5704 chips, this bit has been reassigned
14593 * a different meaning. In particular, it is used
14594 * on those chips to enable a PCI-X workaround.
14596 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14599 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14602 /* Unneeded, already done by tg3_get_invariants. */
14603 tg3_switch_clocks(tp);
/* The DMA self-test below only applies to 5700/5701. */
14606 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14607 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14610 /* It is best to perform DMA test with maximum write burst size
14611 * to expose the 5700/5701 write DMA bug.
14613 saved_dma_rwctrl = tp->dma_rwctrl;
14614 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14615 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
/* Fill the host buffer with a known pattern. */
14620 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14623 /* Send the buffer to the chip. */
14624 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14626 dev_err(&tp->pdev->dev,
14627 "%s: Buffer write failed. err = %d\n",
14633 /* validate data reached card RAM correctly. */
14634 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14636 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14637 if (le32_to_cpu(val) != p[i]) {
14638 dev_err(&tp->pdev->dev,
14639 "%s: Buffer corrupted on device! "
14640 "(%d != %d)\n", __func__, val, i);
14641 /* ret = -ENODEV here? */
14646 /* Now read it back. */
14647 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14649 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14650 "err = %d\n", __func__, ret);
/* Compare the round-tripped data; on mismatch, tighten the write
 * boundary to 16 bytes and retry once. */
14655 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14659 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14660 DMA_RWCTRL_WRITE_BNDRY_16) {
14661 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14662 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14663 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14666 dev_err(&tp->pdev->dev,
14667 "%s: Buffer corrupted on read back! "
14668 "(%d != %d)\n", __func__, p[i], i);
14674 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14680 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14681 DMA_RWCTRL_WRITE_BNDRY_16) {
14682 /* DMA test passed without adjusting DMA boundary,
14683 * now look for chipsets that are known to expose the
14684 * DMA bug without failing the test.
14686 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14687 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14688 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14690 /* Safe to use the calculated DMA boundary. */
14691 tp->dma_rwctrl = saved_dma_rwctrl;
14694 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14698 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
/* Select buffer-manager watermarks (mbuf read-DMA low water, MAC RX low
 * water, high water -- standard and jumbo variants) according to chip
 * generation: 57765+, 5705+ (with a 5906 override), or the original
 * 5700-class defaults. DMA low/high water are the same for all chips.
 * NOTE(review): some brace/blank lines are absent from this sampled
 * listing; the if/else nesting shown should be confirmed against the
 * full source.
 */
14703 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14705 if (tg3_flag(tp, 57765_PLUS)) {
14706 tp->bufmgr_config.mbuf_read_dma_low_water =
14707 DEFAULT_MB_RDMA_LOW_WATER_5705;
14708 tp->bufmgr_config.mbuf_mac_rx_low_water =
14709 DEFAULT_MB_MACRX_LOW_WATER_57765;
14710 tp->bufmgr_config.mbuf_high_water =
14711 DEFAULT_MB_HIGH_WATER_57765;
14713 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14714 DEFAULT_MB_RDMA_LOW_WATER_5705;
14715 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14716 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14717 tp->bufmgr_config.mbuf_high_water_jumbo =
14718 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14719 } else if (tg3_flag(tp, 5705_PLUS)) {
14720 tp->bufmgr_config.mbuf_read_dma_low_water =
14721 DEFAULT_MB_RDMA_LOW_WATER_5705;
14722 tp->bufmgr_config.mbuf_mac_rx_low_water =
14723 DEFAULT_MB_MACRX_LOW_WATER_5705;
14724 tp->bufmgr_config.mbuf_high_water =
14725 DEFAULT_MB_HIGH_WATER_5705;
/* 5906 has a smaller internal mbuf pool; override two watermarks. */
14726 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14727 tp->bufmgr_config.mbuf_mac_rx_low_water =
14728 DEFAULT_MB_MACRX_LOW_WATER_5906;
14729 tp->bufmgr_config.mbuf_high_water =
14730 DEFAULT_MB_HIGH_WATER_5906;
14733 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14734 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14735 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14736 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14737 tp->bufmgr_config.mbuf_high_water_jumbo =
14738 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
/* Legacy 5700-class defaults. */
14740 tp->bufmgr_config.mbuf_read_dma_low_water =
14741 DEFAULT_MB_RDMA_LOW_WATER;
14742 tp->bufmgr_config.mbuf_mac_rx_low_water =
14743 DEFAULT_MB_MACRX_LOW_WATER;
14744 tp->bufmgr_config.mbuf_high_water =
14745 DEFAULT_MB_HIGH_WATER;
14747 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14748 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14749 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14750 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14751 tp->bufmgr_config.mbuf_high_water_jumbo =
14752 DEFAULT_MB_HIGH_WATER_JUMBO;
/* Common to all chip generations. */
14755 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14756 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
/* Map the masked PHY id (tp->phy_id & TG3_PHY_ID_MASK) to a short,
 * human-readable model string for boot-time logging. A zero id means a
 * serdes-only configuration; anything unrecognized reports "unknown".
 * NOTE(review): the closing braces are outside this sampled listing.
 */
14759 static char * __devinit tg3_phy_string(struct tg3 *tp)
14761 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14762 case TG3_PHY_ID_BCM5400: return "5400";
14763 case TG3_PHY_ID_BCM5401: return "5401";
14764 case TG3_PHY_ID_BCM5411: return "5411";
14765 case TG3_PHY_ID_BCM5701: return "5701";
14766 case TG3_PHY_ID_BCM5703: return "5703";
14767 case TG3_PHY_ID_BCM5704: return "5704";
14768 case TG3_PHY_ID_BCM5705: return "5705";
14769 case TG3_PHY_ID_BCM5750: return "5750";
14770 case TG3_PHY_ID_BCM5752: return "5752";
14771 case TG3_PHY_ID_BCM5714: return "5714";
14772 case TG3_PHY_ID_BCM5780: return "5780";
14773 case TG3_PHY_ID_BCM5755: return "5755";
14774 case TG3_PHY_ID_BCM5787: return "5787";
14775 case TG3_PHY_ID_BCM5784: return "5784";
14776 case TG3_PHY_ID_BCM5756: return "5722/5756";
14777 case TG3_PHY_ID_BCM5906: return "5906";
14778 case TG3_PHY_ID_BCM5761: return "5761";
14779 case TG3_PHY_ID_BCM5718C: return "5718C";
14780 case TG3_PHY_ID_BCM5718S: return "5718S";
14781 case TG3_PHY_ID_BCM57765: return "57765";
14782 case TG3_PHY_ID_BCM5719C: return "5719C";
14783 case TG3_PHY_ID_BCM5720C: return "5720C";
14784 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14785 case 0: return "serdes";
14786 default: return "unknown";
/* Format a human-readable bus description ("PCI Express", "PCIX:133MHz",
 * "PCI:66MHz:64-bit", ...) into the caller-supplied buffer 'str'.
 * The caller must provide a buffer large enough for the longest string.
 * NOTE(review): the returns and closing braces fall in the gaps of this
 * sampled listing -- confirm against the full source.
 */
14790 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14792 if (tg3_flag(tp, PCI_EXPRESS)) {
14793 strcpy(str, "PCI Express");
/* PCI-X: decode bus speed from the low 5 bits of CLOCK_CTRL, with a
 * board-id special case for the 5704CIOBE (forced 133MHz). */
14795 } else if (tg3_flag(tp, PCIX_MODE)) {
14796 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14798 strcpy(str, "PCIX:");
14800 if ((clock_ctrl == 7) ||
14801 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14802 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14803 strcat(str, "133MHz");
14804 else if (clock_ctrl == 0)
14805 strcat(str, "33MHz");
14806 else if (clock_ctrl == 2)
14807 strcat(str, "50MHz");
14808 else if (clock_ctrl == 4)
14809 strcat(str, "66MHz");
14810 else if (clock_ctrl == 6)
14811 strcat(str, "100MHz");
/* Conventional PCI: speed and width come from driver flags. */
14813 strcpy(str, "PCI:");
14814 if (tg3_flag(tp, PCI_HIGH_SPEED))
14815 strcat(str, "66MHz");
14817 strcat(str, "33MHz");
14819 if (tg3_flag(tp, PCI_32BIT))
14820 strcat(str, ":32-bit");
14822 strcat(str, ":64-bit");
/* Locate the sibling PCI function of a dual-port NIC: scan all eight
 * functions of this device's slot and return the first pci_dev that is
 * not tp->pdev itself. For single-port 5704 configurations the peer is
 * tp->pdev (per the in-body comment).
 * NOTE(review): the loop exit, the single-port fallback assignment and
 * the pci_dev_put()/return lines are outside this sampled listing --
 * confirm reference-count handling against the full source.
 */
14826 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14828 struct pci_dev *peer;
/* devnr is the slot's devfn with the low 3 function bits cleared. */
14829 unsigned int func, devnr = tp->pdev->devfn & ~7;
14831 for (func = 0; func < 8; func++) {
14832 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14833 if (peer && peer != tp->pdev)
14837 /* 5704 can be configured in single-port mode, set peer to
14838 * tp->pdev in that case.
14846 * We don't need to keep the refcount elevated; there's no way
14847 * to remove one half of this device without removing the other
/* Initialize tp->coal with the driver's default interrupt-coalescing
 * parameters, then adjust: CLRTICK-mode chips get the *_CLRTCKS tick
 * values, and 5705+ chips zero the per-IRQ and statistics coalescing
 * fields (not supported there).
 * NOTE(review): closing braces are outside this sampled listing.
 */
14854 static void __devinit tg3_init_coal(struct tg3 *tp)
14856 struct ethtool_coalesce *ec = &tp->coal;
14858 memset(ec, 0, sizeof(*ec));
14859 ec->cmd = ETHTOOL_GCOALESCE;
14860 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14861 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14862 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14863 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14864 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14865 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14866 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14867 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14868 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
/* Chips using CLRTICK host-coalescing need different tick values. */
14870 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14871 HOSTCC_MODE_CLRTICK_TXBD)) {
14872 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14873 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14874 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14875 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
/* 5705 and later do not support these knobs; report them as zero. */
14878 if (tg3_flag(tp, 5705_PLUS)) {
14879 ec->rx_coalesce_usecs_irq = 0;
14880 ec->tx_coalesce_usecs_irq = 0;
14881 ec->stats_block_coalesce_usecs = 0;
/* net_device_ops vtable wiring the kernel networking core to the tg3
 * driver entry points (open/close, transmit, stats, MAC/MTU changes,
 * ioctl, timeout handling, and optional netpoll support).
 * NOTE(review): the closing "};" is outside this sampled listing.
 */
14885 static const struct net_device_ops tg3_netdev_ops = {
14886 .ndo_open = tg3_open,
14887 .ndo_stop = tg3_close,
14888 .ndo_start_xmit = tg3_start_xmit,
14889 .ndo_get_stats64 = tg3_get_stats64,
14890 .ndo_validate_addr = eth_validate_addr,
14891 .ndo_set_multicast_list = tg3_set_rx_mode,
14892 .ndo_set_mac_address = tg3_set_mac_addr,
14893 .ndo_do_ioctl = tg3_ioctl,
14894 .ndo_tx_timeout = tg3_tx_timeout,
14895 .ndo_change_mtu = tg3_change_mtu,
14896 .ndo_fix_features = tg3_fix_features,
14897 .ndo_set_features = tg3_set_features,
14898 #ifdef CONFIG_NET_POLL_CONTROLLER
14899 .ndo_poll_controller = tg3_poll_controller,
14903 static int __devinit tg3_init_one(struct pci_dev *pdev,
14904 const struct pci_device_id *ent)
14906 struct net_device *dev;
14908 int i, err, pm_cap;
14909 u32 sndmbx, rcvmbx, intmbx;
14911 u64 dma_mask, persist_dma_mask;
14914 printk_once(KERN_INFO "%s\n", version);
14916 err = pci_enable_device(pdev);
14918 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14922 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14924 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14925 goto err_out_disable_pdev;
14928 pci_set_master(pdev);
14930 /* Find power-management capability. */
14931 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14933 dev_err(&pdev->dev,
14934 "Cannot find Power Management capability, aborting\n");
14936 goto err_out_free_res;
14939 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14941 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14943 goto err_out_free_res;
14946 SET_NETDEV_DEV(dev, &pdev->dev);
14948 tp = netdev_priv(dev);
14951 tp->pm_cap = pm_cap;
14952 tp->rx_mode = TG3_DEF_RX_MODE;
14953 tp->tx_mode = TG3_DEF_TX_MODE;
14956 tp->msg_enable = tg3_debug;
14958 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14960 /* The word/byte swap controls here control register access byte
14961 * swapping. DMA data byte swapping is controlled in the GRC_MODE
14964 tp->misc_host_ctrl =
14965 MISC_HOST_CTRL_MASK_PCI_INT |
14966 MISC_HOST_CTRL_WORD_SWAP |
14967 MISC_HOST_CTRL_INDIR_ACCESS |
14968 MISC_HOST_CTRL_PCISTATE_RW;
14970 /* The NONFRM (non-frame) byte/word swap controls take effect
14971 * on descriptor entries, anything which isn't packet data.
14973 * The StrongARM chips on the board (one for tx, one for rx)
14974 * are running in big-endian mode.
14976 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14977 GRC_MODE_WSWAP_NONFRM_DATA);
14978 #ifdef __BIG_ENDIAN
14979 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14981 spin_lock_init(&tp->lock);
14982 spin_lock_init(&tp->indirect_lock);
14983 INIT_WORK(&tp->reset_task, tg3_reset_task);
14985 tp->regs = pci_ioremap_bar(pdev, BAR_0);
14987 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
14989 goto err_out_free_dev;
14992 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
14993 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
14995 dev->ethtool_ops = &tg3_ethtool_ops;
14996 dev->watchdog_timeo = TG3_TX_TIMEOUT;
14997 dev->netdev_ops = &tg3_netdev_ops;
14998 dev->irq = pdev->irq;
15000 err = tg3_get_invariants(tp);
15002 dev_err(&pdev->dev,
15003 "Problem fetching invariants of chip, aborting\n");
15004 goto err_out_iounmap;
15007 /* The EPB bridge inside 5714, 5715, and 5780 and any
15008 * device behind the EPB cannot support DMA addresses > 40-bit.
15009 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15010 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15011 * do DMA address check in tg3_start_xmit().
15013 if (tg3_flag(tp, IS_5788))
15014 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15015 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15016 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15017 #ifdef CONFIG_HIGHMEM
15018 dma_mask = DMA_BIT_MASK(64);
15021 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15023 /* Configure DMA attributes. */
15024 if (dma_mask > DMA_BIT_MASK(32)) {
15025 err = pci_set_dma_mask(pdev, dma_mask);
15027 features |= NETIF_F_HIGHDMA;
15028 err = pci_set_consistent_dma_mask(pdev,
15031 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15032 "DMA for consistent allocations\n");
15033 goto err_out_iounmap;
15037 if (err || dma_mask == DMA_BIT_MASK(32)) {
15038 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15040 dev_err(&pdev->dev,
15041 "No usable DMA configuration, aborting\n");
15042 goto err_out_iounmap;
15046 tg3_init_bufmgr_config(tp);
15048 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15050 /* 5700 B0 chips do not support checksumming correctly due
15051 * to hardware bugs.
15053 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15054 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15056 if (tg3_flag(tp, 5755_PLUS))
15057 features |= NETIF_F_IPV6_CSUM;
15060 /* TSO is on by default on chips that support hardware TSO.
15061 * Firmware TSO on older chips gives lower performance, so it
15062 * is off by default, but can be enabled using ethtool.
15064 if ((tg3_flag(tp, HW_TSO_1) ||
15065 tg3_flag(tp, HW_TSO_2) ||
15066 tg3_flag(tp, HW_TSO_3)) &&
15067 (features & NETIF_F_IP_CSUM))
15068 features |= NETIF_F_TSO;
15069 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15070 if (features & NETIF_F_IPV6_CSUM)
15071 features |= NETIF_F_TSO6;
15072 if (tg3_flag(tp, HW_TSO_3) ||
15073 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15074 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15075 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15076 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15077 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15078 features |= NETIF_F_TSO_ECN;
15082 * Add loopback capability only for a subset of devices that support
15083 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
15084 * loopback for the remaining devices.
15086 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15087 !tg3_flag(tp, CPMU_PRESENT))
15088 /* Add the loopback capability */
15089 features |= NETIF_F_LOOPBACK;
15091 dev->features |= features;
15092 dev->hw_features |= features;
15093 dev->vlan_features |= features;
15095 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15096 !tg3_flag(tp, TSO_CAPABLE) &&
15097 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15098 tg3_flag_set(tp, MAX_RXPEND_64);
15099 tp->rx_pending = 63;
15102 err = tg3_get_device_address(tp);
15104 dev_err(&pdev->dev,
15105 "Could not obtain valid ethernet address, aborting\n");
15106 goto err_out_iounmap;
15109 if (tg3_flag(tp, ENABLE_APE)) {
15110 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15111 if (!tp->aperegs) {
15112 dev_err(&pdev->dev,
15113 "Cannot map APE registers, aborting\n");
15115 goto err_out_iounmap;
15118 tg3_ape_lock_init(tp);
15120 if (tg3_flag(tp, ENABLE_ASF))
15121 tg3_read_dash_ver(tp);
15125 * Reset chip in case UNDI or EFI driver did not shutdown
15126 * DMA self test will enable WDMAC and we'll see (spurious)
15127 * pending DMA on the PCI bus at that point.
15129 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15130 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15131 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15132 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15135 err = tg3_test_dma(tp);
15137 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15138 goto err_out_apeunmap;
15141 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15142 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15143 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15144 for (i = 0; i < tp->irq_max; i++) {
15145 struct tg3_napi *tnapi = &tp->napi[i];
15148 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15150 tnapi->int_mbox = intmbx;
15156 tnapi->consmbox = rcvmbx;
15157 tnapi->prodmbox = sndmbx;
15160 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15162 tnapi->coal_now = HOSTCC_MODE_NOW;
15164 if (!tg3_flag(tp, SUPPORT_MSIX))
15168 * If we support MSIX, we'll be using RSS. If we're using
15169 * RSS, the first vector only handles link interrupts and the
15170 * remaining vectors handle rx and tx interrupts. Reuse the
15171 * mailbox values for the next iteration. The values we setup
15172 * above are still useful for the single vectored mode.
15187 pci_set_drvdata(pdev, dev);
15189 err = register_netdev(dev);
15191 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15192 goto err_out_apeunmap;
15195 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15196 tp->board_part_number,
15197 tp->pci_chip_rev_id,
15198 tg3_bus_string(tp, str),
15201 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15202 struct phy_device *phydev;
15203 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15205 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15206 phydev->drv->name, dev_name(&phydev->dev));
15210 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15211 ethtype = "10/100Base-TX";
15212 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15213 ethtype = "1000Base-SX";
15215 ethtype = "10/100/1000Base-T";
15217 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15218 "(WireSpeed[%d], EEE[%d])\n",
15219 tg3_phy_string(tp), ethtype,
15220 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15221 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15224 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15225 (dev->features & NETIF_F_RXCSUM) != 0,
15226 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15227 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15228 tg3_flag(tp, ENABLE_ASF) != 0,
15229 tg3_flag(tp, TSO_CAPABLE) != 0);
15230 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15232 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15233 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15235 pci_save_state(pdev);
15241 iounmap(tp->aperegs);
15242 tp->aperegs = NULL;
15255 pci_release_regions(pdev);
15257 err_out_disable_pdev:
15258 pci_disable_device(pdev);
15259 pci_set_drvdata(pdev, NULL);
/*
 * tg3_remove_one() - PCI remove callback; tear down everything
 * tg3_init_one() set up: firmware reference, pending reset work,
 * netdev registration, register mappings and PCI resources.
 * NOTE(review): extract is elided (braces/some frees missing);
 * comments cover only the visible lines.
 */
15263 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15265 struct net_device *dev = pci_get_drvdata(pdev);
15268 struct tg3 *tp = netdev_priv(dev);
/* Drop the firmware image loaded via request_firmware(), if any. */
15271 release_firmware(tp->fw);
/* Make sure the deferred reset task is not still running. */
15273 cancel_work_sync(&tp->reset_task);
15275 if (!tg3_flag(tp, USE_PHYLIB)) {
15280 unregister_netdev(dev);
15282 iounmap(tp->aperegs);
15283 tp->aperegs = NULL;
15290 pci_release_regions(pdev);
15291 pci_disable_device(pdev);
15292 pci_set_drvdata(pdev, NULL);
15296 #ifdef CONFIG_PM_SLEEP
/*
 * tg3_suspend() - dev_pm_ops suspend hook.
 * Quiesces the NIC (stop NAPI/queues, kill timer, disable interrupts,
 * halt the chip) and prepares it for low power.  If the power-down
 * preparation fails, the visible tail restarts the hardware and
 * re-attaches the device so the system stays usable.
 * NOTE(review): extract is elided (early return, error checks and
 * braces missing); comments cover only the visible lines.
 */
15297 static int tg3_suspend(struct device *device)
15299 struct pci_dev *pdev = to_pci_dev(device);
15300 struct net_device *dev = pci_get_drvdata(pdev);
15301 struct tg3 *tp = netdev_priv(dev);
15304 if (!netif_running(dev))
/* Wait for any in-flight reset task before quiescing. */
15307 flush_work_sync(&tp->reset_task);
15309 tg3_netif_stop(tp);
15311 del_timer_sync(&tp->timer);
15313 tg3_full_lock(tp, 1);
15314 tg3_disable_ints(tp);
15315 tg3_full_unlock(tp);
15317 netif_device_detach(dev);
15319 tg3_full_lock(tp, 0);
15320 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15321 tg3_flag_clear(tp, INIT_COMPLETE);
15322 tg3_full_unlock(tp);
15324 err = tg3_power_down_prepare(tp);
/* Recovery path (presumably reached only when the prepare above
 * failed -- the guarding check is elided): bring the chip back up. */
15328 tg3_full_lock(tp, 0);
15330 tg3_flag_set(tp, INIT_COMPLETE);
15331 err2 = tg3_restart_hw(tp, 1);
15335 tp->timer.expires = jiffies + tp->timer_offset;
15336 add_timer(&tp->timer);
15338 netif_device_attach(dev);
15339 tg3_netif_start(tp);
15342 tg3_full_unlock(tp);
/*
 * tg3_resume() - dev_pm_ops resume hook; mirror of tg3_suspend().
 * Re-attaches the device, restarts the hardware under the full lock,
 * re-arms the periodic timer and restarts NAPI/queues.
 * NOTE(review): extract is elided (early return and error check
 * missing); comments cover only the visible lines.
 */
15351 static int tg3_resume(struct device *device)
15353 struct pci_dev *pdev = to_pci_dev(device);
15354 struct net_device *dev = pci_get_drvdata(pdev);
15355 struct tg3 *tp = netdev_priv(dev);
15358 if (!netif_running(dev))
15361 netif_device_attach(dev);
15363 tg3_full_lock(tp, 0);
15365 tg3_flag_set(tp, INIT_COMPLETE);
15366 err = tg3_restart_hw(tp, 1);
15370 tp->timer.expires = jiffies + tp->timer_offset;
15371 add_timer(&tp->timer);
15373 tg3_netif_start(tp);
15376 tg3_full_unlock(tp);
/* Bind suspend/resume into a dev_pm_ops; TG3_PM_OPS is plugged into
 * tg3_driver.driver.pm below.  The NULL definition is the
 * !CONFIG_PM_SLEEP fallback (the #else line is elided in this extract). */
15384 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15385 #define TG3_PM_OPS (&tg3_pm_ops)
15389 #define TG3_PM_OPS NULL
15391 #endif /* CONFIG_PM_SLEEP */
15394 * tg3_io_error_detected - called when PCI error is detected
15395 * @pdev: Pointer to PCI device
15396 * @state: The current pci connection state
15398 * This function is called after a PCI bus error affecting
15399 * this device has been detected.
/*
 * Returns PCI_ERS_RESULT_NEED_RESET normally, or
 * PCI_ERS_RESULT_DISCONNECT on permanent failure.
 * NOTE(review): extract is elided (locking around the early-out and
 * some braces missing); comments cover only the visible lines.
 */
15401 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15402 pci_channel_state_t state)
15404 struct net_device *netdev = pci_get_drvdata(pdev);
15405 struct tg3 *tp = netdev_priv(netdev);
15406 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15408 netdev_info(netdev, "PCI I/O error detected\n");
15412 if (!netif_running(netdev))
15417 tg3_netif_stop(tp);
15419 del_timer_sync(&tp->timer);
15420 tg3_flag_clear(tp, RESTART_TIMER);
15422 /* Want to make sure that the reset task doesn't run */
15423 cancel_work_sync(&tp->reset_task);
15424 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15425 tg3_flag_clear(tp, RESTART_TIMER);
15427 netif_device_detach(netdev);
15429 /* Clean up software state, even if MMIO is blocked */
15430 tg3_full_lock(tp, 0);
15431 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15432 tg3_full_unlock(tp);
15435 if (state == pci_channel_io_perm_failure)
15436 err = PCI_ERS_RESULT_DISCONNECT;
15438 pci_disable_device(pdev);
15446 * tg3_io_slot_reset - called after the pci bus has been reset.
15447 * @pdev: Pointer to PCI device
15449 * Restart the card from scratch, as if from a cold-boot.
15450 * At this point, the card has exprienced a hard reset,
15451 * followed by fixups by BIOS, and has its config space
15452 * set up identically to what it was at cold boot.
/*
 * Returns PCI_ERS_RESULT_RECOVERED on success, otherwise the initial
 * PCI_ERS_RESULT_DISCONNECT.
 * NOTE(review): extract is elided (rtnl locking / `done:` label and
 * braces missing); comments cover only the visible lines.
 */
15454 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15456 struct net_device *netdev = pci_get_drvdata(pdev);
15457 struct tg3 *tp = netdev_priv(netdev);
15458 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15463 if (pci_enable_device(pdev)) {
15464 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15468 pci_set_master(pdev);
/* Restore the config space saved by pci_save_state() at probe time,
 * then save it again for any future recovery cycle. */
15469 pci_restore_state(pdev);
15470 pci_save_state(pdev);
/* An interface that was down needs no hardware restart. */
15472 if (!netif_running(netdev)) {
15473 rc = PCI_ERS_RESULT_RECOVERED;
15477 err = tg3_power_up(tp);
15479 netdev_err(netdev, "Failed to restore register access.\n");
15483 rc = PCI_ERS_RESULT_RECOVERED;
15492 * tg3_io_resume - called when traffic can start flowing again.
15493 * @pdev: Pointer to PCI device
15495 * This callback is called when the error recovery driver tells
15496 * us that its OK to resume normal operation.
/*
 * NOTE(review): extract is elided (rtnl locking, error check and
 * `done:` label missing); comments cover only the visible lines.
 */
15498 static void tg3_io_resume(struct pci_dev *pdev)
15500 struct net_device *netdev = pci_get_drvdata(pdev);
15501 struct tg3 *tp = netdev_priv(netdev);
15506 if (!netif_running(netdev))
/* Restart the hardware under the full lock, as in tg3_resume(). */
15509 tg3_full_lock(tp, 0);
15510 tg3_flag_set(tp, INIT_COMPLETE);
15511 err = tg3_restart_hw(tp, 1);
15512 tg3_full_unlock(tp);
15514 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15518 netif_device_attach(netdev);
15520 tp->timer.expires = jiffies + tp->timer_offset;
15521 add_timer(&tp->timer);
15523 tg3_netif_start(tp);
/* PCI error-recovery callbacks (AER): detect -> slot reset -> resume. */
15531 static struct pci_error_handlers tg3_err_handler = {
15532 .error_detected = tg3_io_error_detected,
15533 .slot_reset = tg3_io_slot_reset,
15534 .resume = tg3_io_resume
/* Top-level PCI driver descriptor tying together the probe/remove
 * entry points, the device ID table, error recovery and PM ops. */
15537 static struct pci_driver tg3_driver = {
15538 .name = DRV_MODULE_NAME,
15539 .id_table = tg3_pci_tbl,
15540 .probe = tg3_init_one,
15541 .remove = __devexit_p(tg3_remove_one),
15542 .err_handler = &tg3_err_handler,
15543 .driver.pm = TG3_PM_OPS,
/* Module init: register the PCI driver; probing happens per-device. */
15546 static int __init tg3_init(void)
15548 return pci_register_driver(&tg3_driver);
/* Module exit: unregister the driver, which removes all bound devices. */
15551 static void __exit tg3_cleanup(void)
15553 pci_unregister_driver(&tg3_driver);
/* Hook the init/exit functions into the module load/unload sequence. */
15556 module_init(tg3_init);
15557 module_exit(tg3_cleanup);