2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6 * Copyright (c) a lot of people too. Please respect their work.
8 * See MAINTAINERS file for support contact information.
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/delay.h>
17 #include <linux/ethtool.h>
18 #include <linux/mii.h>
19 #include <linux/if_vlan.h>
20 #include <linux/crc32.h>
23 #include <linux/tcp.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/firmware.h>
29 #include <linux/pci-aspm.h>
30 #include <linux/prefetch.h>
35 #define RTL8169_VERSION "2.3LK-NAPI"
36 #define MODULENAME "r8169"
37 #define PFX MODULENAME ": "
39 #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
40 #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
41 #define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
42 #define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
43 #define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
44 #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45 #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
46 #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47 #define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48 #define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
49 #define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
50 #define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
53 #define assert(expr) \
55 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
56 #expr,__FILE__,__func__,__LINE__); \
58 #define dprintk(fmt, args...) \
59 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
61 #define assert(expr) do {} while (0)
62 #define dprintk(fmt, args...) do {} while (0)
63 #endif /* RTL8169_DEBUG */
65 #define R8169_MSG_DEFAULT \
66 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
68 #define TX_SLOTS_AVAIL(tp) \
69 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
71 /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
72 #define TX_FRAGS_READY_FOR(tp,nr_frags) \
73 (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
75 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
76 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
77 static const int multicast_filter_limit = 32;
79 #define MAX_READ_REQUEST_SHIFT 12
80 #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
81 #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
83 #define R8169_REGS_SIZE 256
84 #define R8169_NAPI_WEIGHT 64
85 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
86 #define NUM_RX_DESC 256U /* Number of Rx descriptor registers */
87 #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
88 #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
90 #define RTL8169_TX_TIMEOUT (6*HZ)
91 #define RTL8169_PHY_TIMEOUT (10*HZ)
93 /* write/read MMIO register */
94 #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
95 #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
96 #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
97 #define RTL_R8(reg) readb (ioaddr + (reg))
98 #define RTL_R16(reg) readw (ioaddr + (reg))
99 #define RTL_R32(reg) readl (ioaddr + (reg))
102 RTL_GIGA_MAC_VER_01 = 0,
143 RTL_GIGA_MAC_NONE = 0xff,
146 enum rtl_tx_desc_version {
151 #define JUMBO_1K ETH_DATA_LEN
152 #define JUMBO_4K (4*1024 - ETH_HLEN - 2)
153 #define JUMBO_6K (6*1024 - ETH_HLEN - 2)
154 #define JUMBO_7K (7*1024 - ETH_HLEN - 2)
155 #define JUMBO_9K (9*1024 - ETH_HLEN - 2)
157 #define _R(NAME,TD,FW,SZ,B) { \
165 static const struct {
167 enum rtl_tx_desc_version txd_version;
171 } rtl_chip_infos[] = {
173 [RTL_GIGA_MAC_VER_01] =
174 _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
175 [RTL_GIGA_MAC_VER_02] =
176 _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
177 [RTL_GIGA_MAC_VER_03] =
178 _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
179 [RTL_GIGA_MAC_VER_04] =
180 _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
181 [RTL_GIGA_MAC_VER_05] =
182 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
183 [RTL_GIGA_MAC_VER_06] =
184 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
186 [RTL_GIGA_MAC_VER_07] =
187 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
188 [RTL_GIGA_MAC_VER_08] =
189 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
190 [RTL_GIGA_MAC_VER_09] =
191 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
192 [RTL_GIGA_MAC_VER_10] =
193 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
194 [RTL_GIGA_MAC_VER_11] =
195 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
196 [RTL_GIGA_MAC_VER_12] =
197 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
198 [RTL_GIGA_MAC_VER_13] =
199 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
200 [RTL_GIGA_MAC_VER_14] =
201 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
202 [RTL_GIGA_MAC_VER_15] =
203 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
204 [RTL_GIGA_MAC_VER_16] =
205 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
206 [RTL_GIGA_MAC_VER_17] =
207 _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
208 [RTL_GIGA_MAC_VER_18] =
209 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
210 [RTL_GIGA_MAC_VER_19] =
211 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
212 [RTL_GIGA_MAC_VER_20] =
213 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
214 [RTL_GIGA_MAC_VER_21] =
215 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
216 [RTL_GIGA_MAC_VER_22] =
217 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
218 [RTL_GIGA_MAC_VER_23] =
219 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
220 [RTL_GIGA_MAC_VER_24] =
221 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
222 [RTL_GIGA_MAC_VER_25] =
223 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1,
225 [RTL_GIGA_MAC_VER_26] =
226 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2,
228 [RTL_GIGA_MAC_VER_27] =
229 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
230 [RTL_GIGA_MAC_VER_28] =
231 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
232 [RTL_GIGA_MAC_VER_29] =
233 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
235 [RTL_GIGA_MAC_VER_30] =
236 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
238 [RTL_GIGA_MAC_VER_31] =
239 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
240 [RTL_GIGA_MAC_VER_32] =
241 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1,
243 [RTL_GIGA_MAC_VER_33] =
244 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2,
246 [RTL_GIGA_MAC_VER_34] =
247 _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
249 [RTL_GIGA_MAC_VER_35] =
250 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1,
252 [RTL_GIGA_MAC_VER_36] =
253 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
255 [RTL_GIGA_MAC_VER_37] =
256 _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1,
258 [RTL_GIGA_MAC_VER_38] =
259 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
261 [RTL_GIGA_MAC_VER_39] =
262 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
264 [RTL_GIGA_MAC_VER_40] =
265 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1,
267 [RTL_GIGA_MAC_VER_41] =
268 _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
278 static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
279 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
280 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
281 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
282 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
283 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
284 { PCI_VENDOR_ID_DLINK, 0x4300,
285 PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
286 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
287 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
288 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
289 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
290 { PCI_VENDOR_ID_LINKSYS, 0x1032,
291 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
293 PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
297 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
299 static int rx_buf_sz = 16383;
306 MAC0 = 0, /* Ethernet hardware address. */
308 MAR0 = 8, /* Multicast filter. */
309 CounterAddrLow = 0x10,
310 CounterAddrHigh = 0x14,
311 TxDescStartAddrLow = 0x20,
312 TxDescStartAddrHigh = 0x24,
313 TxHDescStartAddrLow = 0x28,
314 TxHDescStartAddrHigh = 0x2c,
323 #define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
324 #define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
327 #define RX128_INT_EN (1 << 15) /* 8111c and later */
328 #define RX_MULTI_EN (1 << 14) /* 8111c only */
329 #define RXCFG_FIFO_SHIFT 13
330 /* No threshold before first PCI xfer */
331 #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
332 #define RXCFG_DMA_SHIFT 8
333 /* Unlimited maximum PCI burst. */
334 #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
341 #define PME_SIGNAL (1 << 5) /* 8168c and later */
352 RxDescAddrLow = 0xe4,
353 RxDescAddrHigh = 0xe8,
354 EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
356 #define NoEarlyTx 0x3f /* Max value : no early transmit. */
358 MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
360 #define TxPacketMax (8064 >> 7)
361 #define EarlySize 0x27
364 FuncEventMask = 0xf4,
365 FuncPresetState = 0xf8,
366 FuncForceEvent = 0xfc,
369 enum rtl8110_registers {
375 enum rtl8168_8101_registers {
378 #define CSIAR_FLAG 0x80000000
379 #define CSIAR_WRITE_CMD 0x80000000
380 #define CSIAR_BYTE_ENABLE 0x0f
381 #define CSIAR_BYTE_ENABLE_SHIFT 12
382 #define CSIAR_ADDR_MASK 0x0fff
383 #define CSIAR_FUNC_CARD 0x00000000
384 #define CSIAR_FUNC_SDIO 0x00010000
385 #define CSIAR_FUNC_NIC 0x00020000
388 #define EPHYAR_FLAG 0x80000000
389 #define EPHYAR_WRITE_CMD 0x80000000
390 #define EPHYAR_REG_MASK 0x1f
391 #define EPHYAR_REG_SHIFT 16
392 #define EPHYAR_DATA_MASK 0xffff
394 #define PFM_EN (1 << 6)
396 #define FIX_NAK_1 (1 << 4)
397 #define FIX_NAK_2 (1 << 3)
400 #define NOW_IS_OOB (1 << 7)
401 #define TX_EMPTY (1 << 5)
402 #define RX_EMPTY (1 << 4)
403 #define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
404 #define EN_NDP (1 << 3)
405 #define EN_OOB_RESET (1 << 2)
406 #define LINK_LIST_RDY (1 << 1)
408 #define EFUSEAR_FLAG 0x80000000
409 #define EFUSEAR_WRITE_CMD 0x80000000
410 #define EFUSEAR_READ_CMD 0x00000000
411 #define EFUSEAR_REG_MASK 0x03ff
412 #define EFUSEAR_REG_SHIFT 8
413 #define EFUSEAR_DATA_MASK 0xff
416 enum rtl8168_registers {
421 #define ERIAR_FLAG 0x80000000
422 #define ERIAR_WRITE_CMD 0x80000000
423 #define ERIAR_READ_CMD 0x00000000
424 #define ERIAR_ADDR_BYTE_ALIGN 4
425 #define ERIAR_TYPE_SHIFT 16
426 #define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
427 #define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
428 #define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
429 #define ERIAR_MASK_SHIFT 12
430 #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
431 #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
432 #define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
433 #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
434 EPHY_RXER_NUM = 0x7c,
435 OCPDR = 0xb0, /* OCP GPHY access */
436 #define OCPDR_WRITE_CMD 0x80000000
437 #define OCPDR_READ_CMD 0x00000000
438 #define OCPDR_REG_MASK 0x7f
439 #define OCPDR_GPHY_REG_SHIFT 16
440 #define OCPDR_DATA_MASK 0xffff
442 #define OCPAR_FLAG 0x80000000
443 #define OCPAR_GPHY_WRITE_CMD 0x8000f060
444 #define OCPAR_GPHY_READ_CMD 0x0000f060
446 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
447 MISC = 0xf0, /* 8168e only. */
448 #define TXPLA_RST (1 << 29)
449 #define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
450 #define PWM_EN (1 << 22)
451 #define RXDV_GATED_EN (1 << 19)
452 #define EARLY_TALLY_EN (1 << 16)
455 enum rtl_register_content {
456 /* InterruptStatusBits */
460 TxDescUnavail = 0x0080,
484 /* TXPoll register p.5 */
485 HPQ = 0x80, /* Poll cmd on the high prio queue */
486 NPQ = 0x40, /* Poll cmd on the low prio queue */
487 FSWInt = 0x01, /* Forced software interrupt */
491 Cfg9346_Unlock = 0xc0,
496 AcceptBroadcast = 0x08,
497 AcceptMulticast = 0x04,
499 AcceptAllPhys = 0x01,
500 #define RX_CONFIG_ACCEPT_MASK 0x3f
503 TxInterFrameGapShift = 24,
504 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
506 /* Config1 register p.24 */
509 Speed_down = (1 << 4),
513 PMEnable = (1 << 0), /* Power Management Enable */
515 /* Config2 register p. 25 */
516 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
517 PCI_Clock_66MHz = 0x01,
518 PCI_Clock_33MHz = 0x00,
520 /* Config3 register p.25 */
521 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
522 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
523 Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
524 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
526 /* Config4 register */
527 Jumbo_En1 = (1 << 1), /* 8168 only. Reserved in the 8168b */
529 /* Config5 register p.27 */
530 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
531 MWF = (1 << 5), /* Accept Multicast wakeup frame */
532 UWF = (1 << 4), /* Accept Unicast wakeup frame */
534 LanWake = (1 << 1), /* LanWake enable/disable */
535 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
538 TBIReset = 0x80000000,
539 TBILoopback = 0x40000000,
540 TBINwEnable = 0x20000000,
541 TBINwRestart = 0x10000000,
542 TBILinkOk = 0x02000000,
543 TBINwComplete = 0x01000000,
546 EnableBist = (1 << 15), // 8168 8101
547 Mac_dbgo_oe = (1 << 14), // 8168 8101
548 Normal_mode = (1 << 13), // unused
549 Force_half_dup = (1 << 12), // 8168 8101
550 Force_rxflow_en = (1 << 11), // 8168 8101
551 Force_txflow_en = (1 << 10), // 8168 8101
552 Cxpl_dbg_sel = (1 << 9), // 8168 8101
553 ASF = (1 << 8), // 8168 8101
554 PktCntrDisable = (1 << 7), // 8168 8101
555 Mac_dbgo_sel = 0x001c, // 8168
560 INTT_0 = 0x0000, // 8168
561 INTT_1 = 0x0001, // 8168
562 INTT_2 = 0x0002, // 8168
563 INTT_3 = 0x0003, // 8168
565 /* rtl8169_PHYstatus */
576 TBILinkOK = 0x02000000,
578 /* DumpCounterCommand */
583 /* First doubleword. */
584 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
585 RingEnd = (1 << 30), /* End of descriptor ring */
586 FirstFrag = (1 << 29), /* First segment of a packet */
587 LastFrag = (1 << 28), /* Final segment of a packet */
591 enum rtl_tx_desc_bit {
592 /* First doubleword. */
593 TD_LSO = (1 << 27), /* Large Send Offload */
594 #define TD_MSS_MAX 0x07ffu /* MSS value */
596 /* Second doubleword. */
597 TxVlanTag = (1 << 17), /* Add VLAN tag */
600 /* 8169, 8168b and 810x except 8102e. */
601 enum rtl_tx_desc_bit_0 {
602 /* First doubleword. */
603 #define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
604 TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
605 TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
606 TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
609 /* 8102e, 8168c and beyond. */
610 enum rtl_tx_desc_bit_1 {
611 /* Second doubleword. */
612 #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
613 TD1_IP_CS = (1 << 29), /* Calculate IP checksum */
614 TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
615 TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
618 static const struct rtl_tx_desc_info {
625 } tx_desc_info [] = {
628 .udp = TD0_IP_CS | TD0_UDP_CS,
629 .tcp = TD0_IP_CS | TD0_TCP_CS
631 .mss_shift = TD0_MSS_SHIFT,
636 .udp = TD1_IP_CS | TD1_UDP_CS,
637 .tcp = TD1_IP_CS | TD1_TCP_CS
639 .mss_shift = TD1_MSS_SHIFT,
644 enum rtl_rx_desc_bit {
646 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
647 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
649 #define RxProtoUDP (PID1)
650 #define RxProtoTCP (PID0)
651 #define RxProtoIP (PID1 | PID0)
652 #define RxProtoMask RxProtoIP
654 IPFail = (1 << 16), /* IP checksum failed */
655 UDPFail = (1 << 15), /* UDP/IP checksum failed */
656 TCPFail = (1 << 14), /* TCP/IP checksum failed */
657 RxVlanTag = (1 << 16), /* VLAN tag available */
660 #define RsvdMask 0x3fffc000
677 u8 __pad[sizeof(void *) - sizeof(u32)];
681 RTL_FEATURE_WOL = (1 << 0),
682 RTL_FEATURE_MSI = (1 << 1),
683 RTL_FEATURE_GMII = (1 << 2),
686 struct rtl8169_counters {
693 __le32 tx_one_collision;
694 __le32 tx_multi_collision;
703 RTL_FLAG_TASK_ENABLED,
704 RTL_FLAG_TASK_SLOW_PENDING,
705 RTL_FLAG_TASK_RESET_PENDING,
706 RTL_FLAG_TASK_PHY_PENDING,
710 struct rtl8169_stats {
713 struct u64_stats_sync syncp;
716 struct rtl8169_private {
717 void __iomem *mmio_addr; /* memory map physical address */
718 struct pci_dev *pci_dev;
719 struct net_device *dev;
720 struct napi_struct napi;
724 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
725 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
727 struct rtl8169_stats rx_stats;
728 struct rtl8169_stats tx_stats;
729 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
730 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
731 dma_addr_t TxPhyAddr;
732 dma_addr_t RxPhyAddr;
733 void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
734 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
735 struct timer_list timer;
741 void (*write)(struct rtl8169_private *, int, int);
742 int (*read)(struct rtl8169_private *, int);
745 struct pll_power_ops {
746 void (*down)(struct rtl8169_private *);
747 void (*up)(struct rtl8169_private *);
751 void (*enable)(struct rtl8169_private *);
752 void (*disable)(struct rtl8169_private *);
756 void (*write)(struct rtl8169_private *, int, int);
757 u32 (*read)(struct rtl8169_private *, int);
760 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
761 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
762 void (*phy_reset_enable)(struct rtl8169_private *tp);
763 void (*hw_start)(struct net_device *);
764 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
765 unsigned int (*link_ok)(void __iomem *);
766 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
769 DECLARE_BITMAP(flags, RTL_FLAG_MAX);
771 struct work_struct work;
776 struct mii_if_info mii;
777 struct rtl8169_counters counters;
782 const struct firmware *fw;
784 #define RTL_VER_SIZE 32
786 char version[RTL_VER_SIZE];
788 struct rtl_fw_phy_action {
793 #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
798 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
799 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
800 module_param(use_dac, int, 0);
801 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
802 module_param_named(debug, debug.msg_enable, int, 0);
803 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
804 MODULE_LICENSE("GPL");
805 MODULE_VERSION(RTL8169_VERSION);
806 MODULE_FIRMWARE(FIRMWARE_8168D_1);
807 MODULE_FIRMWARE(FIRMWARE_8168D_2);
808 MODULE_FIRMWARE(FIRMWARE_8168E_1);
809 MODULE_FIRMWARE(FIRMWARE_8168E_2);
810 MODULE_FIRMWARE(FIRMWARE_8168E_3);
811 MODULE_FIRMWARE(FIRMWARE_8105E_1);
812 MODULE_FIRMWARE(FIRMWARE_8168F_1);
813 MODULE_FIRMWARE(FIRMWARE_8168F_2);
814 MODULE_FIRMWARE(FIRMWARE_8402_1);
815 MODULE_FIRMWARE(FIRMWARE_8411_1);
816 MODULE_FIRMWARE(FIRMWARE_8106E_1);
817 MODULE_FIRMWARE(FIRMWARE_8168G_1);
819 static void rtl_lock_work(struct rtl8169_private *tp)
821 mutex_lock(&tp->wk.mutex);
824 static void rtl_unlock_work(struct rtl8169_private *tp)
826 mutex_unlock(&tp->wk.mutex);
829 static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
831 pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
832 PCI_EXP_DEVCTL_READRQ, force);
836 bool (*check)(struct rtl8169_private *);
840 static void rtl_udelay(unsigned int d)
845 static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
846 void (*delay)(unsigned int), unsigned int d, int n,
851 for (i = 0; i < n; i++) {
853 if (c->check(tp) == high)
856 netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
857 c->msg, !high, n, d);
861 static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
862 const struct rtl_cond *c,
863 unsigned int d, int n)
865 return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
868 static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
869 const struct rtl_cond *c,
870 unsigned int d, int n)
872 return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
875 static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
876 const struct rtl_cond *c,
877 unsigned int d, int n)
879 return rtl_loop_wait(tp, c, msleep, d, n, true);
882 static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
883 const struct rtl_cond *c,
884 unsigned int d, int n)
886 return rtl_loop_wait(tp, c, msleep, d, n, false);
/*
 * Declare a named polling condition: forward-declare the check helper,
 * define a struct rtl_cond binding the helper to its printable name,
 * then open the definition of the check helper itself (the user supplies
 * the body right after the macro invocation).
 */
#define DECLARE_RTL_COND(name)				\
static bool name ## _check(struct rtl8169_private *);	\
							\
static const struct rtl_cond name = {			\
	.check	= name ## _check,			\
	.msg	= #name					\
};							\
							\
static bool name ## _check(struct rtl8169_private *tp)
899 DECLARE_RTL_COND(rtl_ocpar_cond)
901 void __iomem *ioaddr = tp->mmio_addr;
903 return RTL_R32(OCPAR) & OCPAR_FLAG;
906 static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
908 void __iomem *ioaddr = tp->mmio_addr;
910 RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
912 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
916 static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
918 void __iomem *ioaddr = tp->mmio_addr;
920 RTL_W32(OCPDR, data);
921 RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
923 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
926 DECLARE_RTL_COND(rtl_eriar_cond)
928 void __iomem *ioaddr = tp->mmio_addr;
930 return RTL_R32(ERIAR) & ERIAR_FLAG;
933 static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
935 void __iomem *ioaddr = tp->mmio_addr;
938 RTL_W32(ERIAR, 0x800010e8);
941 if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
944 ocp_write(tp, 0x1, 0x30, 0x00000001);
947 #define OOB_CMD_RESET 0x00
948 #define OOB_CMD_DRIVER_START 0x05
949 #define OOB_CMD_DRIVER_STOP 0x06
951 static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
953 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
956 DECLARE_RTL_COND(rtl_ocp_read_cond)
960 reg = rtl8168_get_ocp_reg(tp);
962 return ocp_read(tp, 0x0f, reg) & 0x00000800;
965 static void rtl8168_driver_start(struct rtl8169_private *tp)
967 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
969 rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
972 static void rtl8168_driver_stop(struct rtl8169_private *tp)
974 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
976 rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
979 static int r8168dp_check_dash(struct rtl8169_private *tp)
981 u16 reg = rtl8168_get_ocp_reg(tp);
983 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
986 static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
988 if (reg & 0xffff0001) {
989 netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
995 DECLARE_RTL_COND(rtl_ocp_gphy_cond)
997 void __iomem *ioaddr = tp->mmio_addr;
999 return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
1002 static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1004 void __iomem *ioaddr = tp->mmio_addr;
1006 if (rtl_ocp_reg_failure(tp, reg))
1009 RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
1011 rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
1014 static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
1016 void __iomem *ioaddr = tp->mmio_addr;
1018 if (rtl_ocp_reg_failure(tp, reg))
1021 RTL_W32(GPHY_OCP, reg << 15);
1023 return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
1024 (RTL_R32(GPHY_OCP) & 0xffff) : ~0;
1027 static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
1031 val = r8168_phy_ocp_read(tp, reg);
1032 r8168_phy_ocp_write(tp, reg, (val | p) & ~m);
1035 static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1037 void __iomem *ioaddr = tp->mmio_addr;
1039 if (rtl_ocp_reg_failure(tp, reg))
1042 RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
1045 static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
1047 void __iomem *ioaddr = tp->mmio_addr;
1049 if (rtl_ocp_reg_failure(tp, reg))
1052 RTL_W32(OCPDR, reg << 15);
1054 return RTL_R32(OCPDR);
1057 #define OCP_STD_PHY_BASE 0xa400
1059 static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
1062 tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
1066 if (tp->ocp_base != OCP_STD_PHY_BASE)
1069 r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
1072 static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
1074 if (tp->ocp_base != OCP_STD_PHY_BASE)
1077 return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
1080 DECLARE_RTL_COND(rtl_phyar_cond)
1082 void __iomem *ioaddr = tp->mmio_addr;
1084 return RTL_R32(PHYAR) & 0x80000000;
1087 static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
1089 void __iomem *ioaddr = tp->mmio_addr;
1091 RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
1093 rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
1095 * According to hardware specs a 20us delay is required after write
1096 * complete indication, but before sending next command.
1101 static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
1103 void __iomem *ioaddr = tp->mmio_addr;
1106 RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);
1108 value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
1109 RTL_R32(PHYAR) & 0xffff : ~0;
1112 * According to hardware specs a 20us delay is required after read
1113 * complete indication, but before sending next command.
1120 static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
1122 void __iomem *ioaddr = tp->mmio_addr;
1124 RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
1125 RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
1126 RTL_W32(EPHY_RXER_NUM, 0);
1128 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
1131 static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
1133 r8168dp_1_mdio_access(tp, reg,
1134 OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
1137 static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
1139 void __iomem *ioaddr = tp->mmio_addr;
1141 r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
1144 RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
1145 RTL_W32(EPHY_RXER_NUM, 0);
1147 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
1148 RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
1151 #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
1153 static void r8168dp_2_mdio_start(void __iomem *ioaddr)
1155 RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
1158 static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
1160 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
1163 static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
1165 void __iomem *ioaddr = tp->mmio_addr;
1167 r8168dp_2_mdio_start(ioaddr);
1169 r8169_mdio_write(tp, reg, value);
1171 r8168dp_2_mdio_stop(ioaddr);
1174 static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
1176 void __iomem *ioaddr = tp->mmio_addr;
1179 r8168dp_2_mdio_start(ioaddr);
1181 value = r8169_mdio_read(tp, reg);
1183 r8168dp_2_mdio_stop(ioaddr);
1188 static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
1190 tp->mdio_ops.write(tp, location, val);
1193 static int rtl_readphy(struct rtl8169_private *tp, int location)
1195 return tp->mdio_ops.read(tp, location);
1198 static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
1200 rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
1203 static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
1207 val = rtl_readphy(tp, reg_addr);
1208 rtl_writephy(tp, reg_addr, (val | p) & ~m);
1211 static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
1214 struct rtl8169_private *tp = netdev_priv(dev);
1216 rtl_writephy(tp, location, val);
1219 static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
1221 struct rtl8169_private *tp = netdev_priv(dev);
1223 return rtl_readphy(tp, location);
1226 DECLARE_RTL_COND(rtl_ephyar_cond)
1228 void __iomem *ioaddr = tp->mmio_addr;
1230 return RTL_R32(EPHYAR) & EPHYAR_FLAG;
1233 static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
1235 void __iomem *ioaddr = tp->mmio_addr;
1237 RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
1238 (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1240 rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
1245 static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
1247 void __iomem *ioaddr = tp->mmio_addr;
1249 RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1251 return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
1252 RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
1255 static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
1258 void __iomem *ioaddr = tp->mmio_addr;
1260 BUG_ON((addr & 3) || (mask == 0));
1261 RTL_W32(ERIDR, val);
1262 RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
1264 rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
1267 static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
1269 void __iomem *ioaddr = tp->mmio_addr;
1271 RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
1273 return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
1274 RTL_R32(ERIDR) : ~0;
1277 static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
1282 val = rtl_eri_read(tp, addr, type);
1283 rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
1292 static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
1293 const struct exgmac_reg *r, int len)
1296 rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1301 DECLARE_RTL_COND(rtl_efusear_cond)
1303 void __iomem *ioaddr = tp->mmio_addr;
1305 return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
1308 static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
1310 void __iomem *ioaddr = tp->mmio_addr;
1312 RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
1314 return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
1315 RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
1318 static u16 rtl_get_events(struct rtl8169_private *tp)
1320 void __iomem *ioaddr = tp->mmio_addr;
1322 return RTL_R16(IntrStatus);
1325 static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
1327 void __iomem *ioaddr = tp->mmio_addr;
1329 RTL_W16(IntrStatus, bits);
1333 static void rtl_irq_disable(struct rtl8169_private *tp)
1335 void __iomem *ioaddr = tp->mmio_addr;
1337 RTL_W16(IntrMask, 0);
1341 static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
1343 void __iomem *ioaddr = tp->mmio_addr;
1345 RTL_W16(IntrMask, bits);
1348 #define RTL_EVENT_NAPI_RX (RxOK | RxErr)
1349 #define RTL_EVENT_NAPI_TX (TxOK | TxErr)
1350 #define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
1352 static void rtl_irq_enable_all(struct rtl8169_private *tp)
1354 rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
1357 static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
1359 void __iomem *ioaddr = tp->mmio_addr;
1361 rtl_irq_disable(tp);
1362 rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
1366 static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
1368 void __iomem *ioaddr = tp->mmio_addr;
1370 return RTL_R32(TBICSR) & TBIReset;
1373 static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
1375 return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
1378 static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
1380 return RTL_R32(TBICSR) & TBILinkOk;
1383 static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
1385 return RTL_R8(PHYstatus) & LinkStatus;
1388 static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
1390 void __iomem *ioaddr = tp->mmio_addr;
1392 RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
1395 static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1399 val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1400 rtl_writephy(tp, MII_BMCR, val & 0xffff);
/*
 * Apply per-chip ERI register fixups after a link change.  The register
 * values written to 0x1bc/0x1dc (or 0x1d0/0x1dc for VER_37) depend on the
 * negotiated speed as reported by PHYstatus.  Only versions 34-38 need this.
 * NOTE(review): trailing rtl_eri_write()/rtl_w1w0_eri() arguments (presumably
 * ERIAR_EXGMAC) and the early-return body are elided in this view.
 */
1403 static void rtl_link_chg_patch(struct rtl8169_private *tp)
1405 void __iomem *ioaddr = tp->mmio_addr;
1406 struct net_device *dev = tp->dev;
/* Nothing to patch while the interface is down. */
1408 if (!netif_running(dev))
1411 if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
1412 tp->mac_version == RTL_GIGA_MAC_VER_38) {
1413 if (RTL_R8(PHYstatus) & _1000bpsF) {
1414 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1416 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1418 } else if (RTL_R8(PHYstatus) & _100bps) {
1419 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1421 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
/* else: 10 Mbps (or no link) settings */
1424 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1426 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1429 /* Reset packet filter */
1430 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
1432 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
1434 } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
1435 tp->mac_version == RTL_GIGA_MAC_VER_36) {
1436 if (RTL_R8(PHYstatus) & _1000bpsF) {
1437 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1439 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1442 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1444 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1447 } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
1448 if (RTL_R8(PHYstatus) & _10bps) {
1449 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
1451 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
1454 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
/*
 * Common link-state handler: sync the carrier state with the hardware link
 * bit, apply per-chip link fixups, and drive runtime PM (resume on link-up,
 * schedule a delayed suspend on link-down).
 * NOTE(review): the `if (pm)` guards around the pm_* calls appear elided in
 * this view — runtime-PM actions are presumably conditional on @pm.
 */
1460 static void __rtl8169_check_link_status(struct net_device *dev,
1461 struct rtl8169_private *tp,
1462 void __iomem *ioaddr, bool pm)
1464 if (tp->link_ok(ioaddr)) {
1465 rtl_link_chg_patch(tp);
1466 /* This is to cancel a scheduled suspend if there's one. */
1468 pm_request_resume(&tp->pci_dev->dev);
1469 netif_carrier_on(dev);
/* Rate-limit the "link up" message; link flap could spam the log. */
1470 if (net_ratelimit())
1471 netif_info(tp, ifup, dev, "link up\n");
1473 netif_carrier_off(dev);
1474 netif_info(tp, ifdown, dev, "link down\n");
/* Idle link: let the device runtime-suspend after a 5 s grace period. */
1476 pm_schedule_suspend(&tp->pci_dev->dev, 5000);
1480 static void rtl8169_check_link_status(struct net_device *dev,
1481 struct rtl8169_private *tp,
1482 void __iomem *ioaddr)
1484 __rtl8169_check_link_status(dev, tp, ioaddr, false);
1487 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
/*
 * Read the chip's Config1/Config3/Config5 registers and translate the
 * enabled wake sources into ethtool WAKE_* flags.  Returns 0 when power
 * management is disabled (PMEnable clear in Config1).
 * NOTE(review): the Config5 UWF/BWF/MWF condition lines are elided in this
 * view — each wolopts |= below is presumably guarded by its Config5 bit.
 */
1489 static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
1491 void __iomem *ioaddr = tp->mmio_addr;
1495 options = RTL_R8(Config1);
1496 if (!(options & PMEnable))
1499 options = RTL_R8(Config3);
1500 if (options & LinkUp)
1501 wolopts |= WAKE_PHY;
1502 if (options & MagicPacket)
1503 wolopts |= WAKE_MAGIC;
1505 options = RTL_R8(Config5);
1507 wolopts |= WAKE_UCAST;
1509 wolopts |= WAKE_BCAST;
1511 wolopts |= WAKE_MCAST;
/*
 * ethtool .get_wol: report supported and currently-enabled WoL modes.
 * NOTE(review): the matching rtl_lock_work() call before the reads is elided
 * in this view; the unlock below implies it.
 */
1516 static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1518 struct rtl8169_private *tp = netdev_priv(dev);
1522 wol->supported = WAKE_ANY;
1523 wol->wolopts = __rtl8169_get_wol(tp);
1525 rtl_unlock_work(tp);
/*
 * Program the chip's wake-on-LAN configuration from ethtool WAKE_* flags.
 * A table maps each WAKE_* option to the config register and bit that
 * enables it; the Cfg9346 unlock/lock pair makes the registers writable.
 */
1528 static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1530 void __iomem *ioaddr = tp->mmio_addr;
1532 static const struct {
1537 { WAKE_PHY, Config3, LinkUp },
1538 { WAKE_MAGIC, Config3, MagicPacket },
1539 { WAKE_UCAST, Config5, UWF },
1540 { WAKE_BCAST, Config5, BWF },
1541 { WAKE_MCAST, Config5, MWF },
1542 { WAKE_ANY, Config5, LanWake }
1546 RTL_W8(Cfg9346, Cfg9346_Unlock);
/* Set or clear each option's enable bit per the requested wolopts. */
1548 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
1549 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
1550 if (wolopts & cfg[i].opt)
1551 options |= cfg[i].mask;
1552 RTL_W8(cfg[i].reg, options);
/* PME signalling lives in different registers depending on chip family. */
1555 switch (tp->mac_version) {
1556 case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
1557 options = RTL_R8(Config1) & ~PMEnable;
/* NOTE(review): the `if (wolopts)` guard lines are elided in this view. */
1559 options |= PMEnable;
1560 RTL_W8(Config1, options);
1563 options = RTL_R8(Config2) & ~PME_SIGNAL;
1565 options |= PME_SIGNAL;
1566 RTL_W8(Config2, options);
1570 RTL_W8(Cfg9346, Cfg9346_Lock);
/*
 * ethtool .set_wol: record whether WoL is in use in tp->features, program
 * the hardware, and tell the PM core whether this device may wake the system.
 * NOTE(review): the rtl_lock_work() call and return statement are elided in
 * this view.
 */
1573 static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1575 struct rtl8169_private *tp = netdev_priv(dev);
1580 tp->features |= RTL_FEATURE_WOL;
1582 tp->features &= ~RTL_FEATURE_WOL;
1583 __rtl8169_set_wol(tp, wol->wolopts);
1585 rtl_unlock_work(tp);
/* Any non-zero wolopts means the device is a wakeup source. */
1587 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
1592 static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
1594 return rtl_chip_infos[tp->mac_version].fw_name;
/*
 * ethtool .get_drvinfo: report driver name/version, PCI bus id, and the
 * loaded firmware version (if firmware was successfully loaded).
 */
1597 static void rtl8169_get_drvinfo(struct net_device *dev,
1598 struct ethtool_drvinfo *info)
1600 struct rtl8169_private *tp = netdev_priv(dev);
1601 struct rtl_fw *rtl_fw = tp->rtl_fw;
1603 strlcpy(info->driver, MODULENAME, sizeof(info->driver));
1604 strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
1605 strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
/* Compile-time guarantee that the fw version string always fits. */
1606 BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
/* tp->rtl_fw may hold an ERR_PTR/NULL sentinel when no firmware is loaded. */
1607 if (!IS_ERR_OR_NULL(rtl_fw))
1608 strlcpy(info->fw_version, rtl_fw->version,
1609 sizeof(info->fw_version));
1612 static int rtl8169_get_regs_len(struct net_device *dev)
1614 return R8169_REGS_SIZE;
/*
 * Speed setter for TBI (fibre) chips.  Only forced 1000/full or autoneg are
 * meaningful on TBI; anything else is refused with a warning.
 * NOTE(review): the `int ret` declaration and return paths are elided in
 * this view.
 */
1617 static int rtl8169_set_speed_tbi(struct net_device *dev,
1618 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1620 struct rtl8169_private *tp = netdev_priv(dev);
1621 void __iomem *ioaddr = tp->mmio_addr;
1625 reg = RTL_R32(TBICSR);
1626 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
1627 (duplex == DUPLEX_FULL)) {
/* Forced 1000/full: turn NWay off. */
1628 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
1629 } else if (autoneg == AUTONEG_ENABLE)
/* Autoneg: enable NWay and restart negotiation. */
1630 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
1632 netif_warn(tp, link, dev,
1633 "incorrect speed setting refused in TBI mode\n");
/*
 * Speed setter for copper (MII/GMII) chips.  With autoneg, builds the
 * MII_ADVERTISE and MII_CTRL1000 advertisement masks from the ethtool
 * ADVERTISED_* bits and restarts negotiation; with forced mode, programs
 * BMCR directly.  Chips without GMII (8100e/8101e/8102e) reject any
 * gigabit advertisement.  VER_02/03 need extra vendor PHY writes when
 * forced to 100 Mbps.
 * NOTE(review): several lines (auto_nego declaration, error returns,
 * default/else branches) are elided in this view.
 */
1640 static int rtl8169_set_speed_xmii(struct net_device *dev,
1641 u8 autoneg, u16 speed, u8 duplex, u32 adv)
1643 struct rtl8169_private *tp = netdev_priv(dev);
1644 int giga_ctrl, bmcr;
/* Select PHY register page 0. */
1647 rtl_writephy(tp, 0x1f, 0x0000);
1649 if (autoneg == AUTONEG_ENABLE) {
1652 auto_nego = rtl_readphy(tp, MII_ADVERTISE);
1653 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
1654 ADVERTISE_100HALF | ADVERTISE_100FULL);
1656 if (adv & ADVERTISED_10baseT_Half)
1657 auto_nego |= ADVERTISE_10HALF;
1658 if (adv & ADVERTISED_10baseT_Full)
1659 auto_nego |= ADVERTISE_10FULL;
1660 if (adv & ADVERTISED_100baseT_Half)
1661 auto_nego |= ADVERTISE_100HALF;
1662 if (adv & ADVERTISED_100baseT_Full)
1663 auto_nego |= ADVERTISE_100FULL;
/* Always advertise both flow-control capabilities. */
1665 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1667 giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
1668 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
1670 /* The 8100e/8101e/8102e do Fast Ethernet only. */
1671 if (tp->mii.supports_gmii) {
1672 if (adv & ADVERTISED_1000baseT_Half)
1673 giga_ctrl |= ADVERTISE_1000HALF;
1674 if (adv & ADVERTISED_1000baseT_Full)
1675 giga_ctrl |= ADVERTISE_1000FULL;
1676 } else if (adv & (ADVERTISED_1000baseT_Half |
1677 ADVERTISED_1000baseT_Full)) {
1678 netif_info(tp, link, dev,
1679 "PHY does not support 1000Mbps\n");
1683 bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
1685 rtl_writephy(tp, MII_ADVERTISE, auto_nego);
1686 rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
/* Forced mode: only 10 or 100 Mbps can be forced. */
1690 if (speed == SPEED_10)
1692 else if (speed == SPEED_100)
1693 bmcr = BMCR_SPEED100;
1697 if (duplex == DUPLEX_FULL)
1698 bmcr |= BMCR_FULLDPLX;
1701 rtl_writephy(tp, MII_BMCR, bmcr);
/* Vendor-specific PHY quirk for VER_02/03 when forced to 100 Mbps. */
1703 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
1704 tp->mac_version == RTL_GIGA_MAC_VER_03) {
1705 if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
1706 rtl_writephy(tp, 0x17, 0x2138);
1707 rtl_writephy(tp, 0x0e, 0x0260);
1709 rtl_writephy(tp, 0x17, 0x2108);
1710 rtl_writephy(tp, 0x0e, 0x0000);
/*
 * Dispatch the speed change to the per-chip handler (TBI or XMII), and arm
 * the PHY watchdog timer when gigabit autoneg is requested on a running
 * interface (some chips need periodic re-checking to establish 1000 Mbps).
 * NOTE(review): the `int ret` declaration, error-bailout and return lines
 * are elided in this view.
 */
1719 static int rtl8169_set_speed(struct net_device *dev,
1720 u8 autoneg, u16 speed, u8 duplex, u32 advertising)
1722 struct rtl8169_private *tp = netdev_priv(dev);
1725 ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
1729 if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
1730 (advertising & ADVERTISED_1000baseT_Full)) {
1731 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
/*
 * ethtool .set_settings: stop the PHY timer, then apply the requested link
 * parameters under the work lock.
 * NOTE(review): the rtl_lock_work() call and final return are elided in
 * this view.
 */
1737 static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1739 struct rtl8169_private *tp = netdev_priv(dev);
/* Quiesce the PHY watchdog before reconfiguring the link. */
1742 del_timer_sync(&tp->timer);
1745 ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
1746 cmd->duplex, cmd->advertising);
1747 rtl_unlock_work(tp);
/*
 * ndo_fix_features: strip features the hardware cannot honour at the current
 * MTU — TSO is limited by TD_MSS_MAX, and some chips cannot checksum jumbo
 * frames on transmit.
 * NOTE(review): the `return features;` line is elided in this view.
 */
1752 static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1753 netdev_features_t features)
1755 struct rtl8169_private *tp = netdev_priv(dev);
1757 if (dev->mtu > TD_MSS_MAX)
1758 features &= ~NETIF_F_ALL_TSO;
1760 if (dev->mtu > JUMBO_1K &&
1761 !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
1762 features &= ~NETIF_F_IP_CSUM;
1767 static void __rtl8169_set_features(struct net_device *dev,
1768 netdev_features_t features)
1770 struct rtl8169_private *tp = netdev_priv(dev);
1771 netdev_features_t changed = features ^ dev->features;
1772 void __iomem *ioaddr = tp->mmio_addr;
1774 if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
1777 if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
1778 if (features & NETIF_F_RXCSUM)
1779 tp->cp_cmd |= RxChkSum;
1781 tp->cp_cmd &= ~RxChkSum;
1783 if (dev->features & NETIF_F_HW_VLAN_RX)
1784 tp->cp_cmd |= RxVlan;
1786 tp->cp_cmd &= ~RxVlan;
1788 RTL_W16(CPlusCmd, tp->cp_cmd);
1791 if (changed & NETIF_F_RXALL) {
1792 int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
1793 if (features & NETIF_F_RXALL)
1794 tmp |= (AcceptErr | AcceptRunt);
1795 RTL_W32(RxConfig, tmp);
/*
 * ndo_set_features: apply the new feature set under the work lock.
 * NOTE(review): the rtl_lock_work() call and `return 0;` are elided in
 * this view.
 */
1799 static int rtl8169_set_features(struct net_device *dev,
1800 netdev_features_t features)
1802 struct rtl8169_private *tp = netdev_priv(dev);
1805 __rtl8169_set_features(dev, features);
1806 rtl_unlock_work(tp);
1812 static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
1814 return (vlan_tx_tag_present(skb)) ?
1815 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1818 static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1820 u32 opts2 = le32_to_cpu(desc->opts2);
1822 if (opts2 & RxVlanTag)
1823 __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
/*
 * ethtool get_settings for TBI (fibre) chips: the link is always reported
 * as 1000 Mbps full duplex; only the autoneg (NWay) state varies.
 * NOTE(review): the `cmd->supported =` line introducing the continuation at
 * 1833, and the return, are elided in this view.
 */
1826 static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
1828 struct rtl8169_private *tp = netdev_priv(dev);
1829 void __iomem *ioaddr = tp->mmio_addr;
1833 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
1834 cmd->port = PORT_FIBRE;
1835 cmd->transceiver = XCVR_INTERNAL;
1837 status = RTL_R32(TBICSR);
1838 cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
1839 cmd->autoneg = !!(status & TBINwEnable);
1841 ethtool_cmd_speed_set(cmd, SPEED_1000);
1842 cmd->duplex = DUPLEX_FULL; /* Always set */
1847 static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
1849 struct rtl8169_private *tp = netdev_priv(dev);
1851 return mii_ethtool_gset(&tp->mii, cmd);
/*
 * ethtool .get_settings: dispatch to the per-chip handler (TBI or XMII)
 * under the work lock.
 * NOTE(review): the rtl_lock_work() call and `return rc;` are elided in
 * this view.
 */
1854 static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1856 struct rtl8169_private *tp = netdev_priv(dev);
1860 rc = tp->get_settings(dev, cmd);
1861 rtl_unlock_work(tp);
/*
 * ethtool .get_regs: copy up to R8169_REGS_SIZE bytes of MMIO register
 * space into the user-supplied buffer, under the work lock.
 * NOTE(review): the `void *p` parameter line and rtl_lock_work() call are
 * elided in this view.
 */
1866 static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1869 struct rtl8169_private *tp = netdev_priv(dev);
/* Clamp the request to the size of the register window. */
1871 if (regs->len > R8169_REGS_SIZE)
1872 regs->len = R8169_REGS_SIZE;
1875 memcpy_fromio(p, tp->mmio_addr, regs->len);
1876 rtl_unlock_work(tp);
1879 static u32 rtl8169_get_msglevel(struct net_device *dev)
1881 struct rtl8169_private *tp = netdev_priv(dev);
1883 return tp->msg_enable;
1886 static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1888 struct rtl8169_private *tp = netdev_priv(dev);
1890 tp->msg_enable = value;
/*
 * ethtool statistics names; order must match the data[] indices filled in
 * rtl8169_get_ethtool_stats().
 * NOTE(review): most table entries are elided in this view.
 */
1893 static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
1900 "tx_single_collisions",
1901 "tx_multi_collisions",
/*
 * ethtool .get_sset_count: number of statistics strings.
 * NOTE(review): the switch on @sset (ETH_SS_STATS vs -EOPNOTSUPP) is elided
 * in this view.
 */
1909 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1913 return ARRAY_SIZE(rtl8169_gstrings);
1919 DECLARE_RTL_COND(rtl_counters_cond)
1921 void __iomem *ioaddr = tp->mmio_addr;
1923 return RTL_R32(CounterAddrLow) & CounterDump;
/*
 * Ask the chip to DMA its hardware tally counters into a temporary coherent
 * buffer and cache the result in tp->counters.  Skipped when the receiver
 * is disabled, since some chips cannot dump counters in that state.
 * NOTE(review): the dma_addr_t/cmd declarations, the NULL-allocation check
 * and an early return are elided in this view.
 */
1926 static void rtl8169_update_counters(struct net_device *dev)
1928 struct rtl8169_private *tp = netdev_priv(dev);
1929 void __iomem *ioaddr = tp->mmio_addr;
1930 struct device *d = &tp->pci_dev->dev;
1931 struct rtl8169_counters *counters;
1936 * Some chips are unable to dump tally counters when the receiver
1939 if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
1942 counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
/* Hand the chip the 64-bit DMA address, then trigger the dump. */
1946 RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
1947 cmd = (u64)paddr & DMA_BIT_MASK(32);
1948 RTL_W32(CounterAddrLow, cmd);
1949 RTL_W32(CounterAddrLow, cmd | CounterDump);
/* Copy the counters out only if the dump completed within the timeout. */
1951 if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
1952 memcpy(&tp->counters, counters, sizeof(*counters));
1954 RTL_W32(CounterAddrLow, 0);
1955 RTL_W32(CounterAddrHigh, 0);
1957 dma_free_coherent(d, sizeof(*counters), counters, paddr);
/*
 * ethtool .get_ethtool_stats: refresh the hardware tally counters, then
 * export them in the order declared by rtl8169_gstrings[].  Counter widths
 * differ (64/32/16 bit) per hardware field, hence the mixed leNN_to_cpu.
 */
1960 static void rtl8169_get_ethtool_stats(struct net_device *dev,
1961 struct ethtool_stats *stats, u64 *data)
1963 struct rtl8169_private *tp = netdev_priv(dev);
1967 rtl8169_update_counters(dev);
1969 data[0] = le64_to_cpu(tp->counters.tx_packets);
1970 data[1] = le64_to_cpu(tp->counters.rx_packets);
1971 data[2] = le64_to_cpu(tp->counters.tx_errors);
1972 data[3] = le32_to_cpu(tp->counters.rx_errors);
1973 data[4] = le16_to_cpu(tp->counters.rx_missed);
1974 data[5] = le16_to_cpu(tp->counters.align_errors);
1975 data[6] = le32_to_cpu(tp->counters.tx_one_collision);
1976 data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
1977 data[8] = le64_to_cpu(tp->counters.rx_unicast);
1978 data[9] = le64_to_cpu(tp->counters.rx_broadcast);
1979 data[10] = le32_to_cpu(tp->counters.rx_multicast);
1980 data[11] = le16_to_cpu(tp->counters.tx_aborted);
1981 data[12] = le16_to_cpu(tp->counters.tx_underun);
/*
 * ethtool .get_strings: copy the statistics name table to userspace.
 * NOTE(review): the switch on @stringset (ETH_SS_STATS) is elided in this
 * view.
 */
1984 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1988 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
/* ethtool operations table for this driver. */
1993 static const struct ethtool_ops rtl8169_ethtool_ops = {
1994 .get_drvinfo = rtl8169_get_drvinfo,
1995 .get_regs_len = rtl8169_get_regs_len,
1996 .get_link = ethtool_op_get_link,
1997 .get_settings = rtl8169_get_settings,
1998 .set_settings = rtl8169_set_settings,
1999 .get_msglevel = rtl8169_get_msglevel,
2000 .set_msglevel = rtl8169_set_msglevel,
2001 .get_regs = rtl8169_get_regs,
2002 .get_wol = rtl8169_get_wol,
2003 .set_wol = rtl8169_set_wol,
2004 .get_strings = rtl8169_get_strings,
2005 .get_sset_count = rtl8169_get_sset_count,
2006 .get_ethtool_stats = rtl8169_get_ethtool_stats,
2007 .get_ts_info = ethtool_op_get_ts_info,
/*
 * Identify the chip revision by matching TxConfig against a mask/value
 * table, first match wins — so more specific masks must precede catch-all
 * family entries.  Falls back to @default_version (with a notice) when no
 * entry matches.
 */
2010 static void rtl8169_get_mac_version(struct rtl8169_private *tp,
2011 struct net_device *dev, u8 default_version)
2013 void __iomem *ioaddr = tp->mmio_addr;
2015 * The driver currently handles the 8168Bf and the 8168Be identically
2016 * but they can be identified more specifically through the test below
2019 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
2021 * Same thing for the 8101Eb and the 8101Ec:
2023 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
2025 static const struct rtl_mac_info {
/* 8168G family */
2031 { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
2032 { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
/* 8168F family */
2035 { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 },
2036 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
2037 { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 },
/* 8168E family */
2040 { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 },
2041 { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 },
2042 { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 },
2043 { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 },
/* 8168D family */
2046 { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
2047 { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
2048 { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
2050 /* 8168DP family. */
2051 { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 },
2052 { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 },
2053 { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 },
/* 8168C family */
2056 { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
2057 { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
2058 { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
2059 { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 },
2060 { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 },
2061 { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 },
2062 { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 },
2063 { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 },
2064 { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 },
/* 8168B family */
2067 { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 },
2068 { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 },
2069 { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 },
2070 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
/* 8101 family */
2073 { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 },
2074 { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 },
2075 { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
2076 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
2077 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
2078 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
2079 { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
2080 { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
2081 { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
2082 { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
2083 { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
2084 { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
2085 { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 },
2086 { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
2087 { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 },
2088 { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
2089 { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 },
2090 { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 },
2091 { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
2092 /* FIXME: where did these entries come from ? -- FR */
2093 { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
2094 { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 },
/* 8110 family */
2097 { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 },
2098 { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 },
2099 { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 },
2100 { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 },
2101 { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 },
2102 { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 },
/* Catch-all terminator: mask 0 matches anything, ending the scan. */
2105 { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
2107 const struct rtl_mac_info *p = mac_info;
2110 reg = RTL_R32(TxConfig);
/* Linear scan; the terminator guarantees the loop ends. */
2111 while ((reg & p->mask) != p->val)
2113 tp->mac_version = p->mac_version;
2115 if (tp->mac_version == RTL_GIGA_MAC_NONE) {
2116 netif_notice(tp, probe, dev,
2117 "unknown MAC, using family default\n");
2118 tp->mac_version = default_version;
2122 static void rtl8169_print_mac_version(struct rtl8169_private *tp)
2124 dprintk("mac_version = 0x%02x\n", tp->mac_version);
/*
 * Write a sequence of @len {reg, val} pairs to the PHY.
 * NOTE(review): the loop header advancing @regs is elided in this view.
 */
2132 static void rtl_writephy_batch(struct rtl8169_private *tp,
2133 const struct phy_reg *regs, int len)
2136 rtl_writephy(tp, regs->reg, regs->val);
/*
 * Opcodes of the firmware "PHY action" bytecode: each 32-bit word encodes
 * an opcode in bits 31-28, a register number in bits 27-16 and immediate
 * data in bits 15-0 (decoded in rtl_phy_write_fw()).
 */
2141 #define PHY_READ 0x00000000
2142 #define PHY_DATA_OR 0x10000000
2143 #define PHY_DATA_AND 0x20000000
2144 #define PHY_BJMPN 0x30000000
2145 #define PHY_READ_EFUSE 0x40000000
2146 #define PHY_READ_MAC_BYTE 0x50000000
2147 #define PHY_WRITE_MAC_BYTE 0x60000000
2148 #define PHY_CLEAR_READCOUNT 0x70000000
2149 #define PHY_WRITE 0x80000000
2150 #define PHY_READCOUNT_EQ_SKIP 0x90000000
2151 #define PHY_COMP_EQ_SKIPN 0xa0000000
2152 #define PHY_COMP_NEQ_SKIPN 0xb0000000
2153 #define PHY_WRITE_PREVIOUS 0xc0000000
2154 #define PHY_SKIPN 0xd0000000
2155 #define PHY_DELAY_MS 0xe0000000
2156 #define PHY_WRITE_ERI_WORD 0xf0000000
2160 char version[RTL_VER_SIZE];
2166 #define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
/*
 * Validate the loaded firmware image and locate its PHY action bytecode.
 * Two on-disk layouts are supported: a headered format (fw_info with
 * checksum, version string, start offset and length) and a legacy raw
 * format that is pure bytecode.  Fills rtl_fw->version and phy_action.
 * NOTE(review): several early-return/goto lines (the `false` exits) and the
 * checksum validation branch are elided in this view.
 */
2168 static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2170 const struct firmware *fw = rtl_fw->fw;
2171 struct fw_info *fw_info = (struct fw_info *)fw->data;
2172 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2173 char *version = rtl_fw->version;
/* Too small to contain even one opcode: reject. */
2176 if (fw->size < FW_OPCODE_SIZE)
/* magic == 0 selects the new headered format. */
2179 if (!fw_info->magic) {
2180 size_t i, size, start;
2183 if (fw->size < sizeof(*fw_info))
/* Whole-file 8-bit checksum must come out to the expected value. */
2186 for (i = 0; i < fw->size; i++)
2187 checksum += fw->data[i];
/* Bounds-check the declared bytecode region against the file size. */
2191 start = le32_to_cpu(fw_info->fw_start);
2192 if (start > fw->size)
2195 size = le32_to_cpu(fw_info->fw_len);
2196 if (size > (fw->size - start) / FW_OPCODE_SIZE)
2199 memcpy(version, fw_info->version, RTL_VER_SIZE);
2201 pa->code = (__le32 *)(fw->data + start);
/* Legacy raw format: the file must be a whole number of opcodes. */
2204 if (fw->size % FW_OPCODE_SIZE)
2207 strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);
2209 pa->code = (__le32 *)fw->data;
2210 pa->size = fw->size / FW_OPCODE_SIZE;
/* Defensive NUL-termination of the version string. */
2212 version[RTL_VER_SIZE - 1] = 0;
/*
 * Static verifier for the firmware bytecode: every opcode must be known,
 * and every backward jump / skip target must stay inside the program.
 * Unsupported opcodes (MAC byte and ERI word accesses) are rejected.
 * NOTE(review): the `return false` lines after each netif_err and the final
 * `return true` are elided in this view.
 */
2219 static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
2220 struct rtl_fw_phy_action *pa)
2225 for (index = 0; index < pa->size; index++) {
2226 u32 action = le32_to_cpu(pa->code[index]);
2227 u32 regno = (action & 0x0fff0000) >> 16;
2229 switch(action & 0xf0000000) {
2233 case PHY_READ_EFUSE:
2234 case PHY_CLEAR_READCOUNT:
2236 case PHY_WRITE_PREVIOUS:
/* PHY_BJMPN: backward jump must not go before the program start. */
2241 if (regno > index) {
2242 netif_err(tp, ifup, tp->dev,
2243 "Out of range of firmware\n");
2247 case PHY_READCOUNT_EQ_SKIP:
/* Skipping 2 opcodes must stay within the program. */
2248 if (index + 2 >= pa->size) {
2249 netif_err(tp, ifup, tp->dev,
2250 "Out of range of firmware\n");
2254 case PHY_COMP_EQ_SKIPN:
2255 case PHY_COMP_NEQ_SKIPN:
/* Skipping regno+1 opcodes must stay within the program. */
2257 if (index + 1 + regno >= pa->size) {
2258 netif_err(tp, ifup, tp->dev,
2259 "Out of range of firmware\n");
2264 case PHY_READ_MAC_BYTE:
2265 case PHY_WRITE_MAC_BYTE:
2266 case PHY_WRITE_ERI_WORD:
/* default: unknown opcode */
2268 netif_err(tp, ifup, tp->dev,
2269 "Invalid action 0x%08x\n", action);
2278 static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2280 struct net_device *dev = tp->dev;
2283 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2284 netif_err(tp, ifup, dev, "invalid firwmare\n");
2288 if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
/*
 * Interpreter for the firmware PHY bytecode validated by rtl_fw_data_ok():
 * walks the opcode array, maintaining `predata` (last value read) and
 * `count` (read counter) used by the conditional-skip opcodes.
 * NOTE(review): many opcode bodies (PHY_DATA_OR/AND, PHY_BJMPN, PHY_SKIPN,
 * PHY_DELAY_MS, break/index++ statements, the BUG() default) are elided in
 * this view.
 */
2294 static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2296 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2300 predata = count = 0;
2302 for (index = 0; index < pa->size; ) {
2303 u32 action = le32_to_cpu(pa->code[index]);
2304 u32 data = action & 0x0000ffff;
2305 u32 regno = (action & 0x0fff0000) >> 16;
2310 switch(action & 0xf0000000) {
/* PHY_READ */
2312 predata = rtl_readphy(tp, regno);
2327 case PHY_READ_EFUSE:
2328 predata = rtl8168d_efuse_read(tp, regno);
2331 case PHY_CLEAR_READCOUNT:
/* PHY_WRITE */
2336 rtl_writephy(tp, regno, data);
2339 case PHY_READCOUNT_EQ_SKIP:
/* Skip the next opcode when the read count matches the immediate. */
2340 index += (count == data) ? 2 : 1;
2342 case PHY_COMP_EQ_SKIPN:
2343 if (predata == data)
2347 case PHY_COMP_NEQ_SKIPN:
2348 if (predata != data)
2352 case PHY_WRITE_PREVIOUS:
2353 rtl_writephy(tp, regno, predata);
/* Opcodes rejected by the verifier; unreachable here. */
2364 case PHY_READ_MAC_BYTE:
2365 case PHY_WRITE_MAC_BYTE:
2366 case PHY_WRITE_ERI_WORD:
/*
 * Drop the loaded firmware (if any) and reset tp->rtl_fw to the "unknown"
 * sentinel so a later open can retry the request.
 * NOTE(review): the kfree(tp->rtl_fw) between release and reset is elided in
 * this view.
 */
2373 static void rtl_release_firmware(struct rtl8169_private *tp)
2375 if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
2376 release_firmware(tp->rtl_fw->fw);
2379 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
/* Run the firmware PHY bytecode when a valid firmware image is loaded. */
2382 static void rtl_apply_firmware(struct rtl8169_private *tp)
2384 struct rtl_fw *rtl_fw = tp->rtl_fw;
2386 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
2387 if (!IS_ERR_OR_NULL(rtl_fw))
2388 rtl_phy_write_fw(tp, rtl_fw);
/*
 * Apply firmware only when PHY register @reg reads back the expected @val;
 * otherwise warn that the chipset is not in the state firmware expects.
 * NOTE(review): the `else` between the warn and the apply is elided in this
 * view — rtl_apply_firmware() is presumably the else branch.
 */
2391 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2393 if (rtl_readphy(tp, reg) != val)
2394 netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2396 rtl_apply_firmware(tp);
/* PHY init for RTL8169S: play back a vendor register table (elided here). */
2399 static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
2401 static const struct phy_reg phy_reg_init[] = {
2463 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* PHY init for RTL8169SB: play back a vendor register table (elided here). */
2466 static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
2468 static const struct phy_reg phy_reg_init[] = {
2474 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/*
 * Board-specific PHY quirk: applies only to a Gigabyte board identified by
 * PCI subsystem vendor/device; all other boards bail out early.
 * NOTE(review): the `return;` of the early-exit is elided in this view.
 */
2477 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2479 struct pci_dev *pdev = tp->pci_dev;
2481 if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2482 (pdev->subsystem_device != 0xe000))
2485 rtl_writephy(tp, 0x1f, 0x0001);
2486 rtl_writephy(tp, 0x10, 0xf01b);
2487 rtl_writephy(tp, 0x1f, 0x0000);
/* PHY init for RTL8169SCd: vendor table (elided here) plus board quirk. */
2490 static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
2492 static const struct phy_reg phy_reg_init[] = {
2532 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2534 rtl8169scd_hw_phy_config_quirk(tp);
/* PHY init for RTL8169SCe: play back a vendor register table (elided here). */
2537 static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
2539 static const struct phy_reg phy_reg_init[] = {
2587 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* PHY init for RTL8168Bb: set a PHY bit on page 1, then a vendor table. */
2590 static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
2592 static const struct phy_reg phy_reg_init[] = {
2597 rtl_writephy(tp, 0x1f, 0x0001);
2598 rtl_patchphy(tp, 0x16, 1 << 0);
2600 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* PHY init for RTL8168Be/Bf: play back a vendor register table (elided). */
2603 static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
2605 static const struct phy_reg phy_reg_init[] = {
2611 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* PHY init for RTL8168CP (variant 1): vendor register table (elided). */
2614 static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
2616 static const struct phy_reg phy_reg_init[] = {
2624 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* PHY init for RTL8168CP (variant 2): two bit patches plus a vendor table. */
2627 static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
2629 static const struct phy_reg phy_reg_init[] = {
2635 rtl_writephy(tp, 0x1f, 0x0000);
2636 rtl_patchphy(tp, 0x14, 1 << 5);
2637 rtl_patchphy(tp, 0x0d, 1 << 5);
2639 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* PHY init for RTL8168C (variant 1): vendor table, then two bit patches. */
2642 static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
2644 static const struct phy_reg phy_reg_init[] = {
2664 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2666 rtl_patchphy(tp, 0x14, 1 << 5);
2667 rtl_patchphy(tp, 0x0d, 1 << 5);
2668 rtl_writephy(tp, 0x1f, 0x0000);
/* PHY init for RTL8168C (variant 2): vendor table, then three bit patches. */
2671 static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
2673 static const struct phy_reg phy_reg_init[] = {
2691 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2693 rtl_patchphy(tp, 0x16, 1 << 0);
2694 rtl_patchphy(tp, 0x14, 1 << 5);
2695 rtl_patchphy(tp, 0x0d, 1 << 5);
2696 rtl_writephy(tp, 0x1f, 0x0000);
/* PHY init for RTL8168C (variant 3): vendor table, then three bit patches. */
2699 static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
2701 static const struct phy_reg phy_reg_init[] = {
2713 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2715 rtl_patchphy(tp, 0x16, 1 << 0);
2716 rtl_patchphy(tp, 0x14, 1 << 5);
2717 rtl_patchphy(tp, 0x0d, 1 << 5);
2718 rtl_writephy(tp, 0x1f, 0x0000);
/* PHY init for RTL8168C (variant 4): identical to variant 3. */
2721 static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
2723 rtl8168c_3_hw_phy_config(tp);
/*
 * PHY init for RTL8168D (variant 1).  Plays back a base vendor table (mostly
 * elided in this view), tunes the switching regulator, then branches on an
 * efuse byte: value 0xb1 selects one tuning table and an 0x0d register sweep,
 * anything else a different table.  Finishes with RSET coupling, PLL tuning
 * and conditional firmware application.
 */
2726 static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2728 static const struct phy_reg phy_reg_init_0[] = {
2729 /* Channel Estimation */
2750 * Enhance line driver power
2759 * Can not link to 1Gbps with bad cable
2760 * Decrease SNR threshold form 21.07dB to 19.04dB
2769 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2773 * Fine Tune Switching regulator parameter
2775 rtl_writephy(tp, 0x1f, 0x0002);
2776 rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
2777 rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
/* Efuse byte 0x01 distinguishes two PHY revisions needing different tuning. */
2779 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2780 static const struct phy_reg phy_reg_init[] = {
2790 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2792 val = rtl_readphy(tp, 0x0d);
/* Sweep reg 0x0d through 0x65..0x6c unless it already ends at 0x6c. */
2794 if ((val & 0x00ff) != 0x006c) {
2795 static const u32 set[] = {
2796 0x0065, 0x0066, 0x0067, 0x0068,
2797 0x0069, 0x006a, 0x006b, 0x006c
2801 rtl_writephy(tp, 0x1f, 0x0002);
2804 for (i = 0; i < ARRAY_SIZE(set); i++)
2805 rtl_writephy(tp, 0x0d, val | set[i]);
2808 static const struct phy_reg phy_reg_init[] = {
2816 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2819 /* RSET couple improve */
2820 rtl_writephy(tp, 0x1f, 0x0002);
2821 rtl_patchphy(tp, 0x0d, 0x0300);
2822 rtl_patchphy(tp, 0x0f, 0x0010);
2824 /* Fine tune PLL performance */
2825 rtl_writephy(tp, 0x1f, 0x0002);
2826 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2827 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2829 rtl_writephy(tp, 0x1f, 0x0005);
2830 rtl_writephy(tp, 0x05, 0x001b);
/* Only load firmware if the PHY reports the expected revision. */
2832 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
2834 rtl_writephy(tp, 0x1f, 0x0000);
/*
 * PHY init for RTL8168D (variant 2).  Same structure as variant 1 — base
 * table, efuse-dependent tuning and 0x0d sweep, PLL fine tuning — but with a
 * regulator slew-rate tweak instead of RSET coupling, and a different
 * firmware-condition value (0xb300).
 */
2837 static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
2839 static const struct phy_reg phy_reg_init_0[] = {
2840 /* Channel Estimation */
2861 * Enhance line driver power
2870 * Can not link to 1Gbps with bad cable
2871 * Decrease SNR threshold form 21.07dB to 19.04dB
2880 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2882 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2883 static const struct phy_reg phy_reg_init[] = {
2894 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2896 val = rtl_readphy(tp, 0x0d);
2897 if ((val & 0x00ff) != 0x006c) {
2898 static const u32 set[] = {
2899 0x0065, 0x0066, 0x0067, 0x0068,
2900 0x0069, 0x006a, 0x006b, 0x006c
2904 rtl_writephy(tp, 0x1f, 0x0002);
2907 for (i = 0; i < ARRAY_SIZE(set); i++)
2908 rtl_writephy(tp, 0x0d, val | set[i]);
2911 static const struct phy_reg phy_reg_init[] = {
2919 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2922 /* Fine tune PLL performance */
2923 rtl_writephy(tp, 0x1f, 0x0002);
2924 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2925 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2927 /* Switching regulator Slew rate */
2928 rtl_writephy(tp, 0x1f, 0x0002);
2929 rtl_patchphy(tp, 0x0f, 0x0017);
2931 rtl_writephy(tp, 0x1f, 0x0005);
2932 rtl_writephy(tp, 0x05, 0x001b);
2934 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
2936 rtl_writephy(tp, 0x1f, 0x0000);
/* PHY init for RTL8168D (variant 3): vendor register table (elided here). */
2939 static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
2941 static const struct phy_reg phy_reg_init[] = {
2997 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* PHY init for RTL8168D (variant 4): small vendor table plus one bit patch. */
3000 static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
3002 static const struct phy_reg phy_reg_init[] = {
3012 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3013 rtl_patchphy(tp, 0x0d, 1 << 5);
/*
 * PHY init for RTL8168E (variant 1): load firmware, play back the vendor
 * table (partly elided here), then a series of page-switched tweaks —
 * 10M idle power, impedance matching, auto speed down, EEE settings.
 */
3016 static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
3018 static const struct phy_reg phy_reg_init[] = {
3019 /* Enable Delay cap */
3025 /* Channel estimation fine tune */
3034 /* Update PFM & 10M TX idle timer */
3046 rtl_apply_firmware(tp);
3048 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3050 /* DCO enable for 10M IDLE Power */
3051 rtl_writephy(tp, 0x1f, 0x0007);
3052 rtl_writephy(tp, 0x1e, 0x0023);
3053 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
3054 rtl_writephy(tp, 0x1f, 0x0000);
3056 /* For impedance matching */
3057 rtl_writephy(tp, 0x1f, 0x0002);
3058 rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
3059 rtl_writephy(tp, 0x1f, 0x0000);
3061 /* PHY auto speed down */
3062 rtl_writephy(tp, 0x1f, 0x0007);
3063 rtl_writephy(tp, 0x1e, 0x002d);
3064 rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
3065 rtl_writephy(tp, 0x1f, 0x0000);
3066 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3068 rtl_writephy(tp, 0x1f, 0x0005);
3069 rtl_writephy(tp, 0x05, 0x8b86);
3070 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3071 rtl_writephy(tp, 0x1f, 0x0000);
/* EEE-related configuration (page 5 / page 7 sequences). */
3073 rtl_writephy(tp, 0x1f, 0x0005);
3074 rtl_writephy(tp, 0x05, 0x8b85);
3075 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3076 rtl_writephy(tp, 0x1f, 0x0007);
3077 rtl_writephy(tp, 0x1e, 0x0020);
3078 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
3079 rtl_writephy(tp, 0x1f, 0x0006);
3080 rtl_writephy(tp, 0x00, 0x5a00);
3081 rtl_writephy(tp, 0x1f, 0x0000);
3082 rtl_writephy(tp, 0x0d, 0x0007);
3083 rtl_writephy(tp, 0x0e, 0x003c);
3084 rtl_writephy(tp, 0x0d, 0x4007);
3085 rtl_writephy(tp, 0x0e, 0x0000);
3086 rtl_writephy(tp, 0x0d, 0x0000);
/*
 * Program the MAC address into the extended GMAC registers: the six address
 * bytes are packed little-endian into three 16-bit words and written to the
 * 0xe0/0xe4 and 0xf0/0xf4 register pairs.
 * NOTE(review): the `const u16 w[] = {` declaration line introducing the
 * initializers below is elided in this view.
 */
3089 static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
3092 addr[0] | (addr[1] << 8),
3093 addr[2] | (addr[3] << 8),
3094 addr[4] | (addr[5] << 8)
3096 const struct exgmac_reg e[] = {
3097 { .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) },
3098 { .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] },
3099 { .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 },
3100 { .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) }
3103 rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
3106 static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
3108 static const struct phy_reg phy_reg_init[] = {
3109 /* Enable Delay cap */
3118 /* Channel estimation fine tune */
3135 rtl_apply_firmware(tp);
3137 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3139 /* For 4-corner performance improve */
3140 rtl_writephy(tp, 0x1f, 0x0005);
3141 rtl_writephy(tp, 0x05, 0x8b80);
3142 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
3143 rtl_writephy(tp, 0x1f, 0x0000);
3145 /* PHY auto speed down */
3146 rtl_writephy(tp, 0x1f, 0x0004);
3147 rtl_writephy(tp, 0x1f, 0x0007);
3148 rtl_writephy(tp, 0x1e, 0x002d);
3149 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3150 rtl_writephy(tp, 0x1f, 0x0002);
3151 rtl_writephy(tp, 0x1f, 0x0000);
3152 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3154 /* improve 10M EEE waveform */
3155 rtl_writephy(tp, 0x1f, 0x0005);
3156 rtl_writephy(tp, 0x05, 0x8b86);
3157 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3158 rtl_writephy(tp, 0x1f, 0x0000);
3160 /* Improve 2-pair detection performance */
3161 rtl_writephy(tp, 0x1f, 0x0005);
3162 rtl_writephy(tp, 0x05, 0x8b85);
3163 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3164 rtl_writephy(tp, 0x1f, 0x0000);
3167 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
3168 rtl_writephy(tp, 0x1f, 0x0005);
3169 rtl_writephy(tp, 0x05, 0x8b85);
3170 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3171 rtl_writephy(tp, 0x1f, 0x0004);
3172 rtl_writephy(tp, 0x1f, 0x0007);
3173 rtl_writephy(tp, 0x1e, 0x0020);
3174 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
3175 rtl_writephy(tp, 0x1f, 0x0002);
3176 rtl_writephy(tp, 0x1f, 0x0000);
3177 rtl_writephy(tp, 0x0d, 0x0007);
3178 rtl_writephy(tp, 0x0e, 0x003c);
3179 rtl_writephy(tp, 0x0d, 0x4007);
3180 rtl_writephy(tp, 0x0e, 0x0000);
3181 rtl_writephy(tp, 0x0d, 0x0000);
3184 rtl_writephy(tp, 0x1f, 0x0003);
3185 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3186 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3187 rtl_writephy(tp, 0x1f, 0x0000);
3189 /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
3190 rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
3193 static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
3195 /* For 4-corner performance improve */
3196 rtl_writephy(tp, 0x1f, 0x0005);
3197 rtl_writephy(tp, 0x05, 0x8b80);
3198 rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
3199 rtl_writephy(tp, 0x1f, 0x0000);
3201 /* PHY auto speed down */
3202 rtl_writephy(tp, 0x1f, 0x0007);
3203 rtl_writephy(tp, 0x1e, 0x002d);
3204 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3205 rtl_writephy(tp, 0x1f, 0x0000);
3206 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3208 /* Improve 10M EEE waveform */
3209 rtl_writephy(tp, 0x1f, 0x0005);
3210 rtl_writephy(tp, 0x05, 0x8b86);
3211 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3212 rtl_writephy(tp, 0x1f, 0x0000);
3215 static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
3217 static const struct phy_reg phy_reg_init[] = {
3218 /* Channel estimation fine tune */
3223 /* Modify green table for giga & fnet */
3240 /* Modify green table for 10M */
3246 /* Disable hiimpedance detection (RTCT) */
3252 rtl_apply_firmware(tp);
3254 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3256 rtl8168f_hw_phy_config(tp);
3258 /* Improve 2-pair detection performance */
3259 rtl_writephy(tp, 0x1f, 0x0005);
3260 rtl_writephy(tp, 0x05, 0x8b85);
3261 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3262 rtl_writephy(tp, 0x1f, 0x0000);
3265 static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
3267 rtl_apply_firmware(tp);
3269 rtl8168f_hw_phy_config(tp);
3272 static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3274 static const struct phy_reg phy_reg_init[] = {
3275 /* Channel estimation fine tune */
3280 /* Modify green table for giga & fnet */
3297 /* Modify green table for 10M */
3303 /* Disable hiimpedance detection (RTCT) */
3310 rtl_apply_firmware(tp);
3312 rtl8168f_hw_phy_config(tp);
3314 /* Improve 2-pair detection performance */
3315 rtl_writephy(tp, 0x1f, 0x0005);
3316 rtl_writephy(tp, 0x05, 0x8b85);
3317 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3318 rtl_writephy(tp, 0x1f, 0x0000);
3320 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3322 /* Modify green table for giga */
3323 rtl_writephy(tp, 0x1f, 0x0005);
3324 rtl_writephy(tp, 0x05, 0x8b54);
3325 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3326 rtl_writephy(tp, 0x05, 0x8b5d);
3327 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3328 rtl_writephy(tp, 0x05, 0x8a7c);
3329 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3330 rtl_writephy(tp, 0x05, 0x8a7f);
3331 rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
3332 rtl_writephy(tp, 0x05, 0x8a82);
3333 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3334 rtl_writephy(tp, 0x05, 0x8a85);
3335 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3336 rtl_writephy(tp, 0x05, 0x8a88);
3337 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3338 rtl_writephy(tp, 0x1f, 0x0000);
3340 /* uc same-seed solution */
3341 rtl_writephy(tp, 0x1f, 0x0005);
3342 rtl_writephy(tp, 0x05, 0x8b85);
3343 rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
3344 rtl_writephy(tp, 0x1f, 0x0000);
3347 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
3348 rtl_writephy(tp, 0x1f, 0x0005);
3349 rtl_writephy(tp, 0x05, 0x8b85);
3350 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3351 rtl_writephy(tp, 0x1f, 0x0004);
3352 rtl_writephy(tp, 0x1f, 0x0007);
3353 rtl_writephy(tp, 0x1e, 0x0020);
3354 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
3355 rtl_writephy(tp, 0x1f, 0x0000);
3356 rtl_writephy(tp, 0x0d, 0x0007);
3357 rtl_writephy(tp, 0x0e, 0x003c);
3358 rtl_writephy(tp, 0x0d, 0x4007);
3359 rtl_writephy(tp, 0x0e, 0x0000);
3360 rtl_writephy(tp, 0x0d, 0x0000);
3363 rtl_writephy(tp, 0x1f, 0x0003);
3364 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3365 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3366 rtl_writephy(tp, 0x1f, 0x0000);
3369 static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
3371 static const u16 mac_ocp_patch[] = {
3372 0xe008, 0xe01b, 0xe01d, 0xe01f,
3373 0xe021, 0xe023, 0xe025, 0xe027,
3374 0x49d2, 0xf10d, 0x766c, 0x49e2,
3375 0xf00a, 0x1ec0, 0x8ee1, 0xc60a,
3377 0x77c0, 0x4870, 0x9fc0, 0x1ea0,
3378 0xc707, 0x8ee1, 0x9d6c, 0xc603,
3379 0xbe00, 0xb416, 0x0076, 0xe86c,
3380 0xc602, 0xbe00, 0x0000, 0xc602,
3382 0xbe00, 0x0000, 0xc602, 0xbe00,
3383 0x0000, 0xc602, 0xbe00, 0x0000,
3384 0xc602, 0xbe00, 0x0000, 0xc602,
3385 0xbe00, 0x0000, 0xc602, 0xbe00,
3387 0x0000, 0x0000, 0x0000, 0x0000
3391 /* Patch code for GPHY reset */
3392 for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
3393 r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
3394 r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
3395 r8168_mac_ocp_write(tp, 0xfc28, 0x0075);
3397 rtl_apply_firmware(tp);
3399 if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
3400 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
3402 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);
3404 if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
3405 rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
3407 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);
3409 rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
3410 rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);
3412 r8168_phy_ocp_write(tp, 0xa436, 0x8012);
3413 rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);
3415 rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
3418 static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
3420 static const struct phy_reg phy_reg_init[] = {
3427 rtl_writephy(tp, 0x1f, 0x0000);
3428 rtl_patchphy(tp, 0x11, 1 << 12);
3429 rtl_patchphy(tp, 0x19, 1 << 13);
3430 rtl_patchphy(tp, 0x10, 1 << 15);
3432 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3435 static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
3437 static const struct phy_reg phy_reg_init[] = {
3451 /* Disable ALDPS before ram code */
3452 rtl_writephy(tp, 0x1f, 0x0000);
3453 rtl_writephy(tp, 0x18, 0x0310);
3456 rtl_apply_firmware(tp);
3458 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3461 static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3463 /* Disable ALDPS before setting firmware */
3464 rtl_writephy(tp, 0x1f, 0x0000);
3465 rtl_writephy(tp, 0x18, 0x0310);
3468 rtl_apply_firmware(tp);
3471 rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3472 rtl_writephy(tp, 0x1f, 0x0004);
3473 rtl_writephy(tp, 0x10, 0x401f);
3474 rtl_writephy(tp, 0x19, 0x7030);
3475 rtl_writephy(tp, 0x1f, 0x0000);
3478 static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
3480 static const struct phy_reg phy_reg_init[] = {
3487 /* Disable ALDPS before ram code */
3488 rtl_writephy(tp, 0x1f, 0x0000);
3489 rtl_writephy(tp, 0x18, 0x0310);
3492 rtl_apply_firmware(tp);
3494 rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3495 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3497 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3500 static void rtl_hw_phy_config(struct net_device *dev)
3502 struct rtl8169_private *tp = netdev_priv(dev);
3504 rtl8169_print_mac_version(tp);
3506 switch (tp->mac_version) {
3507 case RTL_GIGA_MAC_VER_01:
3509 case RTL_GIGA_MAC_VER_02:
3510 case RTL_GIGA_MAC_VER_03:
3511 rtl8169s_hw_phy_config(tp);
3513 case RTL_GIGA_MAC_VER_04:
3514 rtl8169sb_hw_phy_config(tp);
3516 case RTL_GIGA_MAC_VER_05:
3517 rtl8169scd_hw_phy_config(tp);
3519 case RTL_GIGA_MAC_VER_06:
3520 rtl8169sce_hw_phy_config(tp);
3522 case RTL_GIGA_MAC_VER_07:
3523 case RTL_GIGA_MAC_VER_08:
3524 case RTL_GIGA_MAC_VER_09:
3525 rtl8102e_hw_phy_config(tp);
3527 case RTL_GIGA_MAC_VER_11:
3528 rtl8168bb_hw_phy_config(tp);
3530 case RTL_GIGA_MAC_VER_12:
3531 rtl8168bef_hw_phy_config(tp);
3533 case RTL_GIGA_MAC_VER_17:
3534 rtl8168bef_hw_phy_config(tp);
3536 case RTL_GIGA_MAC_VER_18:
3537 rtl8168cp_1_hw_phy_config(tp);
3539 case RTL_GIGA_MAC_VER_19:
3540 rtl8168c_1_hw_phy_config(tp);
3542 case RTL_GIGA_MAC_VER_20:
3543 rtl8168c_2_hw_phy_config(tp);
3545 case RTL_GIGA_MAC_VER_21:
3546 rtl8168c_3_hw_phy_config(tp);
3548 case RTL_GIGA_MAC_VER_22:
3549 rtl8168c_4_hw_phy_config(tp);
3551 case RTL_GIGA_MAC_VER_23:
3552 case RTL_GIGA_MAC_VER_24:
3553 rtl8168cp_2_hw_phy_config(tp);
3555 case RTL_GIGA_MAC_VER_25:
3556 rtl8168d_1_hw_phy_config(tp);
3558 case RTL_GIGA_MAC_VER_26:
3559 rtl8168d_2_hw_phy_config(tp);
3561 case RTL_GIGA_MAC_VER_27:
3562 rtl8168d_3_hw_phy_config(tp);
3564 case RTL_GIGA_MAC_VER_28:
3565 rtl8168d_4_hw_phy_config(tp);
3567 case RTL_GIGA_MAC_VER_29:
3568 case RTL_GIGA_MAC_VER_30:
3569 rtl8105e_hw_phy_config(tp);
3571 case RTL_GIGA_MAC_VER_31:
3574 case RTL_GIGA_MAC_VER_32:
3575 case RTL_GIGA_MAC_VER_33:
3576 rtl8168e_1_hw_phy_config(tp);
3578 case RTL_GIGA_MAC_VER_34:
3579 rtl8168e_2_hw_phy_config(tp);
3581 case RTL_GIGA_MAC_VER_35:
3582 rtl8168f_1_hw_phy_config(tp);
3584 case RTL_GIGA_MAC_VER_36:
3585 rtl8168f_2_hw_phy_config(tp);
3588 case RTL_GIGA_MAC_VER_37:
3589 rtl8402_hw_phy_config(tp);
3592 case RTL_GIGA_MAC_VER_38:
3593 rtl8411_hw_phy_config(tp);
3596 case RTL_GIGA_MAC_VER_39:
3597 rtl8106e_hw_phy_config(tp);
3600 case RTL_GIGA_MAC_VER_40:
3601 rtl8168g_1_hw_phy_config(tp);
3604 case RTL_GIGA_MAC_VER_41:
3610 static void rtl_phy_work(struct rtl8169_private *tp)
3612 struct timer_list *timer = &tp->timer;
3613 void __iomem *ioaddr = tp->mmio_addr;
3614 unsigned long timeout = RTL8169_PHY_TIMEOUT;
3616 assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
3618 if (tp->phy_reset_pending(tp)) {
3620 * A busy loop could burn quite a few cycles on nowadays CPU.
3621 * Let's delay the execution of the timer for a few ticks.
3627 if (tp->link_ok(ioaddr))
3630 netif_warn(tp, link, tp->dev, "PHY reset until link up\n");
3632 tp->phy_reset_enable(tp);
3635 mod_timer(timer, jiffies + timeout);
3638 static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
3640 if (!test_and_set_bit(flag, tp->wk.flags))
3641 schedule_work(&tp->wk.work);
3644 static void rtl8169_phy_timer(unsigned long __opaque)
3646 struct net_device *dev = (struct net_device *)__opaque;
3647 struct rtl8169_private *tp = netdev_priv(dev);
3649 rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
3652 static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
3653 void __iomem *ioaddr)
3656 pci_release_regions(pdev);
3657 pci_clear_mwi(pdev);
3658 pci_disable_device(pdev);
3662 DECLARE_RTL_COND(rtl_phy_reset_cond)
3664 return tp->phy_reset_pending(tp);
3667 static void rtl8169_phy_reset(struct net_device *dev,
3668 struct rtl8169_private *tp)
3670 tp->phy_reset_enable(tp);
3671 rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
3674 static bool rtl_tbi_enabled(struct rtl8169_private *tp)
3676 void __iomem *ioaddr = tp->mmio_addr;
3678 return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
3679 (RTL_R8(PHYstatus) & TBI_Enable);
3682 static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
3684 void __iomem *ioaddr = tp->mmio_addr;
3686 rtl_hw_phy_config(dev);
3688 if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
3689 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
3693 pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
3695 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
3696 pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
3698 if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
3699 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
3701 dprintk("Set PHY Reg 0x0bh = 0x00h\n");
3702 rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
3705 rtl8169_phy_reset(dev, tp);
3707 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
3708 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3709 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
3710 (tp->mii.supports_gmii ?
3711 ADVERTISED_1000baseT_Half |
3712 ADVERTISED_1000baseT_Full : 0));
3714 if (rtl_tbi_enabled(tp))
3715 netif_info(tp, link, dev, "TBI auto-negotiating\n");
3718 static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
3720 void __iomem *ioaddr = tp->mmio_addr;
3724 RTL_W8(Cfg9346, Cfg9346_Unlock);
3726 RTL_W32(MAC4, addr[4] | addr[5] << 8);
3729 RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
3732 if (tp->mac_version == RTL_GIGA_MAC_VER_34)
3733 rtl_rar_exgmac_set(tp, addr);
3735 RTL_W8(Cfg9346, Cfg9346_Lock);
3737 rtl_unlock_work(tp);
3740 static int rtl_set_mac_address(struct net_device *dev, void *p)
3742 struct rtl8169_private *tp = netdev_priv(dev);
3743 struct sockaddr *addr = p;
3745 if (!is_valid_ether_addr(addr->sa_data))
3746 return -EADDRNOTAVAIL;
3748 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3750 rtl_rar_set(tp, dev->dev_addr);
3755 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3757 struct rtl8169_private *tp = netdev_priv(dev);
3758 struct mii_ioctl_data *data = if_mii(ifr);
3760 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
3763 static int rtl_xmii_ioctl(struct rtl8169_private *tp,
3764 struct mii_ioctl_data *data, int cmd)
3768 data->phy_id = 32; /* Internal PHY */
3772 data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
3776 rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
3782 static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
3787 static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
3789 if (tp->features & RTL_FEATURE_MSI) {
3790 pci_disable_msi(pdev);
3791 tp->features &= ~RTL_FEATURE_MSI;
3795 static void rtl_init_mdio_ops(struct rtl8169_private *tp)
3797 struct mdio_ops *ops = &tp->mdio_ops;
3799 switch (tp->mac_version) {
3800 case RTL_GIGA_MAC_VER_27:
3801 ops->write = r8168dp_1_mdio_write;
3802 ops->read = r8168dp_1_mdio_read;
3804 case RTL_GIGA_MAC_VER_28:
3805 case RTL_GIGA_MAC_VER_31:
3806 ops->write = r8168dp_2_mdio_write;
3807 ops->read = r8168dp_2_mdio_read;
3809 case RTL_GIGA_MAC_VER_40:
3810 case RTL_GIGA_MAC_VER_41:
3811 ops->write = r8168g_mdio_write;
3812 ops->read = r8168g_mdio_read;
3815 ops->write = r8169_mdio_write;
3816 ops->read = r8169_mdio_read;
3821 static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3823 void __iomem *ioaddr = tp->mmio_addr;
3825 switch (tp->mac_version) {
3826 case RTL_GIGA_MAC_VER_25:
3827 case RTL_GIGA_MAC_VER_26:
3828 case RTL_GIGA_MAC_VER_29:
3829 case RTL_GIGA_MAC_VER_30:
3830 case RTL_GIGA_MAC_VER_32:
3831 case RTL_GIGA_MAC_VER_33:
3832 case RTL_GIGA_MAC_VER_34:
3833 case RTL_GIGA_MAC_VER_37:
3834 case RTL_GIGA_MAC_VER_38:
3835 case RTL_GIGA_MAC_VER_39:
3836 case RTL_GIGA_MAC_VER_40:
3837 case RTL_GIGA_MAC_VER_41:
3838 RTL_W32(RxConfig, RTL_R32(RxConfig) |
3839 AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
3846 static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
3848 if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
3851 rtl_writephy(tp, 0x1f, 0x0000);
3852 rtl_writephy(tp, MII_BMCR, 0x0000);
3854 rtl_wol_suspend_quirk(tp);
3859 static void r810x_phy_power_down(struct rtl8169_private *tp)
3861 rtl_writephy(tp, 0x1f, 0x0000);
3862 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
3865 static void r810x_phy_power_up(struct rtl8169_private *tp)
3867 rtl_writephy(tp, 0x1f, 0x0000);
3868 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
3871 static void r810x_pll_power_down(struct rtl8169_private *tp)
3873 void __iomem *ioaddr = tp->mmio_addr;
3875 if (rtl_wol_pll_power_down(tp))
3878 r810x_phy_power_down(tp);
3880 switch (tp->mac_version) {
3881 case RTL_GIGA_MAC_VER_07:
3882 case RTL_GIGA_MAC_VER_08:
3883 case RTL_GIGA_MAC_VER_09:
3884 case RTL_GIGA_MAC_VER_10:
3885 case RTL_GIGA_MAC_VER_13:
3886 case RTL_GIGA_MAC_VER_16:
3889 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
3894 static void r810x_pll_power_up(struct rtl8169_private *tp)
3896 void __iomem *ioaddr = tp->mmio_addr;
3898 r810x_phy_power_up(tp);
3900 switch (tp->mac_version) {
3901 case RTL_GIGA_MAC_VER_07:
3902 case RTL_GIGA_MAC_VER_08:
3903 case RTL_GIGA_MAC_VER_09:
3904 case RTL_GIGA_MAC_VER_10:
3905 case RTL_GIGA_MAC_VER_13:
3906 case RTL_GIGA_MAC_VER_16:
3909 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
3914 static void r8168_phy_power_up(struct rtl8169_private *tp)
3916 rtl_writephy(tp, 0x1f, 0x0000);
3917 switch (tp->mac_version) {
3918 case RTL_GIGA_MAC_VER_11:
3919 case RTL_GIGA_MAC_VER_12:
3920 case RTL_GIGA_MAC_VER_17:
3921 case RTL_GIGA_MAC_VER_18:
3922 case RTL_GIGA_MAC_VER_19:
3923 case RTL_GIGA_MAC_VER_20:
3924 case RTL_GIGA_MAC_VER_21:
3925 case RTL_GIGA_MAC_VER_22:
3926 case RTL_GIGA_MAC_VER_23:
3927 case RTL_GIGA_MAC_VER_24:
3928 case RTL_GIGA_MAC_VER_25:
3929 case RTL_GIGA_MAC_VER_26:
3930 case RTL_GIGA_MAC_VER_27:
3931 case RTL_GIGA_MAC_VER_28:
3932 case RTL_GIGA_MAC_VER_31:
3933 rtl_writephy(tp, 0x0e, 0x0000);
3938 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
3941 static void r8168_phy_power_down(struct rtl8169_private *tp)
3943 rtl_writephy(tp, 0x1f, 0x0000);
3944 switch (tp->mac_version) {
3945 case RTL_GIGA_MAC_VER_32:
3946 case RTL_GIGA_MAC_VER_33:
3947 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
3950 case RTL_GIGA_MAC_VER_11:
3951 case RTL_GIGA_MAC_VER_12:
3952 case RTL_GIGA_MAC_VER_17:
3953 case RTL_GIGA_MAC_VER_18:
3954 case RTL_GIGA_MAC_VER_19:
3955 case RTL_GIGA_MAC_VER_20:
3956 case RTL_GIGA_MAC_VER_21:
3957 case RTL_GIGA_MAC_VER_22:
3958 case RTL_GIGA_MAC_VER_23:
3959 case RTL_GIGA_MAC_VER_24:
3960 case RTL_GIGA_MAC_VER_25:
3961 case RTL_GIGA_MAC_VER_26:
3962 case RTL_GIGA_MAC_VER_27:
3963 case RTL_GIGA_MAC_VER_28:
3964 case RTL_GIGA_MAC_VER_31:
3965 rtl_writephy(tp, 0x0e, 0x0200);
3967 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
3972 static void r8168_pll_power_down(struct rtl8169_private *tp)
3974 void __iomem *ioaddr = tp->mmio_addr;
3976 if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
3977 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
3978 tp->mac_version == RTL_GIGA_MAC_VER_31) &&
3979 r8168dp_check_dash(tp)) {
3983 if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
3984 tp->mac_version == RTL_GIGA_MAC_VER_24) &&
3985 (RTL_R16(CPlusCmd) & ASF)) {
3989 if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
3990 tp->mac_version == RTL_GIGA_MAC_VER_33)
3991 rtl_ephy_write(tp, 0x19, 0xff64);
3993 if (rtl_wol_pll_power_down(tp))
3996 r8168_phy_power_down(tp);
3998 switch (tp->mac_version) {
3999 case RTL_GIGA_MAC_VER_25:
4000 case RTL_GIGA_MAC_VER_26:
4001 case RTL_GIGA_MAC_VER_27:
4002 case RTL_GIGA_MAC_VER_28:
4003 case RTL_GIGA_MAC_VER_31:
4004 case RTL_GIGA_MAC_VER_32:
4005 case RTL_GIGA_MAC_VER_33:
4006 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
4011 static void r8168_pll_power_up(struct rtl8169_private *tp)
4013 void __iomem *ioaddr = tp->mmio_addr;
4015 switch (tp->mac_version) {
4016 case RTL_GIGA_MAC_VER_25:
4017 case RTL_GIGA_MAC_VER_26:
4018 case RTL_GIGA_MAC_VER_27:
4019 case RTL_GIGA_MAC_VER_28:
4020 case RTL_GIGA_MAC_VER_31:
4021 case RTL_GIGA_MAC_VER_32:
4022 case RTL_GIGA_MAC_VER_33:
4023 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
4027 r8168_phy_power_up(tp);
4030 static void rtl_generic_op(struct rtl8169_private *tp,
4031 void (*op)(struct rtl8169_private *))
4037 static void rtl_pll_power_down(struct rtl8169_private *tp)
4039 rtl_generic_op(tp, tp->pll_power_ops.down);
4042 static void rtl_pll_power_up(struct rtl8169_private *tp)
4044 rtl_generic_op(tp, tp->pll_power_ops.up);
4047 static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
4049 struct pll_power_ops *ops = &tp->pll_power_ops;
4051 switch (tp->mac_version) {
4052 case RTL_GIGA_MAC_VER_07:
4053 case RTL_GIGA_MAC_VER_08:
4054 case RTL_GIGA_MAC_VER_09:
4055 case RTL_GIGA_MAC_VER_10:
4056 case RTL_GIGA_MAC_VER_16:
4057 case RTL_GIGA_MAC_VER_29:
4058 case RTL_GIGA_MAC_VER_30:
4059 case RTL_GIGA_MAC_VER_37:
4060 case RTL_GIGA_MAC_VER_39:
4061 ops->down = r810x_pll_power_down;
4062 ops->up = r810x_pll_power_up;
4065 case RTL_GIGA_MAC_VER_11:
4066 case RTL_GIGA_MAC_VER_12:
4067 case RTL_GIGA_MAC_VER_17:
4068 case RTL_GIGA_MAC_VER_18:
4069 case RTL_GIGA_MAC_VER_19:
4070 case RTL_GIGA_MAC_VER_20:
4071 case RTL_GIGA_MAC_VER_21:
4072 case RTL_GIGA_MAC_VER_22:
4073 case RTL_GIGA_MAC_VER_23:
4074 case RTL_GIGA_MAC_VER_24:
4075 case RTL_GIGA_MAC_VER_25:
4076 case RTL_GIGA_MAC_VER_26:
4077 case RTL_GIGA_MAC_VER_27:
4078 case RTL_GIGA_MAC_VER_28:
4079 case RTL_GIGA_MAC_VER_31:
4080 case RTL_GIGA_MAC_VER_32:
4081 case RTL_GIGA_MAC_VER_33:
4082 case RTL_GIGA_MAC_VER_34:
4083 case RTL_GIGA_MAC_VER_35:
4084 case RTL_GIGA_MAC_VER_36:
4085 case RTL_GIGA_MAC_VER_38:
4086 case RTL_GIGA_MAC_VER_40:
4087 case RTL_GIGA_MAC_VER_41:
4088 ops->down = r8168_pll_power_down;
4089 ops->up = r8168_pll_power_up;
4099 static void rtl_init_rxcfg(struct rtl8169_private *tp)
4101 void __iomem *ioaddr = tp->mmio_addr;
4103 switch (tp->mac_version) {
4104 case RTL_GIGA_MAC_VER_01:
4105 case RTL_GIGA_MAC_VER_02:
4106 case RTL_GIGA_MAC_VER_03:
4107 case RTL_GIGA_MAC_VER_04:
4108 case RTL_GIGA_MAC_VER_05:
4109 case RTL_GIGA_MAC_VER_06:
4110 case RTL_GIGA_MAC_VER_10:
4111 case RTL_GIGA_MAC_VER_11:
4112 case RTL_GIGA_MAC_VER_12:
4113 case RTL_GIGA_MAC_VER_13:
4114 case RTL_GIGA_MAC_VER_14:
4115 case RTL_GIGA_MAC_VER_15:
4116 case RTL_GIGA_MAC_VER_16:
4117 case RTL_GIGA_MAC_VER_17:
4118 RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
4120 case RTL_GIGA_MAC_VER_18:
4121 case RTL_GIGA_MAC_VER_19:
4122 case RTL_GIGA_MAC_VER_20:
4123 case RTL_GIGA_MAC_VER_21:
4124 case RTL_GIGA_MAC_VER_22:
4125 case RTL_GIGA_MAC_VER_23:
4126 case RTL_GIGA_MAC_VER_24:
4127 case RTL_GIGA_MAC_VER_34:
4128 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
4131 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
4136 static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
4138 tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
4141 static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
4143 void __iomem *ioaddr = tp->mmio_addr;
4145 RTL_W8(Cfg9346, Cfg9346_Unlock);
4146 rtl_generic_op(tp, tp->jumbo_ops.enable);
4147 RTL_W8(Cfg9346, Cfg9346_Lock);
4150 static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
4152 void __iomem *ioaddr = tp->mmio_addr;
4154 RTL_W8(Cfg9346, Cfg9346_Unlock);
4155 rtl_generic_op(tp, tp->jumbo_ops.disable);
4156 RTL_W8(Cfg9346, Cfg9346_Lock);
4159 static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
4161 void __iomem *ioaddr = tp->mmio_addr;
4163 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4164 RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
4165 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
4168 static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
4170 void __iomem *ioaddr = tp->mmio_addr;
4172 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
4173 RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
4174 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
4177 static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
4179 void __iomem *ioaddr = tp->mmio_addr;
4181 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4184 static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
4186 void __iomem *ioaddr = tp->mmio_addr;
4188 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
4191 static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
4193 void __iomem *ioaddr = tp->mmio_addr;
4195 RTL_W8(MaxTxPacketSize, 0x3f);
4196 RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4197 RTL_W8(Config4, RTL_R8(Config4) | 0x01);
4198 rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
4201 static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
4203 void __iomem *ioaddr = tp->mmio_addr;
4205 RTL_W8(MaxTxPacketSize, 0x0c);
4206 RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
4207 RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
4208 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
4211 static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
4213 rtl_tx_performance_tweak(tp->pci_dev,
4214 (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
4217 static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
4219 rtl_tx_performance_tweak(tp->pci_dev,
4220 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
4223 static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
4225 void __iomem *ioaddr = tp->mmio_addr;
4227 r8168b_0_hw_jumbo_enable(tp);
4229 RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
4232 static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
4234 void __iomem *ioaddr = tp->mmio_addr;
4236 r8168b_0_hw_jumbo_disable(tp);
4238 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
4241 static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
4243 struct jumbo_ops *ops = &tp->jumbo_ops;
4245 switch (tp->mac_version) {
4246 case RTL_GIGA_MAC_VER_11:
4247 ops->disable = r8168b_0_hw_jumbo_disable;
4248 ops->enable = r8168b_0_hw_jumbo_enable;
4250 case RTL_GIGA_MAC_VER_12:
4251 case RTL_GIGA_MAC_VER_17:
4252 ops->disable = r8168b_1_hw_jumbo_disable;
4253 ops->enable = r8168b_1_hw_jumbo_enable;
4255 case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
4256 case RTL_GIGA_MAC_VER_19:
4257 case RTL_GIGA_MAC_VER_20:
4258 case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
4259 case RTL_GIGA_MAC_VER_22:
4260 case RTL_GIGA_MAC_VER_23:
4261 case RTL_GIGA_MAC_VER_24:
4262 case RTL_GIGA_MAC_VER_25:
4263 case RTL_GIGA_MAC_VER_26:
4264 ops->disable = r8168c_hw_jumbo_disable;
4265 ops->enable = r8168c_hw_jumbo_enable;
4267 case RTL_GIGA_MAC_VER_27:
4268 case RTL_GIGA_MAC_VER_28:
4269 ops->disable = r8168dp_hw_jumbo_disable;
4270 ops->enable = r8168dp_hw_jumbo_enable;
4272 case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
4273 case RTL_GIGA_MAC_VER_32:
4274 case RTL_GIGA_MAC_VER_33:
4275 case RTL_GIGA_MAC_VER_34:
4276 ops->disable = r8168e_hw_jumbo_disable;
4277 ops->enable = r8168e_hw_jumbo_enable;
4281 * No action needed for jumbo frames with 8169.
4282 * No jumbo for 810x at all.
4284 case RTL_GIGA_MAC_VER_40:
4285 case RTL_GIGA_MAC_VER_41:
4287 ops->disable = NULL;
4293 DECLARE_RTL_COND(rtl_chipcmd_cond)
4295 void __iomem *ioaddr = tp->mmio_addr;
4297 return RTL_R8(ChipCmd) & CmdReset;
4300 static void rtl_hw_reset(struct rtl8169_private *tp)
4302 void __iomem *ioaddr = tp->mmio_addr;
4304 RTL_W8(ChipCmd, CmdReset);
4306 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
4309 static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
4311 struct rtl_fw *rtl_fw;
4315 name = rtl_lookup_firmware_name(tp);
4317 goto out_no_firmware;
4319 rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
4323 rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
4327 rc = rtl_check_firmware(tp, rtl_fw);
4329 goto err_release_firmware;
4331 tp->rtl_fw = rtl_fw;
4335 err_release_firmware:
4336 release_firmware(rtl_fw->fw);
4340 netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
4347 static void rtl_request_firmware(struct rtl8169_private *tp)
4349 if (IS_ERR(tp->rtl_fw))
4350 rtl_request_uncached_firmware(tp);
4353 static void rtl_rx_close(struct rtl8169_private *tp)
4355 void __iomem *ioaddr = tp->mmio_addr;
4357 RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
4360 DECLARE_RTL_COND(rtl_npq_cond)
4362 void __iomem *ioaddr = tp->mmio_addr;
4364 return RTL_R8(TxPoll) & NPQ;
4367 DECLARE_RTL_COND(rtl_txcfg_empty_cond)
4369 void __iomem *ioaddr = tp->mmio_addr;
4371 return RTL_R32(TxConfig) & TXCFG_EMPTY;
4374 static void rtl8169_hw_reset(struct rtl8169_private *tp)
4376 void __iomem *ioaddr = tp->mmio_addr;
4378 /* Disable interrupts */
4379 rtl8169_irq_mask_and_ack(tp);
4383 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
4384 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
4385 tp->mac_version == RTL_GIGA_MAC_VER_31) {
4386 rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
4387 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
4388 tp->mac_version == RTL_GIGA_MAC_VER_35 ||
4389 tp->mac_version == RTL_GIGA_MAC_VER_36 ||
4390 tp->mac_version == RTL_GIGA_MAC_VER_37 ||
4391 tp->mac_version == RTL_GIGA_MAC_VER_40 ||
4392 tp->mac_version == RTL_GIGA_MAC_VER_41 ||
4393 tp->mac_version == RTL_GIGA_MAC_VER_38) {
4394 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
4395 rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
4397 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
4404 static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
4406 void __iomem *ioaddr = tp->mmio_addr;
4408 /* Set DMA burst size and Interframe Gap Time */
4409 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
4410 (InterFrameGap << TxInterFrameGapShift));
4413 static void rtl_hw_start(struct net_device *dev)
4415 struct rtl8169_private *tp = netdev_priv(dev);
4419 rtl_irq_enable_all(tp);
4422 static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
4423 void __iomem *ioaddr)
4426 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
4427 * register to be written before TxDescAddrLow to work.
4428 * Switching from MMIO to I/O access fixes the issue as well.
4430 RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
4431 RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
4432 RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
4433 RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
4436 static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
4440 cmd = RTL_R16(CPlusCmd);
4441 RTL_W16(CPlusCmd, cmd);
4445 static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
4447 /* Low hurts. Let's disable the filtering. */
4448 RTL_W16(RxMaxSize, rx_buf_sz + 1);
4451 static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
4453 static const struct rtl_cfg2_info {
4458 { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
4459 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
4460 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
4461 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
4463 const struct rtl_cfg2_info *p = cfg2_info;
4467 clk = RTL_R8(Config2) & PCI_Clock_66MHz;
4468 for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
4469 if ((p->mac_version == mac_version) && (p->clk == clk)) {
4470 RTL_W32(0x7c, p->val);
/* Configure Rx filtering (promiscuous / all-multi / hashed multicast) and
 * program the 64-bit multicast hash into MAR0. */
4476 static void rtl_set_rx_mode(struct net_device *dev)
4478 	struct rtl8169_private *tp = netdev_priv(dev);
4479 	void __iomem *ioaddr = tp->mmio_addr;
4480 	u32 mc_filter[2]; /* Multicast hash filter */
4484 	if (dev->flags & IFF_PROMISC) {
4485 		/* Unconditionally log net taps. */
4486 		netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
4488 		AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
4490 		mc_filter[1] = mc_filter[0] = 0xffffffff;
4491 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
4492 	(dev->flags & IFF_ALLMULTI)) {
4493 		/* Too many to filter perfectly -- accept all multicasts. */
4494 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
4495 		mc_filter[1] = mc_filter[0] = 0xffffffff;
4497 		struct netdev_hw_addr *ha;
4499 		rx_mode = AcceptBroadcast | AcceptMyPhys;
4500 		mc_filter[1] = mc_filter[0] = 0;
4501 		netdev_for_each_mc_addr(ha, dev) {
/* Top 6 bits of the CRC select one of 64 hash bins. */
4502 			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
4503 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
4504 			rx_mode |= AcceptMulticast;
4508 	if (dev->features & NETIF_F_RXALL)
4509 		rx_mode |= (AcceptErr | AcceptRunt);
4511 	tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
/* Chips newer than VER_06 expect the hash words byte-swapped and swapped
 * with each other. */
4513 	if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
4514 		u32 data = mc_filter[0];
4516 		mc_filter[0] = swab32(mc_filter[1]);
4517 		mc_filter[1] = swab32(data);
/* VER_35 unconditionally accepts all multicast regardless of the hash. */
4520 	if (tp->mac_version == RTL_GIGA_MAC_VER_35)
4521 		mc_filter[1] = mc_filter[0] = 0xffffffff;
4523 	RTL_W32(MAR0 + 4, mc_filter[1]);
4524 	RTL_W32(MAR0 + 0, mc_filter[0]);
4526 	RTL_W32(RxConfig, tmp);
/* Hardware start sequence for the original 8169 family (VER_01..VER_06).
 * The register write ordering below is chip-version dependent and must not
 * be rearranged: some versions enable Tx/Rx before the config registers,
 * others after. */
4529 static void rtl_hw_start_8169(struct net_device *dev)
4531 	struct rtl8169_private *tp = netdev_priv(dev);
4532 	void __iomem *ioaddr = tp->mmio_addr;
4533 	struct pci_dev *pdev = tp->pci_dev;
4535 	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
4536 		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
4537 		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4540 	RTL_W8(Cfg9346, Cfg9346_Unlock);
4541 	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4542 	tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4543 	tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4544 	tp->mac_version == RTL_GIGA_MAC_VER_04)
4545 		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4549 	RTL_W8(EarlyTxThres, NoEarlyTx);
4551 	rtl_set_rx_max_size(ioaddr, rx_buf_sz);
4553 	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4554 	tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4555 	tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4556 	tp->mac_version == RTL_GIGA_MAC_VER_04)
4557 		rtl_set_rx_tx_config_registers(tp);
4559 	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
4561 	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4562 	tp->mac_version == RTL_GIGA_MAC_VER_03) {
4563 		dprintk("Set MAC Reg C+CR Offset 0xE0. "
4564 		"Bit-3 and bit-14 MUST be 1\n");
4565 		tp->cp_cmd |= (1 << 14);
4568 	RTL_W16(CPlusCmd, tp->cp_cmd);
4570 	rtl8169_set_magic_reg(ioaddr, tp->mac_version);
4573 	* Undocumented corner. Supposedly:
4574 	* (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
4576 	RTL_W16(IntrMitigate, 0x0000);
4578 	rtl_set_rx_tx_desc_registers(tp, ioaddr);
/* Later versions enable Tx/Rx only after the descriptor rings are set. */
4580 	if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
4581 	tp->mac_version != RTL_GIGA_MAC_VER_02 &&
4582 	tp->mac_version != RTL_GIGA_MAC_VER_03 &&
4583 	tp->mac_version != RTL_GIGA_MAC_VER_04) {
4584 		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4585 		rtl_set_rx_tx_config_registers(tp);
4588 	RTL_W8(Cfg9346, Cfg9346_Lock);
4590 	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
4593 	RTL_W32(RxMissed, 0);
4595 	rtl_set_rx_mode(dev);
4597 	/* no early-rx interrupts */
4598 	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
/* Indirect CSI write: dispatch through the per-chip ops, no-op if unset. */
4601 static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4603 	if (tp->csi_ops.write)
4604 		tp->csi_ops.write(tp, addr, value);
/* Indirect CSI read: dispatch through the per-chip ops, ~0 if unset. */
4607 static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4609 	return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
/* Read-modify-write CSI register 0x070c: clear the top byte, OR in bits. */
4612 static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
4616 	csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
4617 	rtl_csi_write(tp, 0x070c, csi | bits);
/* CSI access-enable variant writing 0x17 into the top byte of 0x070c. */
4620 static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
4622 	rtl_csi_access_enable(tp, 0x17000000);
/* CSI access-enable variant writing 0x27 into the top byte of 0x070c. */
4625 static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
4627 	rtl_csi_access_enable(tp, 0x27000000);
/* Poll condition: CSIAR busy/done flag, used by the udelay wait loops. */
4630 DECLARE_RTL_COND(rtl_csiar_cond)
4632 	void __iomem *ioaddr = tp->mmio_addr;
4634 	return RTL_R32(CSIAR) & CSIAR_FLAG;
/* Generic CSI write: load CSIDR, issue the write command via CSIAR, then
 * busy-wait (10 us steps, 100 tries) for the flag to drop. */
4637 static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
4639 	void __iomem *ioaddr = tp->mmio_addr;
4641 	RTL_W32(CSIDR, value);
4642 	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4643 	CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4645 	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
/* Generic CSI read: issue the read via CSIAR, wait for completion and
 * return CSIDR, or ~0 on timeout. */
4648 static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
4650 	void __iomem *ioaddr = tp->mmio_addr;
4652 	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
4653 	CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4655 	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
4656 	RTL_R32(CSIDR) : ~0;
/* 8402-family CSI write; same as r8169_csi_write but with the extra
 * function-select bit (elided line 4666 presumably ORs CSIAR_FUNC_NIC —
 * TODO confirm against full source). */
4659 static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
4661 	void __iomem *ioaddr = tp->mmio_addr;
4663 	RTL_W32(CSIDR, value);
4664 	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4665 	CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
4668 	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
/* 8402-family CSI read: like r8169_csi_read but selects the NIC function
 * explicitly via CSIAR_FUNC_NIC. */
4671 static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
4673 	void __iomem *ioaddr = tp->mmio_addr;
4675 	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
4676 	CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4678 	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
4679 	RTL_R32(CSIDR) : ~0;
/* Select the CSI accessor pair for this chip: none for pre-PCIe versions
 * (VER_01..VER_17 leave ops NULL), r8402 accessors for VER_37/VER_38, and
 * the generic r8169 accessors for everything else. */
4682 static void rtl_init_csi_ops(struct rtl8169_private *tp)
4684 	struct csi_ops *ops = &tp->csi_ops;
4686 	switch (tp->mac_version) {
4687 	case RTL_GIGA_MAC_VER_01:
4688 	case RTL_GIGA_MAC_VER_02:
4689 	case RTL_GIGA_MAC_VER_03:
4690 	case RTL_GIGA_MAC_VER_04:
4691 	case RTL_GIGA_MAC_VER_05:
4692 	case RTL_GIGA_MAC_VER_06:
4693 	case RTL_GIGA_MAC_VER_10:
4694 	case RTL_GIGA_MAC_VER_11:
4695 	case RTL_GIGA_MAC_VER_12:
4696 	case RTL_GIGA_MAC_VER_13:
4697 	case RTL_GIGA_MAC_VER_14:
4698 	case RTL_GIGA_MAC_VER_15:
4699 	case RTL_GIGA_MAC_VER_16:
4700 	case RTL_GIGA_MAC_VER_17:
4705 	case RTL_GIGA_MAC_VER_37:
4706 	case RTL_GIGA_MAC_VER_38:
4707 		ops->write = r8402_csi_write;
4708 		ops->read = r8402_csi_read;
4712 		ops->write = r8169_csi_write;
4713 		ops->read = r8169_csi_read;
4719 unsigned int offset;
/* Apply a table of ephy fixups: for each entry, clear e->mask bits and set
 * e->bits in the register at e->offset (read-modify-write). */
4724 static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
4730 		w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
4731 		rtl_ephy_write(tp, e->offset, w);
/* Clear the PCIe Link Control CLKREQ enable bit. */
4736 static void rtl_disable_clock_request(struct pci_dev *pdev)
4738 	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
4739 	PCI_EXP_LNKCTL_CLKREQ_EN);
/* Set the PCIe Link Control CLKREQ enable bit. */
4742 static void rtl_enable_clock_request(struct pci_dev *pdev)
4744 	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
4745 	PCI_EXP_LNKCTL_CLKREQ_EN);
4748 #define R8168_CPCMD_QUIRK_MASK (\
/* 8168B "bb" init: disable beacon, clear the CPlusCmd quirk bits, and for
 * standard MTU apply the Tx performance tweak with no-snoop enabled. */
4759 static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
4761 	void __iomem *ioaddr = tp->mmio_addr;
4762 	struct pci_dev *pdev = tp->pci_dev;
4764 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4766 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4768 	if (tp->dev->mtu <= ETH_DATA_LEN) {
4769 		rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) |
4770 		PCI_EXP_DEVCTL_NOSNOOP_EN);
/* 8168B "ef" init: bb sequence plus max Tx packet size and clearing
 * Config4 bit 0. */
4774 static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
4776 	void __iomem *ioaddr = tp->mmio_addr;
4778 	rtl_hw_start_8168bb(tp);
4780 	RTL_W8(MaxTxPacketSize, TxPacketMax);
4782 	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
/* Common tail of 8168C/CP init: speed-down, no beacon, MTU-conditional Tx
 * tweak, CLKREQ off, and the CPlusCmd quirk-mask clear. */
4785 static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
4787 	void __iomem *ioaddr = tp->mmio_addr;
4788 	struct pci_dev *pdev = tp->pci_dev;
4790 	RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
4792 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4794 	if (tp->dev->mtu <= ETH_DATA_LEN)
4795 		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4797 	rtl_disable_clock_request(pdev);
4799 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168CP rev 1: CSI enable, ephy fixup table, then common CP tail. */
4802 static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
4804 	static const struct ephy_info e_info_8168cp[] = {
4805 		{ 0x01, 0, 0x0001 },
4806 		{ 0x02, 0x0800, 0x1000 },
4807 		{ 0x03, 0, 0x0042 },
4808 		{ 0x06, 0x0080, 0x0000 },
4812 	rtl_csi_access_enable_2(tp);
4814 	rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
4816 	__rtl_hw_start_8168cp(tp);
/* 8168CP rev 2: no ephy table; CSI enable, beacon off, MTU-conditional
 * Tx tweak and quirk-mask clear (no Speed_down / clock-request change). */
4819 static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
4821 	void __iomem *ioaddr = tp->mmio_addr;
4822 	struct pci_dev *pdev = tp->pci_dev;
4824 	rtl_csi_access_enable_2(tp);
4826 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4828 	if (tp->dev->mtu <= ETH_DATA_LEN)
4829 		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4831 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168CP rev 3: like rev 2 plus a DBG_REG magic write and max Tx size. */
4834 static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
4836 	void __iomem *ioaddr = tp->mmio_addr;
4837 	struct pci_dev *pdev = tp->pci_dev;
4839 	rtl_csi_access_enable_2(tp);
4841 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4844 	RTL_W8(DBG_REG, 0x20);
4846 	RTL_W8(MaxTxPacketSize, TxPacketMax);
4848 	if (tp->dev->mtu <= ETH_DATA_LEN)
4849 		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4851 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168C rev 1: CSI enable, NAK workaround bits in DBG_REG, ephy table,
 * then common CP tail. */
4854 static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
4856 	void __iomem *ioaddr = tp->mmio_addr;
4857 	static const struct ephy_info e_info_8168c_1[] = {
4858 		{ 0x02, 0x0800, 0x1000 },
4859 		{ 0x03, 0, 0x0002 },
4860 		{ 0x06, 0x0080, 0x0000 }
4863 	rtl_csi_access_enable_2(tp);
4865 	RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
4867 	rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
4869 	__rtl_hw_start_8168cp(tp);
/* 8168C rev 2: CSI enable, two-entry ephy table, common CP tail. */
4872 static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
4874 	static const struct ephy_info e_info_8168c_2[] = {
4875 		{ 0x01, 0, 0x0001 },
4876 		{ 0x03, 0x0400, 0x0220 }
4879 	rtl_csi_access_enable_2(tp);
4881 	rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
4883 	__rtl_hw_start_8168cp(tp);
/* 8168C rev 3 is identical to rev 2. */
4886 static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
4888 	rtl_hw_start_8168c_2(tp);
/* 8168C rev 4: no ephy fixups, just CSI enable and the common CP tail. */
4891 static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
4893 	rtl_csi_access_enable_2(tp);
4895 	__rtl_hw_start_8168cp(tp);
/* 8168D init: CSI enable, CLKREQ off, max Tx size, MTU-conditional Tx
 * tweak, quirk-mask clear. */
4898 static void rtl_hw_start_8168d(struct rtl8169_private *tp)
4900 	void __iomem *ioaddr = tp->mmio_addr;
4901 	struct pci_dev *pdev = tp->pci_dev;
4903 	rtl_csi_access_enable_2(tp);
4905 	rtl_disable_clock_request(pdev);
4907 	RTL_W8(MaxTxPacketSize, TxPacketMax);
4909 	if (tp->dev->mtu <= ETH_DATA_LEN)
4910 		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4912 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168DP init: uses CSI enable variant 1, otherwise like 8168d but
 * without the CPlusCmd quirk-mask clear. */
4915 static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
4917 	void __iomem *ioaddr = tp->mmio_addr;
4918 	struct pci_dev *pdev = tp->pci_dev;
4920 	rtl_csi_access_enable_1(tp);
4922 	if (tp->dev->mtu <= ETH_DATA_LEN)
4923 		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4925 	RTL_W8(MaxTxPacketSize, TxPacketMax);
4927 	rtl_disable_clock_request(pdev);
/* 8168D rev 4 init: CSI enable, Tx tweak, max Tx size, then apply the
 * ephy fixup table and re-enable CLKREQ.
 *
 * Fix: the loop reads from e->offset but the original code wrote every
 * entry back to the hard-coded ephy register 0x03, so entries for other
 * offsets (e.g. 0x19 below) were applied to the wrong register. Write
 * back to e->offset, matching rtl_ephy_init()'s behavior. Note this
 * table's mask semantics are (w & mask) | bits, unlike rtl_ephy_init's
 * (w & ~mask) | bits, so the open-coded loop is kept. */
4930 static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
4932 	void __iomem *ioaddr = tp->mmio_addr;
4933 	struct pci_dev *pdev = tp->pci_dev;
4934 	static const struct ephy_info e_info_8168d_4[] = {
4936 		{ 0x19, 0x20, 0x50 },
4941 	rtl_csi_access_enable_1(tp);
4943 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4945 	RTL_W8(MaxTxPacketSize, TxPacketMax);
4947 	for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
4948 		const struct ephy_info *e = e_info_8168d_4 + i;
4951 		w = rtl_ephy_read(tp, e->offset);
4952 		rtl_ephy_write(tp, e->offset, (w & e->mask) | e->bits);
4955 	rtl_enable_clock_request(pdev);
/* 8168E rev 1 init: large ephy fixup table, MTU-conditional Tx tweak,
 * CLKREQ off, Tx FIFO pointer reset via the TXPLA_RST pulse, SPI off. */
4958 static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
4960 	void __iomem *ioaddr = tp->mmio_addr;
4961 	struct pci_dev *pdev = tp->pci_dev;
4962 	static const struct ephy_info e_info_8168e_1[] = {
4963 		{ 0x00, 0x0200, 0x0100 },
4964 		{ 0x00, 0x0000, 0x0004 },
4965 		{ 0x06, 0x0002, 0x0001 },
4966 		{ 0x06, 0x0000, 0x0030 },
4967 		{ 0x07, 0x0000, 0x2000 },
4968 		{ 0x00, 0x0000, 0x0020 },
4969 		{ 0x03, 0x5800, 0x2000 },
4970 		{ 0x03, 0x0000, 0x0001 },
4971 		{ 0x01, 0x0800, 0x1000 },
4972 		{ 0x07, 0x0000, 0x4000 },
4973 		{ 0x1e, 0x0000, 0x2000 },
4974 		{ 0x19, 0xffff, 0xfe6c },
4975 		{ 0x0a, 0x0000, 0x0040 }
4978 	rtl_csi_access_enable_2(tp);
4980 	rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
4982 	if (tp->dev->mtu <= ETH_DATA_LEN)
4983 		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4985 	RTL_W8(MaxTxPacketSize, TxPacketMax);
4987 	rtl_disable_clock_request(pdev);
4989 	/* Reset tx FIFO pointer */
4990 	RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
4991 	RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);
4993 	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
/* 8168E rev 2 init: small ephy table plus a series of ERI (extended
 * register interface) writes, auto-FIFO Tx config, EEE LED frequency
 * adjustment and power-management bits. */
4996 static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
4998 	void __iomem *ioaddr = tp->mmio_addr;
4999 	struct pci_dev *pdev = tp->pci_dev;
5000 	static const struct ephy_info e_info_8168e_2[] = {
5001 		{ 0x09, 0x0000, 0x0080 },
5002 		{ 0x19, 0x0000, 0x0224 }
5005 	rtl_csi_access_enable_1(tp);
5007 	rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
5009 	if (tp->dev->mtu <= ETH_DATA_LEN)
5010 		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5012 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5013 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5014 	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
5015 	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5016 	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
5017 	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
5018 	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5019 	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
5021 	RTL_W8(MaxTxPacketSize, EarlySize);
5023 	rtl_disable_clock_request(pdev);
5025 	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5026 	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5028 	/* Adjust EEE LED frequency */
5029 	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5031 	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5032 	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
5033 	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
/* Common 8168F init shared by 8168f_1 and 8411: CSI enable, unconditional
 * Tx tweak, ERI setup (note the 0xdc bit-0 toggle sequence), auto-FIFO
 * and power bits. */
5036 static void rtl_hw_start_8168f(struct rtl8169_private *tp)
5038 	void __iomem *ioaddr = tp->mmio_addr;
5039 	struct pci_dev *pdev = tp->pci_dev;
5041 	rtl_csi_access_enable_2(tp);
5043 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5045 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5046 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5047 	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
5048 	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5049 	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5050 	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5051 	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5052 	rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5053 	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
5054 	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
5056 	RTL_W8(MaxTxPacketSize, EarlySize);
5058 	rtl_disable_clock_request(pdev);
5060 	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5061 	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5062 	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5063 	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
5064 	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
/* 8168F rev 1: common 8168f init, its own ephy table, an extra ERI
 * write-1/write-0, and EEE LED frequency adjust. */
5067 static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
5069 	void __iomem *ioaddr = tp->mmio_addr;
5070 	static const struct ephy_info e_info_8168f_1[] = {
5071 		{ 0x06, 0x00c0, 0x0020 },
5072 		{ 0x08, 0x0001, 0x0002 },
5073 		{ 0x09, 0x0000, 0x0080 },
5074 		{ 0x19, 0x0000, 0x0224 }
5077 	rtl_hw_start_8168f(tp);
5079 	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5081 	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
5083 	/* Adjust EEE LED frequency */
5084 	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
/* 8411: common 8168f init with a different ephy table and a different
 * clear-mask (0x0000 vs 0xff00) on the final ERI 0x0d4 tweak. */
5087 static void rtl_hw_start_8411(struct rtl8169_private *tp)
5089 	static const struct ephy_info e_info_8168f_1[] = {
5090 		{ 0x06, 0x00c0, 0x0020 },
5091 		{ 0x0f, 0xffff, 0x5200 },
5092 		{ 0x1e, 0x0000, 0x4000 },
5093 		{ 0x19, 0x0000, 0x0224 }
5096 	rtl_hw_start_8168f(tp);
5098 	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5100 	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
/* 8168G rev 1: ERI setup precedes CSI enable here (order differs from the
 * E/F paths), ungate RXDV, early Tx size, and EEE LED adjust. */
5103 static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5105 	void __iomem *ioaddr = tp->mmio_addr;
5106 	struct pci_dev *pdev = tp->pci_dev;
5108 	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
5109 	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
5110 	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
5111 	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5113 	rtl_csi_access_enable_1(tp);
5115 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5117 	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5118 	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5120 	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5121 	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
5122 	RTL_W8(MaxTxPacketSize, EarlySize);
5124 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5125 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5127 	/* Adjust EEE LED frequency */
5128 	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5130 	rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
/* Hardware start for the 8168 family: common setup (unlock, sizes,
 * CPlusCmd, interrupt mitigation, rings, rx mode, TxConfig), then a
 * per-mac_version dispatch to the chip-specific routine, then Tx/Rx
 * enable and lock. */
5133 static void rtl_hw_start_8168(struct net_device *dev)
5135 	struct rtl8169_private *tp = netdev_priv(dev);
5136 	void __iomem *ioaddr = tp->mmio_addr;
5138 	RTL_W8(Cfg9346, Cfg9346_Unlock);
5140 	RTL_W8(MaxTxPacketSize, TxPacketMax);
5142 	rtl_set_rx_max_size(ioaddr, rx_buf_sz);
5144 	tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
5146 	RTL_W16(CPlusCmd, tp->cp_cmd);
5148 	RTL_W16(IntrMitigate, 0x5151);
5150 	/* Work around for RxFIFO overflow. */
5151 	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
5152 		tp->event_slow |= RxFIFOOver | PCSTimeout;
5153 		tp->event_slow &= ~RxOverflow;
5156 	rtl_set_rx_tx_desc_registers(tp, ioaddr);
5158 	rtl_set_rx_mode(dev);
5160 	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
5161 	(InterFrameGap << TxInterFrameGapShift));
5165 	switch (tp->mac_version) {
5166 	case RTL_GIGA_MAC_VER_11:
5167 		rtl_hw_start_8168bb(tp);
5170 	case RTL_GIGA_MAC_VER_12:
5171 	case RTL_GIGA_MAC_VER_17:
5172 		rtl_hw_start_8168bef(tp);
5175 	case RTL_GIGA_MAC_VER_18:
5176 		rtl_hw_start_8168cp_1(tp);
5179 	case RTL_GIGA_MAC_VER_19:
5180 		rtl_hw_start_8168c_1(tp);
5183 	case RTL_GIGA_MAC_VER_20:
5184 		rtl_hw_start_8168c_2(tp);
5187 	case RTL_GIGA_MAC_VER_21:
5188 		rtl_hw_start_8168c_3(tp);
5191 	case RTL_GIGA_MAC_VER_22:
5192 		rtl_hw_start_8168c_4(tp);
5195 	case RTL_GIGA_MAC_VER_23:
5196 		rtl_hw_start_8168cp_2(tp);
5199 	case RTL_GIGA_MAC_VER_24:
5200 		rtl_hw_start_8168cp_3(tp);
5203 	case RTL_GIGA_MAC_VER_25:
5204 	case RTL_GIGA_MAC_VER_26:
5205 	case RTL_GIGA_MAC_VER_27:
5206 		rtl_hw_start_8168d(tp);
5209 	case RTL_GIGA_MAC_VER_28:
5210 		rtl_hw_start_8168d_4(tp);
5213 	case RTL_GIGA_MAC_VER_31:
5214 		rtl_hw_start_8168dp(tp);
5217 	case RTL_GIGA_MAC_VER_32:
5218 	case RTL_GIGA_MAC_VER_33:
5219 		rtl_hw_start_8168e_1(tp);
5221 	case RTL_GIGA_MAC_VER_34:
5222 		rtl_hw_start_8168e_2(tp);
5225 	case RTL_GIGA_MAC_VER_35:
5226 	case RTL_GIGA_MAC_VER_36:
5227 		rtl_hw_start_8168f_1(tp);
5230 	case RTL_GIGA_MAC_VER_38:
5231 		rtl_hw_start_8411(tp);
5234 	case RTL_GIGA_MAC_VER_40:
5235 	case RTL_GIGA_MAC_VER_41:
5236 		rtl_hw_start_8168g_1(tp);
5240 		printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
5241 		dev->name, tp->mac_version);
5245 	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5247 	RTL_W8(Cfg9346, Cfg9346_Lock);
5249 	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
5252 #define R810X_CPCMD_QUIRK_MASK (\
/* 8102E rev 1: CSI enable, NAK workaround, Tx tweak, Config1/3 setup,
 * LED quirk (drop LEDS0 if both LED bits set), then ephy table. */
5263 static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
5265 	void __iomem *ioaddr = tp->mmio_addr;
5266 	struct pci_dev *pdev = tp->pci_dev;
5267 	static const struct ephy_info e_info_8102e_1[] = {
5268 		{ 0x01, 0, 0x6e65 },
5269 		{ 0x02, 0, 0x091f },
5270 		{ 0x03, 0, 0xc2f9 },
5271 		{ 0x06, 0, 0xafb5 },
5272 		{ 0x07, 0, 0x0e00 },
5273 		{ 0x19, 0, 0xec80 },
5274 		{ 0x01, 0, 0x2e65 },
5279 	rtl_csi_access_enable_2(tp);
5281 	RTL_W8(DBG_REG, FIX_NAK_1);
5283 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5286 	LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
5287 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
5289 	cfg1 = RTL_R8(Config1);
5290 	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
5291 		RTL_W8(Config1, cfg1 & ~LEDS0);
5293 	rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
/* 8102E rev 2: CSI enable, Tx tweak, minimal Config1/3 setup. */
5296 static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
5298 	void __iomem *ioaddr = tp->mmio_addr;
5299 	struct pci_dev *pdev = tp->pci_dev;
5301 	rtl_csi_access_enable_2(tp);
5303 	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5305 	RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
5306 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
/* 8102E rev 3: rev 2 plus a single ephy write to register 0x03. */
5309 static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
5311 	rtl_hw_start_8102e_2(tp);
5313 	rtl_ephy_write(tp, 0x03, 0xc2f9);
/* 8105E rev 1: ASPM force-exit, early tally counter off, MCU OOB bits,
 * power-feature enable, then ephy table. */
5316 static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
5318 	void __iomem *ioaddr = tp->mmio_addr;
5319 	static const struct ephy_info e_info_8105e_1[] = {
5320 		{ 0x07, 0, 0x4000 },
5321 		{ 0x19, 0, 0x0200 },
5322 		{ 0x19, 0, 0x0020 },
5323 		{ 0x1e, 0, 0x2000 },
5324 		{ 0x03, 0, 0x0001 },
5325 		{ 0x19, 0, 0x0100 },
5326 		{ 0x19, 0, 0x0004 },
5330 	/* Force LAN exit from ASPM if Rx/Tx are not idle */
5331 	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5333 	/* Disable Early Tally Counter */
5334 	RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
5336 	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5337 	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5339 	rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
/* 8105E rev 2: rev 1 plus setting bit 15 of ephy register 0x1e. */
5342 static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
5344 	rtl_hw_start_8105e_1(tp);
5345 	rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
/* 8402 init: CSI enable, ASPM force-exit, auto-FIFO, ephy fixup, Tx
 * tweak and a series of ERI writes (0xdc bit-0 toggle included). */
5348 static void rtl_hw_start_8402(struct rtl8169_private *tp)
5350 	void __iomem *ioaddr = tp->mmio_addr;
5351 	static const struct ephy_info e_info_8402[] = {
5352 		{ 0x19, 0xffff, 0xff64 },
5356 	rtl_csi_access_enable_2(tp);
5358 	/* Force LAN exit from ASPM if Rx/Tx are not idle */
5359 	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5361 	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5362 	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5364 	rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
5366 	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
5368 	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
5369 	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
5370 	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5371 	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5372 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5373 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5374 	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
/* 8106 init: ASPM force-exit, LAN-disable + early-tally-off via MISC,
 * MCU OOB bits, and PFM disabled (unlike 8105e which enables it). */
5377 static void rtl_hw_start_8106(struct rtl8169_private *tp)
5379 	void __iomem *ioaddr = tp->mmio_addr;
5381 	/* Force LAN exit from ASPM if Rx/Tx are not idle */
5382 	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5384 	RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
5385 	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5386 	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
/* Hardware start for the 8101/810x family: event-mask adjustments,
 * no-snoop quirk for VER_13/16, per-chip dispatch between unlock/lock,
 * then the common ring/CPlusCmd/Rx-mode tail. */
5389 static void rtl_hw_start_8101(struct net_device *dev)
5391 	struct rtl8169_private *tp = netdev_priv(dev);
5392 	void __iomem *ioaddr = tp->mmio_addr;
5393 	struct pci_dev *pdev = tp->pci_dev;
5395 	if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
5396 		tp->event_slow &= ~RxFIFOOver;
5398 	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
5399 	tp->mac_version == RTL_GIGA_MAC_VER_16)
5400 		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,
5401 		PCI_EXP_DEVCTL_NOSNOOP_EN);
5403 	RTL_W8(Cfg9346, Cfg9346_Unlock);
5405 	switch (tp->mac_version) {
5406 	case RTL_GIGA_MAC_VER_07:
5407 		rtl_hw_start_8102e_1(tp);
5410 	case RTL_GIGA_MAC_VER_08:
5411 		rtl_hw_start_8102e_3(tp);
5414 	case RTL_GIGA_MAC_VER_09:
5415 		rtl_hw_start_8102e_2(tp);
5418 	case RTL_GIGA_MAC_VER_29:
5419 		rtl_hw_start_8105e_1(tp);
5421 	case RTL_GIGA_MAC_VER_30:
5422 		rtl_hw_start_8105e_2(tp);
5425 	case RTL_GIGA_MAC_VER_37:
5426 		rtl_hw_start_8402(tp);
5429 	case RTL_GIGA_MAC_VER_39:
5430 		rtl_hw_start_8106(tp);
5434 	RTL_W8(Cfg9346, Cfg9346_Lock);
5436 	RTL_W8(MaxTxPacketSize, TxPacketMax);
5438 	rtl_set_rx_max_size(ioaddr, rx_buf_sz);
5440 	tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
5441 	RTL_W16(CPlusCmd, tp->cp_cmd);
5443 	RTL_W16(IntrMitigate, 0x0000);
5445 	rtl_set_rx_tx_desc_registers(tp, ioaddr);
5447 	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5448 	rtl_set_rx_tx_config_registers(tp);
5452 	rtl_set_rx_mode(dev);
5454 	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
/* ndo_change_mtu: reject MTUs outside [ETH_ZLEN, chip jumbo_max], toggle
 * the jumbo-frame hardware mode, and let netdev re-evaluate features. */
5457 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
5459 	struct rtl8169_private *tp = netdev_priv(dev);
5461 	if (new_mtu < ETH_ZLEN ||
5462 	new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
5465 	if (new_mtu > ETH_DATA_LEN)
5466 		rtl_hw_jumbo_enable(tp);
5468 		rtl_hw_jumbo_disable(tp);
5471 	netdev_update_features(dev);
/* Poison an Rx descriptor so the NIC cannot use it: recognizable junk
 * address and ownership/reserved bits cleared. */
5476 static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
5478 	desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
5479 	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
/* Unmap one Rx buffer's DMA mapping and poison its descriptor; the
 * kfree/NULL-out of *data_buff is elided in this listing. */
5482 static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
5483 void **data_buff, struct RxDesc *desc)
5485 	dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
5490 	rtl8169_make_unusable_by_asic(desc);
/* Hand the descriptor to the NIC: set DescOwn and the buffer size while
 * preserving the RingEnd bit. */
5493 static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
5495 	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
5497 	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
/* Store the DMA address in the descriptor, then give it to the NIC. */
5500 static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
5503 	desc->addr = cpu_to_le64(mapping);
5505 	rtl8169_mark_to_asic(desc, rx_buf_sz);
/* Round a pointer up to the next 16-byte boundary. */
5508 static inline void *rtl8169_align(void *data)
5510 	return (void *)ALIGN((long)data, 16);
/* Allocate one NUMA-local Rx buffer, retrying with 15 bytes of slack if
 * the first allocation is not 16-byte aligned, map it for DMA and attach
 * it to the descriptor. Error paths (kfree/NULL return) are elided in
 * this listing. */
5513 static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
5514 struct RxDesc *desc)
5518 	struct device *d = &tp->pci_dev->dev;
5519 	struct net_device *dev = tp->dev;
5520 	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
5522 	data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
5526 	if (rtl8169_align(data) != data) {
5528 		data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
5533 	mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
5535 	if (unlikely(dma_mapping_error(d, mapping))) {
5536 		if (net_ratelimit())
5537 			netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
5541 	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
/* Free every allocated Rx buffer and poison its descriptor. */
5549 static void rtl8169_rx_clear(struct rtl8169_private *tp)
5553 	for (i = 0; i < NUM_RX_DESC; i++) {
5554 		if (tp->Rx_databuff[i]) {
5555 			rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
5556 			tp->RxDescArray + i);
/* Set the RingEnd bit on the final descriptor of the Rx ring. */
5561 static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
5563 	desc->opts1 |= cpu_to_le32(RingEnd);
/* Populate the Rx ring: allocate a buffer for each empty slot, mark the
 * last descriptor, and unwind via rtl8169_rx_clear on allocation failure
 * (the error path is partially elided in this listing). */
5566 static int rtl8169_rx_fill(struct rtl8169_private *tp)
5570 	for (i = 0; i < NUM_RX_DESC; i++) {
5573 		if (tp->Rx_databuff[i])
5576 		data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
5578 			rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
5581 		tp->Rx_databuff[i] = data;
5584 	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
5588 	rtl8169_rx_clear(tp);
/* Initialize both rings: reset indexes, zero the Tx bookkeeping and Rx
 * buffer pointers, then fill the Rx ring. */
5592 static int rtl8169_init_ring(struct net_device *dev)
5594 	struct rtl8169_private *tp = netdev_priv(dev);
5596 	rtl8169_init_ring_indexes(tp);
5598 	memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
5599 	memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
5601 	return rtl8169_rx_fill(tp);
/* Unmap a Tx buffer's DMA mapping; descriptor/bookkeeping clears are
 * elided in this listing. */
5604 static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
5605 struct TxDesc *desc)
5607 	unsigned int len = tx_skb->len;
5609 	dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
/* Drop n pending Tx entries starting at ring index start: unmap each
 * mapped buffer and count the dropped skb (free elided in this listing). */
5617 static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
5622 	for (i = 0; i < n; i++) {
5623 		unsigned int entry = (start + i) % NUM_TX_DESC;
5624 		struct ring_info *tx_skb = tp->tx_skb + entry;
5625 		unsigned int len = tx_skb->len;
5628 		struct sk_buff *skb = tx_skb->skb;
5630 		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
5631 		tp->TxDescArray + entry);
5633 		tp->dev->stats.tx_dropped++;
/* Drop the whole pending Tx ring and reset the Tx indexes. */
5641 static void rtl8169_tx_clear(struct rtl8169_private *tp)
5643 	rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
5644 	tp->cur_tx = tp->dirty_tx = 0;
/* Full device reset worker: quiesce NAPI and the Tx queue, reset the
 * chip, re-arm every Rx descriptor, flush pending Tx, restart, and
 * re-check link. synchronize_sched() ensures no softirq still touches
 * the rings before they are rewritten. */
5647 static void rtl_reset_work(struct rtl8169_private *tp)
5649 	struct net_device *dev = tp->dev;
5652 	napi_disable(&tp->napi);
5653 	netif_stop_queue(dev);
5654 	synchronize_sched();
5656 	rtl8169_hw_reset(tp);
5658 	for (i = 0; i < NUM_RX_DESC; i++)
5659 		rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
5661 	rtl8169_tx_clear(tp);
5662 	rtl8169_init_ring_indexes(tp);
5664 	napi_enable(&tp->napi);
5666 	netif_wake_queue(dev);
5667 	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
/* ndo_tx_timeout: defer recovery to the reset task. */
5670 static void rtl8169_tx_timeout(struct net_device *dev)
5672 	struct rtl8169_private *tp = netdev_priv(dev);
5674 	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/* Map each skb fragment into consecutive Tx descriptors. The skb pointer
 * and LastFrag are recorded only on the final fragment so completion
 * frees the skb exactly once. On a DMA mapping error, already-built
 * fragment descriptors are unwound via rtl8169_tx_clear_range. */
5677 static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
5680 	struct skb_shared_info *info = skb_shinfo(skb);
5681 	unsigned int cur_frag, entry;
5682 	struct TxDesc * uninitialized_var(txd);
5683 	struct device *d = &tp->pci_dev->dev;
5686 	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
5687 		const skb_frag_t *frag = info->frags + cur_frag;
5692 		entry = (entry + 1) % NUM_TX_DESC;
5694 		txd = tp->TxDescArray + entry;
5695 		len = skb_frag_size(frag);
5696 		addr = skb_frag_address(frag);
5697 		mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
5698 		if (unlikely(dma_mapping_error(d, mapping))) {
5699 			if (net_ratelimit())
5700 				netif_err(tp, drv, tp->dev,
5701 				"Failed to map TX fragments DMA!\n");
5705 		/* Anti gcc 2.95.3 bugware (sic) */
5706 		status = opts[0] | len |
5707 		(RingEnd * !((entry + 1) % NUM_TX_DESC));
5709 		txd->opts1 = cpu_to_le32(status);
5710 		txd->opts2 = cpu_to_le32(opts[1]);
5711 		txd->addr = cpu_to_le64(mapping);
5713 		tp->tx_skb[entry].len = len;
5717 	tp->tx_skb[entry].skb = skb;
5718 	txd->opts1 |= cpu_to_le32(LastFrag);
5724 	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
/* Fill the TSO/checksum-offload bits into the opts word selected by the
 * descriptor-version table: MSS for GSO frames, otherwise per-protocol
 * TCP/UDP checksum flags for CHECKSUM_PARTIAL. Only IPv4 headers are
 * inspected here (ip_hdr). */
5728 static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
5729 struct sk_buff *skb, u32 *opts)
5731 	const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
5732 	u32 mss = skb_shinfo(skb)->gso_size;
5733 	int offset = info->opts_offset;
5737 	opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
5738 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5739 		const struct iphdr *ip = ip_hdr(skb);
5741 		if (ip->protocol == IPPROTO_TCP)
5742 			opts[offset] |= info->checksum.tcp;
5743 		else if (ip->protocol == IPPROTO_UDP)
5744 			opts[offset] |= info->checksum.udp;
/* ndo_start_xmit: map the linear head, delegate fragments to
 * rtl8169_xmit_frags, build opts (VLAN tag + TSO/csum), publish the
 * first descriptor last (DescOwn ordering matters), kick the NIC via
 * TxPoll, and stop the queue when the ring cannot take a max-frag skb.
 * Error paths unmap/drop; the labels are elided in this listing. */
5750 static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5751 struct net_device *dev)
5753 	struct rtl8169_private *tp = netdev_priv(dev);
5754 	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
5755 	struct TxDesc *txd = tp->TxDescArray + entry;
5756 	void __iomem *ioaddr = tp->mmio_addr;
5757 	struct device *d = &tp->pci_dev->dev;
5763 	if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
5764 		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
5768 	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
5771 	len = skb_headlen(skb);
5772 	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
5773 	if (unlikely(dma_mapping_error(d, mapping))) {
5774 		if (net_ratelimit())
5775 			netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
5779 	tp->tx_skb[entry].len = len;
5780 	txd->addr = cpu_to_le64(mapping);
5782 	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
5785 	rtl8169_tso_csum(tp, skb, opts);
5787 	frags = rtl8169_xmit_frags(tp, skb, opts);
5791 	opts[0] |= FirstFrag;
5793 	opts[0] |= FirstFrag | LastFrag;
5794 	tp->tx_skb[entry].skb = skb;
5797 	txd->opts2 = cpu_to_le32(opts[1]);
5799 	skb_tx_timestamp(skb);
5803 	/* Anti gcc 2.95.3 bugware (sic) */
5804 	status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
5805 	txd->opts1 = cpu_to_le32(status);
5807 	tp->cur_tx += frags + 1;
5811 	RTL_W8(TxPoll, NPQ);
5815 	if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
5816 		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
5817 		* not miss a ring update when it notices a stopped queue.
5820 		netif_stop_queue(dev);
5821 		/* Sync with rtl_tx:
5822 		* - publish queue status and cur_tx ring index (write barrier)
5823 		* - refresh dirty_tx ring index (read barrier).
5824 		* May the current thread have a pessimistic view of the ring
5825 		* status and forget to wake up queue, a racing rtl_tx thread
5829 		if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
5830 			netif_wake_queue(dev);
5833 	return NETDEV_TX_OK;
5836 	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
5839 	dev->stats.tx_dropped++;
5840 	return NETDEV_TX_OK;
5843 	netif_stop_queue(dev);
5844 	dev->stats.tx_dropped++;
5845 	return NETDEV_TX_BUSY;
/*
 * Handle a PCI bus error signalled via SYSErr: log PCI_COMMAND/PCI_STATUS,
 * re-enable SERR/parity (unless parity is known broken), clear the sticky
 * error bits in PCI_STATUS, optionally disable 64-bit DAC addressing, then
 * reset the chip and schedule a full driver reset from task context.
 */
5848 static void rtl8169_pcierr_interrupt(struct net_device *dev)
5850 struct rtl8169_private *tp = netdev_priv(dev);
5851 struct pci_dev *pdev = tp->pci_dev;
5852 u16 pci_status, pci_cmd;
5854 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
5855 pci_read_config_word(pdev, PCI_STATUS, &pci_status);
5857 netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
5858 pci_cmd, pci_status);
5861 * The recovery sequence below admits a very elaborated explanation:
5862 * - it seems to work;
5863 * - I did not see what else could be done;
5864 * - it makes iop3xx happy.
5866 * Feel free to adjust to your needs.
5868 if (pdev->broken_parity_status)
5869 pci_cmd &= ~PCI_COMMAND_PARITY;
5871 pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
5873 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
/* PCI_STATUS error bits are write-one-to-clear. */
5875 pci_write_config_word(pdev, PCI_STATUS,
5876 pci_status & (PCI_STATUS_DETECTED_PARITY |
5877 PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
5878 PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
5880 /* The infamous DAC f*ckup only happens at boot time */
5881 if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) {
5882 void __iomem *ioaddr = tp->mmio_addr;
5884 netif_info(tp, intr, dev, "disabling PCI DAC\n");
5885 tp->cp_cmd &= ~PCIDAC;
5886 RTL_W16(CPlusCmd, tp->cp_cmd);
5887 dev->features &= ~NETIF_F_HIGHDMA;
5890 rtl8169_hw_reset(tp);
5892 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/*
 * Reclaim completed Tx descriptors: walk from dirty_tx towards cur_tx,
 * stop at the first descriptor still owned by the NIC, unmap and free
 * each completed skb, update per-device byte/packet stats, then wake the
 * queue if enough ring space became available.
 */
5895 static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
5897 unsigned int dirty_tx, tx_left;
5899 dirty_tx = tp->dirty_tx;
5901 tx_left = tp->cur_tx - dirty_tx;
5903 while (tx_left > 0) {
5904 unsigned int entry = dirty_tx % NUM_TX_DESC;
5905 struct ring_info *tx_skb = tp->tx_skb + entry;
5909 status = le32_to_cpu(tp->TxDescArray[entry].opts1);
/* NIC still owns this descriptor: nothing more to reclaim. */
5910 if (status & DescOwn)
5913 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
5914 tp->TxDescArray + entry);
/* Stats and skb release only on the last fragment of a packet. */
5915 if (status & LastFrag) {
5916 u64_stats_update_begin(&tp->tx_stats.syncp);
5917 tp->tx_stats.packets++;
5918 tp->tx_stats.bytes += tx_skb->skb->len;
5919 u64_stats_update_end(&tp->tx_stats.syncp);
5920 dev_kfree_skb(tx_skb->skb);
5927 if (tp->dirty_tx != dirty_tx) {
5928 tp->dirty_tx = dirty_tx;
5929 /* Sync with rtl8169_start_xmit:
5930 * - publish dirty_tx ring index (write barrier)
5931 * - refresh cur_tx ring index and queue status (read barrier)
5932 * May the current thread miss the stopped queue condition,
5933 * a racing xmit thread can only have a right view of the
5937 if (netif_queue_stopped(dev) &&
5938 TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
5939 netif_wake_queue(dev);
5942 * 8168 hack: TxPoll requests are lost when the Tx packets are
5943 * too close. Let's kick an extra TxPoll request when a burst
5944 * of start_xmit activity is detected (if it is not detected,
5945 * it is slow enough). -- FR
5947 if (tp->cur_tx != dirty_tx) {
5948 void __iomem *ioaddr = tp->mmio_addr;
5950 RTL_W8(TxPoll, NPQ);
/*
 * Return non-zero when an Rx descriptor does not carry a complete frame,
 * i.e. FirstFrag and LastFrag are not both set (multi-descriptor frame).
 */
5955 static inline int rtl8169_fragmented_frame(u32 status)
5957 return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
/*
 * Propagate the hardware Rx checksum verdict to the skb: mark
 * CHECKSUM_UNNECESSARY for TCP/UDP frames the NIC validated, otherwise
 * leave the checksum state as "none" so the stack verifies it.
 */
5960 static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
5962 u32 status = opts1 & RxProtoMask;
5964 if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
5965 ((status == RxProtoUDP) && !(opts1 & UDPFail)))
5966 skb->ip_summed = CHECKSUM_UNNECESSARY;
5968 skb_checksum_none_assert(skb);
/*
 * Copy-based receive: allocate a fresh skb and memcpy the packet out of
 * the DMA buffer (after syncing it for CPU access), so the original Rx
 * buffer can be handed straight back to the NIC.  Returns NULL when skb
 * allocation fails (elided lines presumably guard the memcpy — confirm
 * against full source).
 */
5971 static struct sk_buff *rtl8169_try_rx_copy(void *data,
5972 struct rtl8169_private *tp,
5976 struct sk_buff *skb;
5977 struct device *d = &tp->pci_dev->dev;
5979 data = rtl8169_align(data);
5980 dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
5982 skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
5984 memcpy(skb->data, data, pkt_size);
/* Hand the buffer back to the device side of the DMA mapping. */
5985 dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
/*
 * NAPI receive loop: process up to 'budget' descriptors starting at
 * tp->cur_rx.  Error frames update error counters (and may trigger a
 * reset on FIFO overflow); good frames are copied into a fresh skb,
 * checksum/VLAN metadata attached, and passed up via napi_gro_receive().
 * Returns the number of packets processed (via the elided 'count' path).
 */
5990 static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
5992 unsigned int cur_rx, rx_left;
5995 cur_rx = tp->cur_rx;
5997 for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
5998 unsigned int entry = cur_rx % NUM_RX_DESC;
5999 struct RxDesc *desc = tp->RxDescArray + entry;
6003 status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
/* Descriptor still owned by the NIC: no more completed frames. */
6005 if (status & DescOwn)
6007 if (unlikely(status & RxRES)) {
6008 netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
6010 dev->stats.rx_errors++;
6011 if (status & (RxRWT | RxRUNT))
6012 dev->stats.rx_length_errors++;
6014 dev->stats.rx_crc_errors++;
6015 if (status & RxFOVF) {
/* FIFO overflow: schedule a chip reset from task context. */
6016 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
6017 dev->stats.rx_fifo_errors++;
/* With RXALL set, still deliver frames that only have RUNT/CRC errors. */
6019 if ((status & (RxRUNT | RxCRC)) &&
6020 !(status & (RxRWT | RxFOVF)) &&
6021 (dev->features & NETIF_F_RXALL))
6024 struct sk_buff *skb;
6029 addr = le64_to_cpu(desc->addr);
/* Strip the 4-byte FCS unless the user asked to keep it (RXFCS). */
6030 if (likely(!(dev->features & NETIF_F_RXFCS)))
6031 pkt_size = (status & 0x00003fff) - 4;
6033 pkt_size = status & 0x00003fff;
6036 * The driver does not support incoming fragmented
6037 * frames. They are seen as a symptom of over-mtu
6040 if (unlikely(rtl8169_fragmented_frame(status))) {
6041 dev->stats.rx_dropped++;
6042 dev->stats.rx_length_errors++;
6043 goto release_descriptor;
6046 skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
6047 tp, pkt_size, addr);
6049 dev->stats.rx_dropped++;
6050 goto release_descriptor;
6053 rtl8169_rx_csum(skb, status);
6054 skb_put(skb, pkt_size);
6055 skb->protocol = eth_type_trans(skb, dev);
6057 rtl8169_rx_vlan_tag(desc, skb);
6059 napi_gro_receive(&tp->napi, skb);
6061 u64_stats_update_begin(&tp->rx_stats.syncp);
6062 tp->rx_stats.packets++;
6063 tp->rx_stats.bytes += pkt_size;
6064 u64_stats_update_end(&tp->rx_stats.syncp);
/* Give the descriptor (and its buffer) back to the NIC. */
6069 rtl8169_mark_to_asic(desc, rx_buf_sz);
6072 count = cur_rx - tp->cur_rx;
6073 tp->cur_rx = cur_rx;
/*
 * Hard IRQ handler: read the event register, filter for NAPI-relevant or
 * slow events, and if any are pending disable chip interrupts and hand
 * processing to NAPI (rtl8169_poll).  0xffff means the device is gone
 * (e.g. surprise removal), so it is ignored.
 */
6078 static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
6080 struct net_device *dev = dev_instance;
6081 struct rtl8169_private *tp = netdev_priv(dev);
6085 status = rtl_get_events(tp);
6086 if (status && status != 0xffff) {
6087 status &= RTL_EVENT_NAPI | tp->event_slow;
6091 rtl_irq_disable(tp);
6092 napi_schedule(&tp->napi);
6095 return IRQ_RETVAL(handled);
6099 * Workqueue context.
/*
 * Handle "slow" events deferred from IRQ/NAPI context: Rx FIFO overflow
 * (with a per-chip workaround for VER_11), PCI system error, and link
 * change.  Acks the events it saw and re-enables all interrupts when done.
 */
6101 static void rtl_slow_event_work(struct rtl8169_private *tp)
6103 struct net_device *dev = tp->dev;
6106 status = rtl_get_events(tp) & tp->event_slow;
6107 rtl_ack_events(tp, status);
6109 if (unlikely(status & RxFIFOOver)) {
6110 switch (tp->mac_version) {
6111 /* Work around for rx fifo overflow */
6112 case RTL_GIGA_MAC_VER_11:
6113 netif_stop_queue(dev);
6114 /* XXX - Hack alert. See rtl_task(). */
6115 set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
6121 if (unlikely(status & SYSErr))
6122 rtl8169_pcierr_interrupt(dev);
6124 if (status & LinkChg)
6125 __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
6127 rtl_irq_enable_all(tp);
/*
 * Workqueue entry point: dispatch each pending deferred task (slow
 * events, chip reset, PHY work) by testing-and-clearing its flag bit.
 * Bails out early when the interface is down or tasks are disabled;
 * runs under the driver work lock (elided rtl_lock_work pairs with the
 * rtl_unlock_work below — confirm against full source).
 */
6130 static void rtl_task(struct work_struct *work)
6132 static const struct {
6134 void (*action)(struct rtl8169_private *);
6136 /* XXX - keep rtl_slow_event_work() as first element. */
6137 { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work },
6138 { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work },
6139 { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work }
6141 struct rtl8169_private *tp =
6142 container_of(work, struct rtl8169_private, wk.work);
6143 struct net_device *dev = tp->dev;
6148 if (!netif_running(dev) ||
6149 !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
6152 for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
6155 pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
6157 rtl_work[i].action(tp);
6161 rtl_unlock_work(tp);
/*
 * NAPI poll handler: ack fast events, run Rx and Tx processing, defer
 * slow events to the workqueue (masking them from re-enable so they are
 * not re-raised before the work runs), and complete NAPI when under
 * budget, re-enabling the remaining interrupt sources.
 */
6164 static int rtl8169_poll(struct napi_struct *napi, int budget)
6166 struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
6167 struct net_device *dev = tp->dev;
6168 u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
6172 status = rtl_get_events(tp);
/* Slow events are acked later by rtl_slow_event_work(), not here. */
6173 rtl_ack_events(tp, status & ~tp->event_slow);
6175 if (status & RTL_EVENT_NAPI_RX)
6176 work_done = rtl_rx(dev, tp, (u32) budget);
6178 if (status & RTL_EVENT_NAPI_TX)
6181 if (status & tp->event_slow) {
6182 enable_mask &= ~tp->event_slow;
6184 rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
6187 if (work_done < budget) {
6188 napi_complete(napi);
6190 rtl_irq_enable(tp, enable_mask);
/*
 * Fold the chip's RxMissed counter (24-bit, clear-on-write) into
 * dev->stats.rx_missed_errors.  Only the original 8169 chips
 * (<= RTL_GIGA_MAC_VER_06) expose this register.
 */
6197 static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
6199 struct rtl8169_private *tp = netdev_priv(dev);
6201 if (tp->mac_version > RTL_GIGA_MAC_VER_06)
6204 dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
6205 RTL_W32(RxMissed, 0);
/*
 * Bring the interface down: stop the PHY timer, NAPI and the Tx queue,
 * reset the hardware, harvest the final RxMissed count, wait out any
 * in-flight hard_start_xmit, then free both rings and power down the PLL.
 */
6208 static void rtl8169_down(struct net_device *dev)
6210 struct rtl8169_private *tp = netdev_priv(dev);
6211 void __iomem *ioaddr = tp->mmio_addr;
6213 del_timer_sync(&tp->timer);
6215 napi_disable(&tp->napi);
6216 netif_stop_queue(dev);
6218 rtl8169_hw_reset(tp);
6220 * At this point device interrupts can not be enabled in any function,
6221 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
6222 * and napi is disabled (rtl8169_poll).
6224 rtl8169_rx_missed(dev, ioaddr);
6226 /* Give a racing hard_start_xmit a few cycles to complete. */
6227 synchronize_sched();
6229 rtl8169_tx_clear(tp);
6231 rtl8169_rx_clear(tp);
6233 rtl_pll_power_down(tp);
/*
 * .ndo_stop handler: snapshot hardware counters, disable deferred tasks,
 * free the IRQ and both DMA descriptor rings, balancing rtl_open().
 * Runtime-PM reference is held for the duration.
 */
6236 static int rtl8169_close(struct net_device *dev)
6238 struct rtl8169_private *tp = netdev_priv(dev);
6239 struct pci_dev *pdev = tp->pci_dev;
6241 pm_runtime_get_sync(&pdev->dev);
6243 /* Update counters before going down */
6244 rtl8169_update_counters(dev);
6247 clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6250 rtl_unlock_work(tp);
6252 free_irq(pdev->irq, dev);
6254 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
6256 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
/* NULL the ring pointers: runtime-PM callbacks test them to mean "closed". */
6258 tp->TxDescArray = NULL;
6259 tp->RxDescArray = NULL;
6261 pm_runtime_put_sync(&pdev->dev);
6266 #ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll hook (netconsole etc.): invoke the interrupt handler directly
 * since real interrupts may be disabled in this context.
 */
6267 static void rtl8169_netpoll(struct net_device *dev)
6269 struct rtl8169_private *tp = netdev_priv(dev);
6271 rtl8169_interrupt(tp->pci_dev->irq, dev);
/*
 * .ndo_open handler: allocate the 256-byte-aligned Tx/Rx descriptor
 * rings, initialize the Rx ring, load firmware, request the IRQ (shared
 * unless MSI is in use), then bring up PHY, PLL and the queue.  Error
 * paths unwind in reverse order via the goto chain at the bottom.
 */
6275 static int rtl_open(struct net_device *dev)
6277 struct rtl8169_private *tp = netdev_priv(dev);
6278 void __iomem *ioaddr = tp->mmio_addr;
6279 struct pci_dev *pdev = tp->pci_dev;
6280 int retval = -ENOMEM;
6282 pm_runtime_get_sync(&pdev->dev);
6285 * Rx and Tx descriptors needs 256 bytes alignment.
6286 * dma_alloc_coherent provides more.
6288 tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
6289 &tp->TxPhyAddr, GFP_KERNEL);
6290 if (!tp->TxDescArray)
6291 goto err_pm_runtime_put;
6293 tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
6294 &tp->RxPhyAddr, GFP_KERNEL);
6295 if (!tp->RxDescArray)
6298 retval = rtl8169_init_ring(dev);
6302 INIT_WORK(&tp->wk.work, rtl_task);
6306 rtl_request_firmware(tp);
6308 retval = request_irq(pdev->irq, rtl8169_interrupt,
6309 (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
6312 goto err_release_fw_2;
6316 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6318 napi_enable(&tp->napi);
6320 rtl8169_init_phy(dev, tp);
6322 __rtl8169_set_features(dev, dev->features);
6324 rtl_pll_power_up(tp);
6328 netif_start_queue(dev);
6330 rtl_unlock_work(tp);
6332 tp->saved_wolopts = 0;
6333 pm_runtime_put_noidle(&pdev->dev);
6335 rtl8169_check_link_status(dev, tp, ioaddr);
/* Error unwind: release firmware and rings in reverse allocation order. */
6340 rtl_release_firmware(tp);
6341 rtl8169_rx_clear(tp);
6343 dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
6345 tp->RxDescArray = NULL;
6347 dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
6349 tp->TxDescArray = NULL;
6351 pm_runtime_put_noidle(&pdev->dev);
/*
 * .ndo_get_stats64 handler: fold in the latest RxMissed count (if the
 * interface is up), read the u64 Rx/Tx byte/packet counters under their
 * seqcount retry loops, and copy the plain error counters from dev->stats.
 */
6355 static struct rtnl_link_stats64 *
6356 rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6358 struct rtl8169_private *tp = netdev_priv(dev);
6359 void __iomem *ioaddr = tp->mmio_addr;
6362 if (netif_running(dev))
6363 rtl8169_rx_missed(dev, ioaddr);
6366 start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
6367 stats->rx_packets = tp->rx_stats.packets;
6368 stats->rx_bytes = tp->rx_stats.bytes;
6369 } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
6373 start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
6374 stats->tx_packets = tp->tx_stats.packets;
6375 stats->tx_bytes = tp->tx_stats.bytes;
6376 } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
6378 stats->rx_dropped = dev->stats.rx_dropped;
6379 stats->tx_dropped = dev->stats.tx_dropped;
6380 stats->rx_length_errors = dev->stats.rx_length_errors;
6381 stats->rx_errors = dev->stats.rx_errors;
6382 stats->rx_crc_errors = dev->stats.rx_crc_errors;
6383 stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
6384 stats->rx_missed_errors = dev->stats.rx_missed_errors;
/*
 * Common suspend path (system sleep, runtime suspend, shutdown):
 * detach and stop the queue, disable NAPI and deferred tasks, and
 * power down the PLL.  No-op when the interface is not running.
 */
6389 static void rtl8169_net_suspend(struct net_device *dev)
6391 struct rtl8169_private *tp = netdev_priv(dev);
6393 if (!netif_running(dev))
6396 netif_device_detach(dev);
6397 netif_stop_queue(dev);
6400 napi_disable(&tp->napi);
6401 clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6402 rtl_unlock_work(tp);
6404 rtl_pll_power_down(tp);
/* System sleep .suspend/.freeze/.poweroff callback: thin wrapper around
 * the common rtl8169_net_suspend() path. */
6409 static int rtl8169_suspend(struct device *device)
6411 struct pci_dev *pdev = to_pci_dev(device);
6412 struct net_device *dev = pci_get_drvdata(pdev);
6414 rtl8169_net_suspend(dev);
/*
 * Common resume tail: reattach the device, power the PLL back up,
 * re-enable NAPI and deferred tasks, then schedule a chip reset to
 * restore full hardware state asynchronously.
 */
6419 static void __rtl8169_resume(struct net_device *dev)
6421 struct rtl8169_private *tp = netdev_priv(dev);
6423 netif_device_attach(dev);
6425 rtl_pll_power_up(tp);
6428 napi_enable(&tp->napi);
6429 set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6430 rtl_unlock_work(tp);
6432 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/* System sleep .resume/.thaw/.restore callback: re-init the PHY and,
 * if the interface was running, complete resume via __rtl8169_resume(). */
6435 static int rtl8169_resume(struct device *device)
6437 struct pci_dev *pdev = to_pci_dev(device);
6438 struct net_device *dev = pci_get_drvdata(pdev);
6439 struct rtl8169_private *tp = netdev_priv(dev);
6441 rtl8169_init_phy(dev, tp);
6443 if (netif_running(dev))
6444 __rtl8169_resume(dev);
/*
 * Runtime-PM suspend: save the current WoL configuration, arm WAKE_ANY
 * so the link can wake the device, then run the common suspend path.
 * Skipped entirely when the interface is closed (TxDescArray == NULL).
 */
6449 static int rtl8169_runtime_suspend(struct device *device)
6451 struct pci_dev *pdev = to_pci_dev(device);
6452 struct net_device *dev = pci_get_drvdata(pdev);
6453 struct rtl8169_private *tp = netdev_priv(dev);
6455 if (!tp->TxDescArray)
6459 tp->saved_wolopts = __rtl8169_get_wol(tp);
6460 __rtl8169_set_wol(tp, WAKE_ANY);
6461 rtl_unlock_work(tp);
6463 rtl8169_net_suspend(dev);
/*
 * Runtime-PM resume: restore the WoL options saved at runtime suspend,
 * re-init the PHY, and finish via __rtl8169_resume().  No-op when the
 * interface is closed.
 */
6468 static int rtl8169_runtime_resume(struct device *device)
6470 struct pci_dev *pdev = to_pci_dev(device);
6471 struct net_device *dev = pci_get_drvdata(pdev);
6472 struct rtl8169_private *tp = netdev_priv(dev);
6474 if (!tp->TxDescArray)
6478 __rtl8169_set_wol(tp, tp->saved_wolopts);
6479 tp->saved_wolopts = 0;
6480 rtl_unlock_work(tp);
6482 rtl8169_init_phy(dev, tp);
6484 __rtl8169_resume(dev);
/* Runtime-PM idle check: veto runtime suspend (-EBUSY) while the
 * interface is open, i.e. while the Tx ring is allocated. */
6489 static int rtl8169_runtime_idle(struct device *device)
6491 struct pci_dev *pdev = to_pci_dev(device);
6492 struct net_device *dev = pci_get_drvdata(pdev);
6493 struct rtl8169_private *tp = netdev_priv(dev);
6495 return tp->TxDescArray ? -EBUSY : 0;
/* Power-management callbacks: one rtl8169_suspend/resume pair serves all
 * system-sleep transitions; separate runtime-PM hooks gate on the ring
 * state.  RTL8169_PM_OPS compiles out to NULL without CONFIG_PM. */
6498 static const struct dev_pm_ops rtl8169_pm_ops = {
6499 .suspend = rtl8169_suspend,
6500 .resume = rtl8169_resume,
6501 .freeze = rtl8169_suspend,
6502 .thaw = rtl8169_resume,
6503 .poweroff = rtl8169_suspend,
6504 .restore = rtl8169_resume,
6505 .runtime_suspend = rtl8169_runtime_suspend,
6506 .runtime_resume = rtl8169_runtime_resume,
6507 .runtime_idle = rtl8169_runtime_idle,
6510 #define RTL8169_PM_OPS (&rtl8169_pm_ops)
6512 #else /* !CONFIG_PM */
6514 #define RTL8169_PM_OPS NULL
6516 #endif /* !CONFIG_PM */
/*
 * 8168b shutdown quirk: Wake-on-LAN fails if the receiver is disabled,
 * so for the affected MAC versions stop bus mastering but leave the Rx
 * engine (CmdRxEnb) running.
 */
6518 static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
6520 void __iomem *ioaddr = tp->mmio_addr;
6522 /* WoL fails with 8168b when the receiver is disabled. */
6523 switch (tp->mac_version) {
6524 case RTL_GIGA_MAC_VER_11:
6525 case RTL_GIGA_MAC_VER_12:
6526 case RTL_GIGA_MAC_VER_17:
6527 pci_clear_master(tp->pci_dev);
6529 RTL_W8(ChipCmd, CmdRxEnb);
/*
 * PCI .shutdown callback: quiesce the device, restore the permanent MAC
 * address (so WoL uses the factory address), reset the chip, and — on
 * power-off with WoL armed — apply the WoL quirks and put the device in
 * D3hot with wakeup enabled.
 */
6538 static void rtl_shutdown(struct pci_dev *pdev)
6540 struct net_device *dev = pci_get_drvdata(pdev);
6541 struct rtl8169_private *tp = netdev_priv(dev);
6542 struct device *d = &pdev->dev;
6544 pm_runtime_get_sync(d);
6546 rtl8169_net_suspend(dev);
6548 /* Restore original MAC address */
6549 rtl_rar_set(tp, dev->perm_addr);
6551 rtl8169_hw_reset(tp);
6553 if (system_state == SYSTEM_POWER_OFF) {
6554 if (__rtl8169_get_wol(tp) & WAKE_ANY) {
6555 rtl_wol_suspend_quirk(tp);
6556 rtl_wol_shutdown_quirk(tp);
6559 pci_wake_from_d3(pdev, true);
6560 pci_set_power_state(pdev, PCI_D3hot);
6563 pm_runtime_put_noidle(d);
/*
 * PCI .remove callback: stop the 8168 DASH/OOB firmware interface on the
 * chips that have it, cancel deferred work, unregister the netdev,
 * release firmware, restore the permanent MAC address and tear down
 * MSI/MMIO resources — the reverse of rtl_init_one().
 */
6566 static void rtl_remove_one(struct pci_dev *pdev)
6568 struct net_device *dev = pci_get_drvdata(pdev);
6569 struct rtl8169_private *tp = netdev_priv(dev);
6571 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6572 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6573 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6574 rtl8168_driver_stop(tp);
6577 cancel_work_sync(&tp->wk.work);
6579 netif_napi_del(&tp->napi);
6581 unregister_netdev(dev);
6583 rtl_release_firmware(tp);
/* Balance the pm_runtime_put_noidle() done at probe for wake-capable devices. */
6585 if (pci_dev_run_wake(pdev))
6586 pm_runtime_get_noresume(&pdev->dev);
6588 /* restore original MAC address */
6589 rtl_rar_set(tp, dev->perm_addr);
6591 rtl_disable_msi(pdev, tp);
6592 rtl8169_release_board(pdev, dev, tp->mmio_addr);
6593 pci_set_drvdata(pdev, NULL);
/* Net-device operations table wiring the stack's entry points to this
 * driver; netpoll support is conditional on CONFIG_NET_POLL_CONTROLLER. */
6596 static const struct net_device_ops rtl_netdev_ops = {
6597 .ndo_open = rtl_open,
6598 .ndo_stop = rtl8169_close,
6599 .ndo_get_stats64 = rtl8169_get_stats64,
6600 .ndo_start_xmit = rtl8169_start_xmit,
6601 .ndo_tx_timeout = rtl8169_tx_timeout,
6602 .ndo_validate_addr = eth_validate_addr,
6603 .ndo_change_mtu = rtl8169_change_mtu,
6604 .ndo_fix_features = rtl8169_fix_features,
6605 .ndo_set_features = rtl8169_set_features,
6606 .ndo_set_mac_address = rtl_set_mac_address,
6607 .ndo_do_ioctl = rtl8169_ioctl,
6608 .ndo_set_rx_mode = rtl_set_rx_mode,
6609 #ifdef CONFIG_NET_POLL_CONTROLLER
6610 .ndo_poll_controller = rtl8169_netpoll,
/*
 * Per-family configuration table, indexed by the PCI id table's
 * driver_data: hw_start routine, slow-event interrupt mask, feature
 * flags (GMII/MSI) and the fallback MAC version used when chip
 * identification fails.  Entries cover 8169, 8168 and 8101 families.
 */
6615 static const struct rtl_cfg_info {
6616 void (*hw_start)(struct net_device *);
6617 unsigned int region;
6622 } rtl_cfg_infos [] = {
6624 .hw_start = rtl_hw_start_8169,
6627 .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
6628 .features = RTL_FEATURE_GMII,
6629 .default_ver = RTL_GIGA_MAC_VER_01,
6632 .hw_start = rtl_hw_start_8168,
6635 .event_slow = SYSErr | LinkChg | RxOverflow,
6636 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
6637 .default_ver = RTL_GIGA_MAC_VER_11,
6640 .hw_start = rtl_hw_start_8101,
6643 .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver |
6645 .features = RTL_FEATURE_MSI,
6646 .default_ver = RTL_GIGA_MAC_VER_13,
6650 /* Cfg9346_Unlock assumed. */
/*
 * Try to enable MSI for families that support it; fall back to INTx on
 * failure.  Returns RTL_FEATURE_MSI when MSI is active, otherwise the
 * (elided) default.  On old chips (<= VER_06) the MSIEnable bit in
 * Config2 is also updated — caller must hold Cfg9346 unlocked.
 */
6651 static unsigned rtl_try_msi(struct rtl8169_private *tp,
6652 const struct rtl_cfg_info *cfg)
6654 void __iomem *ioaddr = tp->mmio_addr;
6658 cfg2 = RTL_R8(Config2) & ~MSIEnable;
6659 if (cfg->features & RTL_FEATURE_MSI) {
6660 if (pci_enable_msi(tp->pci_dev)) {
6661 netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
6664 msi = RTL_FEATURE_MSI;
6667 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
6668 RTL_W8(Config2, cfg2);
/* Poll condition: true when the MCU reports its link list is ready. */
6672 DECLARE_RTL_COND(rtl_link_list_ready_cond)
6674 void __iomem *ioaddr = tp->mmio_addr;
6676 return RTL_R8(MCU) & LINK_LIST_RDY;
/* Poll condition: true when both Rx and Tx FIFOs are reported empty. */
6679 DECLARE_RTL_COND(rtl_rxtx_empty_cond)
6681 void __iomem *ioaddr = tp->mmio_addr;
6683 return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
/*
 * One-time hardware bring-up for 8168g: gate RXDV, wait for Tx config
 * and FIFOs to drain, stop Rx/Tx, leave OOB mode, then poke MAC OCP
 * register 0xe8de (bit meanings not visible in this excerpt — confirm
 * against vendor documentation) while waiting for link-list readiness
 * after each step.
 */
6686 static void rtl_hw_init_8168g(struct rtl8169_private *tp)
6688 void __iomem *ioaddr = tp->mmio_addr;
6691 tp->ocp_base = OCP_STD_PHY_BASE;
6693 RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);
6695 if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
6698 if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
6701 RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb))
6703 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
6705 data = r8168_mac_ocp_read(tp, 0xe8de);
6707 r8168_mac_ocp_write(tp, 0xe8de, data);
6709 if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
6712 data = r8168_mac_ocp_read(tp, 0xe8de);
6714 r8168_mac_ocp_write(tp, 0xe8de, data);
6716 if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
/* Dispatch per-chip one-time hardware init; only the 8168g family
 * (VER_40/41) needs special handling here. */
6720 static void rtl_hw_initialize(struct rtl8169_private *tp)
6722 switch (tp->mac_version) {
6723 case RTL_GIGA_MAC_VER_40:
6724 case RTL_GIGA_MAC_VER_41:
6725 rtl_hw_init_8168g(tp);
/*
 * PCI probe: allocate the netdev, enable and map the PCI device, pick a
 * 64-bit DMA mask when DAC is usable, identify the chip, configure
 * WoL/MSI features, install the per-chip function pointers (TBI vs MII),
 * register the netdev and, for DASH-capable chips, start the OOB
 * firmware interface.  Error paths unwind through the goto chain at the
 * bottom (several labels are elided in this excerpt).
 */
6734 rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6736 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
6737 const unsigned int region = cfg->region;
6738 struct rtl8169_private *tp;
6739 struct mii_if_info *mii;
6740 struct net_device *dev;
6741 void __iomem *ioaddr;
6745 if (netif_msg_drv(&debug)) {
6746 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
6747 MODULENAME, RTL8169_VERSION);
6750 dev = alloc_etherdev(sizeof (*tp));
6756 SET_NETDEV_DEV(dev, &pdev->dev);
6757 dev->netdev_ops = &rtl_netdev_ops;
6758 tp = netdev_priv(dev);
6761 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
/* Set up the generic MII library glue used by ethtool/ioctl paths. */
6765 mii->mdio_read = rtl_mdio_read;
6766 mii->mdio_write = rtl_mdio_write;
6767 mii->phy_id_mask = 0x1f;
6768 mii->reg_num_mask = 0x1f;
6769 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
6771 /* disable ASPM completely as that cause random device stop working
6772 * problems as well as full system hangs for some PCIe devices users */
6773 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
6774 PCIE_LINK_STATE_CLKPM);
6776 /* enable device (incl. PCI PM wakeup and hotplug setup) */
6777 rc = pci_enable_device(pdev);
6779 netif_err(tp, probe, dev, "enable failure\n");
6780 goto err_out_free_dev_1;
6783 if (pci_set_mwi(pdev) < 0)
6784 netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
6786 /* make sure PCI base addr 1 is MMIO */
6787 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
6788 netif_err(tp, probe, dev,
6789 "region #%d not an MMIO resource, aborting\n",
6795 /* check for weird/broken PCI region reporting */
6796 if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
6797 netif_err(tp, probe, dev,
6798 "Invalid PCI region size(s), aborting\n");
6803 rc = pci_request_regions(pdev, MODULENAME);
6805 netif_err(tp, probe, dev, "could not request regions\n");
6809 tp->cp_cmd = RxChkSum;
/* Prefer 64-bit DMA (DAC) when available and allowed by the use_dac
 * module parameter; otherwise fall back to a 32-bit mask. */
6811 if ((sizeof(dma_addr_t) > 4) &&
6812 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
6813 tp->cp_cmd |= PCIDAC;
6814 dev->features |= NETIF_F_HIGHDMA;
6816 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6818 netif_err(tp, probe, dev, "DMA configuration failed\n");
6819 goto err_out_free_res_3;
6823 /* ioremap MMIO region */
6824 ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
6826 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
6828 goto err_out_free_res_3;
6830 tp->mmio_addr = ioaddr;
6832 if (!pci_is_pcie(pdev))
6833 netif_info(tp, probe, dev, "not PCI Express\n");
6835 /* Identify chip attached to board */
6836 rtl8169_get_mac_version(tp, dev, cfg->default_ver);
6840 rtl_irq_disable(tp);
6842 rtl_hw_initialize(tp);
6846 rtl_ack_events(tp, 0xffff);
6848 pci_set_master(pdev);
6851 * Pretend we are using VLANs; This bypasses a nasty bug where
6852 * Interrupts stop flowing on high load on 8110SCd controllers.
6854 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6855 tp->cp_cmd |= RxVlan;
6857 rtl_init_mdio_ops(tp);
6858 rtl_init_pll_power_ops(tp);
6859 rtl_init_jumbo_ops(tp);
6860 rtl_init_csi_ops(tp);
6862 rtl8169_print_mac_version(tp);
6864 chipset = tp->mac_version;
6865 tp->txd_version = rtl_chip_infos[chipset].txd_version;
/* Probe WoL capability from Config3/Config5 under Cfg9346 unlock. */
6867 RTL_W8(Cfg9346, Cfg9346_Unlock);
6868 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
6869 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
6870 if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
6871 tp->features |= RTL_FEATURE_WOL;
6872 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
6873 tp->features |= RTL_FEATURE_WOL;
6874 tp->features |= rtl_try_msi(tp, cfg);
6875 RTL_W8(Cfg9346, Cfg9346_Lock);
/* Select TBI (fiber) vs MII (copper) operation vectors. */
6877 if (rtl_tbi_enabled(tp)) {
6878 tp->set_speed = rtl8169_set_speed_tbi;
6879 tp->get_settings = rtl8169_gset_tbi;
6880 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
6881 tp->phy_reset_pending = rtl8169_tbi_reset_pending;
6882 tp->link_ok = rtl8169_tbi_link_ok;
6883 tp->do_ioctl = rtl_tbi_ioctl;
6885 tp->set_speed = rtl8169_set_speed_xmii;
6886 tp->get_settings = rtl8169_gset_xmii;
6887 tp->phy_reset_enable = rtl8169_xmii_reset_enable;
6888 tp->phy_reset_pending = rtl8169_xmii_reset_pending;
6889 tp->link_ok = rtl8169_xmii_link_ok;
6890 tp->do_ioctl = rtl_xmii_ioctl;
6893 mutex_init(&tp->wk.mutex);
6895 /* Get MAC address */
6896 for (i = 0; i < ETH_ALEN; i++)
6897 dev->dev_addr[i] = RTL_R8(MAC0 + i);
6899 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
6900 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
6902 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
6904 /* don't enable SG, IP_CSUM and TSO by default - it might not work
6905 * properly for all devices */
6906 dev->features |= NETIF_F_RXCSUM |
6907 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6909 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6910 NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6911 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6914 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6915 /* 8110SCd requires hardware Rx VLAN - disallow toggling */
6916 dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
6918 dev->hw_features |= NETIF_F_RXALL;
6919 dev->hw_features |= NETIF_F_RXFCS;
6921 tp->hw_start = cfg->hw_start;
6922 tp->event_slow = cfg->event_slow;
6924 tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
6925 ~(RxBOVF | RxFOVF) : ~0;
6927 init_timer(&tp->timer);
6928 tp->timer.data = (unsigned long) dev;
6929 tp->timer.function = rtl8169_phy_timer;
6931 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
6933 rc = register_netdev(dev);
6937 pci_set_drvdata(pdev, dev);
6939 netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
6940 rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
6941 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
6942 if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
6943 netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
6944 "tx checksumming: %s]\n",
6945 rtl_chip_infos[chipset].jumbo_max,
6946 rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
6949 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6950 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6951 tp->mac_version == RTL_GIGA_MAC_VER_31) {
6952 rtl8168_driver_start(tp);
6955 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
6957 if (pci_dev_run_wake(pdev))
6958 pm_runtime_put_noidle(&pdev->dev);
6960 netif_carrier_off(dev);
/* Probe error unwind (labels elided): NAPI, MSI, regions, MWI, device. */
6966 netif_napi_del(&tp->napi);
6967 rtl_disable_msi(pdev, tp);
6970 pci_release_regions(pdev);
6972 pci_clear_mwi(pdev);
6973 pci_disable_device(pdev);
/* PCI driver registration: probe/remove/shutdown hooks plus the PM ops
 * table; module_pci_driver() generates module init/exit boilerplate. */
6979 static struct pci_driver rtl8169_pci_driver = {
6981 .id_table = rtl8169_pci_tbl,
6982 .probe = rtl_init_one,
6983 .remove = rtl_remove_one,
6984 .shutdown = rtl_shutdown,
6985 .driver.pm = RTL8169_PM_OPS,
6988 module_pci_driver(rtl8169_pci_driver);