1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
3 Written 1998-2001 by Donald Becker.
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
28 [link no longer provides useful info -jgarzik]
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 #define DRV_NAME "via-rhine"
35 #define DRV_VERSION "1.5.0"
36 #define DRV_RELDATE "2010-10-09"
38 #include <linux/types.h>
40 /* A few user-configurable values.
41 These may be modified when a driver module is loaded. */
43 #define RHINE_MSG_DEFAULT \
46 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
47 Setting to > 1518 effectively disables this feature. */
48 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
49 defined(CONFIG_SPARC) || defined(__ia64__) || \
50 defined(__sh__) || defined(__mips__)
51 static int rx_copybreak = 1518;
53 static int rx_copybreak;
56 /* Work-around for broken BIOSes: they are unable to get the chip back out of
57 power state D3, so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
61 * In case you are looking for 'options[]' or 'full_duplex[]', they
62 * are gone. Use ethtool(8) instead.
65 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
66 The Rhine has a 64 element 8390-like hash table. */
67 static const int multicast_filter_limit = 32;
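/*
 * Illustrative sketch, not part of the driver proper: when the multicast
 * list fits under this limit (and no CAM filter is available), the 64-bit
 * hash filter is programmed. The bit index is the top six bits of the
 * Ethernet CRC of the address, as rhine_set_rx_mode() does below:
 *
 *	int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 */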
70 /* Operational parameters that are set at compile time. */
72 /* Keep the ring sizes a power of two for compile efficiency.
73 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
74 Making the Tx ring too large decreases the effectiveness of channel
75 bonding and packet priority.
76 There are no ill effects from too-large receive rings. */
77 #define TX_RING_SIZE 16
78 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
79 #define RX_RING_SIZE 64
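/*
 * Illustrative sketch, not part of the driver: with power-of-two ring sizes
 * the compiler can turn index arithmetic such as
 *
 *	entry = rp->cur_tx % TX_RING_SIZE;
 *
 * into a simple bit mask, the equivalent of
 *
 *	entry = rp->cur_tx & (TX_RING_SIZE - 1);
 */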
81 /* Operational parameters that usually are not changed. */
83 /* Time in jiffies before concluding the transmitter is hung. */
84 #define TX_TIMEOUT (2*HZ)
86 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
88 #include <linux/module.h>
89 #include <linux/moduleparam.h>
90 #include <linux/kernel.h>
91 #include <linux/string.h>
92 #include <linux/timer.h>
93 #include <linux/errno.h>
94 #include <linux/ioport.h>
95 #include <linux/interrupt.h>
96 #include <linux/pci.h>
97 #include <linux/dma-mapping.h>
98 #include <linux/netdevice.h>
99 #include <linux/etherdevice.h>
100 #include <linux/skbuff.h>
101 #include <linux/init.h>
102 #include <linux/delay.h>
103 #include <linux/mii.h>
104 #include <linux/ethtool.h>
105 #include <linux/crc32.h>
106 #include <linux/if_vlan.h>
107 #include <linux/bitops.h>
108 #include <linux/workqueue.h>
109 #include <asm/processor.h> /* Processor type for cache alignment. */
112 #include <asm/uaccess.h>
113 #include <linux/dmi.h>
115 /* These identify the driver base version and may not be removed. */
116 static const char version[] =
117 "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
119 /* This driver was written to use PCI memory space. Some early versions
120 of the Rhine may only work correctly with I/O space accesses. */
121 #ifdef CONFIG_VIA_RHINE_MMIO
126 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
127 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
128 MODULE_LICENSE("GPL");
130 module_param(debug, int, 0);
131 module_param(rx_copybreak, int, 0);
132 module_param(avoid_D3, bool, 0);
133 MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
134 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
135 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
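/*
 * Example usage, assuming standard module tooling: these parameters can be
 * set at load time, e.g.
 *
 *	modprobe via-rhine rx_copybreak=1518 avoid_D3=1
 *
 * or, for a built-in driver, on the kernel command line as
 * via-rhine.avoid_D3=1 (see the bootparam(7) note above).
 */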
143 I. Board Compatibility
145 This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet adapter.
148 II. Board-specific settings
150 Boards with this chip are functional only in a bus-master PCI slot.
152 Many operational settings are loaded from the EEPROM to the Config word at
153 offset 0x78. For most of these settings, this driver assumes that they are correct.
155 If this driver is compiled to use PCI memory space operations, the EEPROM
156 must be configured to enable memory ops.
158 III. Driver operation
162 This driver uses two statically allocated fixed-size descriptor lists
163 formed into rings by a branch from the final descriptor to the beginning of
164 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
166 IIIb/c. Transmit/Receive Structure
168 This driver attempts to use a zero-copy receive and transmit scheme.
170 Alas, all data buffers are required to start on a 32 bit boundary, so
171 the driver must often copy transmit packets into bounce buffers.
173 The driver allocates full frame size skbuffs for the Rx ring buffers at
174 open() time and passes the skb->data field to the chip as receive data
175 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
176 a fresh skbuff is allocated and the frame is copied to the new skbuff.
177 When the incoming frame is larger, the skbuff is passed directly up the
178 protocol stack. Buffers consumed this way are replaced by newly allocated
179 skbuffs in the last phase of rhine_rx().
181 The RX_COPYBREAK value is chosen to trade off the memory wasted by
182 using a full-sized skbuff for small frames vs. the copying costs of larger
183 frames. New boards are typically used in generously configured machines
184 and the underfilled buffers have negligible impact compared to the benefit of
185 a single allocation size, so the default value of zero results in never
186 copying packets. When copying is done, the cost is usually mitigated by using
187 a combined copy/checksum routine. Copying also preloads the cache, which is
188 most useful with small frames.
190 Since the VIA chips are only able to transfer data to buffers on 32 bit
191 boundaries, the IP header at offset 14 in an ethernet frame isn't
192 longword aligned for further processing. Copying these unaligned buffers
193 has the beneficial effect of 16-byte aligning the IP header.
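As a sketch (simplified from rhine_rx() below), the copybreak decision is:

	if (pkt_len < rx_copybreak)
		/* small frame: copy it into a freshly allocated skb */
		skb = netdev_alloc_skb_ip_align(dev, pkt_len);
	else
		/* large frame: pass the ring skb itself up the stack */
		skb = rp->rx_skbuff[entry];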
195 IIId. Synchronization
197 The driver runs as two independent, single-threaded flows of control. One
198 is the send-packet routine, which enforces single-threaded use by the
199 netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
200 which is single threaded by the hardware and interrupt handling software.
202 The send packet thread has partial control over the Tx ring. It locks the
203 netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
204 the ring is not available it stops the transmit queue by
205 calling netif_stop_queue.
207 The interrupt handler has exclusive control over the Rx ring and records stats
208 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
209 empty by incrementing the dirty_tx mark. If at least half of the entries in
210 the Tx ring are available, the transmit queue is woken up if it was stopped.
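In code (see rhine_tx() below), that wake-up check is simply:

	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);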
216 Preliminary VT86C100A manual from http://www.via.com.tw/
217 http://www.scyld.com/expert/100mbps.html
218 http://www.scyld.com/expert/NWay.html
219 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
220 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
225 The VT86C100A manual is not a reliable source of information.
226 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
227 in significant performance degradation for bounce buffer copies on transmit
228 and unaligned IP headers on receive.
229 The chip does not pad to minimum transmit length.
234 /* This table drives the PCI probe routines. It's mostly boilerplate in all
235 of the drivers, and will likely be provided by some future kernel.
236 Note the matching code -- the first table entry matches all 56** cards but
237 the second matches only the 1234 card.
244 VT8231 = 0x50, /* Integrated MAC */
245 VT8233 = 0x60, /* Integrated MAC */
246 VT8235 = 0x74, /* Integrated MAC */
247 VT8237 = 0x78, /* Integrated MAC */
254 VT6105M = 0x90, /* Management adapter */
258 rqWOL = 0x0001, /* Wake-On-LAN support */
259 rqForceReset = 0x0002,
260 rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
261 rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
262 rqRhineI = 0x0100, /* See comment below */
265 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
266 * MMIO as well as for the collision counter and the Tx FIFO underflow
267 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
270 /* Beware of PCI posted writes */
271 #define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
273 static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
274 { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
275 { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
276 { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
277 { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */
278 { } /* terminate list */
280 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
283 /* Offsets to the device registers. */
284 enum register_offsets {
285 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
286 ChipCmd1=0x09, TQWake=0x0A,
287 IntrStatus=0x0C, IntrEnable=0x0E,
288 MulticastFilter0=0x10, MulticastFilter1=0x14,
289 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
290 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
291 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
292 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
293 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
294 StickyHW=0x83, IntrStatus2=0x84,
295 CamMask=0x88, CamCon=0x92, CamAddr=0x93,
296 WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
297 WOLcrClr1=0xA6, WOLcgClr=0xA7,
298 PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
301 /* Bits in ConfigD */
303 BackOptional=0x01, BackModify=0x02,
304 BackCaptureEffect=0x04, BackRandom=0x08
307 /* Bits in the TxConfig (TCR) register */
310 TCR_LB0=0x02, /* loopback[0] */
311 TCR_LB1=0x04, /* loopback[1] */
319 /* Bits in the CamCon (CAMC) register */
327 /* Bits in the PCIBusConfig1 (BCR1) register */
335 BCR1_TXQNOBK=0x40, /* for VT6105 */
336 BCR1_VIDFR=0x80, /* for VT6105 */
337 BCR1_MED0=0x40, /* for VT6102 */
338 BCR1_MED1=0x80, /* for VT6102 */
342 /* Registers whose MMIO values we verify against their PIO counterparts. */
343 static const int mmio_verify_registers[] = {
344 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
349 /* Bits in the interrupt status/mask registers. */
350 enum intr_status_bits {
354 IntrTxError = 0x0008,
355 IntrRxEmpty = 0x0020,
357 IntrStatsMax = 0x0080,
358 IntrRxEarly = 0x0100,
359 IntrTxUnderrun = 0x0210,
360 IntrRxOverflow = 0x0400,
361 IntrRxDropped = 0x0800,
362 IntrRxNoBuf = 0x1000,
363 IntrTxAborted = 0x2000,
364 IntrLinkChange = 0x4000,
365 IntrRxWakeUp = 0x8000,
366 IntrTxDescRace = 0x080000, /* mapped from IntrStatus2 */
367 IntrNormalSummary = IntrRxDone | IntrTxDone,
368 IntrTxErrSummary = IntrTxDescRace | IntrTxAborted | IntrTxError |
372 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
381 /* The Rx and Tx buffer descriptors. */
384 __le32 desc_length; /* Chain flag, Buffer/frame length */
390 __le32 desc_length; /* Chain flag, Tx Config, Frame length */
395 /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
396 #define TXDESC 0x00e08000
398 enum rx_status_bits {
399 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
402 /* Bits in *_desc.*_status */
403 enum desc_status_bits {
407 /* Bits in *_desc.*_length */
408 enum desc_length_bits {
412 /* Bits in ChipCmd. */
414 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
415 CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
416 Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
417 Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
423 struct u64_stats_sync syncp;
426 struct rhine_private {
427 /* Bit mask for configured VLAN ids */
428 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
430 /* Descriptor rings */
431 struct rx_desc *rx_ring;
432 struct tx_desc *tx_ring;
433 dma_addr_t rx_ring_dma;
434 dma_addr_t tx_ring_dma;
436 /* The addresses of receive-in-place skbuffs. */
437 struct sk_buff *rx_skbuff[RX_RING_SIZE];
438 dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
440 /* The saved address of a sent-in-place packet/buffer, for later free(). */
441 struct sk_buff *tx_skbuff[TX_RING_SIZE];
442 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
444 /* Tx bounce buffers (Rhine-I only) */
445 unsigned char *tx_buf[TX_RING_SIZE];
446 unsigned char *tx_bufs;
447 dma_addr_t tx_bufs_dma;
449 struct pci_dev *pdev;
451 struct net_device *dev;
452 struct napi_struct napi;
454 struct mutex task_lock;
456 struct work_struct slow_event_task;
457 struct work_struct reset_task;
461 /* Frequently used values: keep some adjacent for cache effect. */
463 struct rx_desc *rx_head_desc;
464 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
465 unsigned int cur_tx, dirty_tx;
466 unsigned int rx_buf_sz; /* Based on MTU+slack. */
467 struct rhine_stats rx_stats;
468 struct rhine_stats tx_stats;
471 u8 tx_thresh, rx_thresh;
473 struct mii_if_info mii_if;
477 #define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
478 #define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
479 #define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
481 #define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x))
482 #define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x))
483 #define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x))
485 #define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
486 #define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
487 #define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
489 #define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
490 #define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
491 #define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
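/*
 * Example, as used in rhine_kick_tx_threshold() below: update the Tx FIFO
 * threshold field without disturbing the other TxConfig bits:
 *
 *	BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
 *
 * i.e. read the register, clear the mask bits, OR in the new value and
 * write the result back.
 */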
494 static int mdio_read(struct net_device *dev, int phy_id, int location);
495 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
496 static int rhine_open(struct net_device *dev);
497 static void rhine_reset_task(struct work_struct *work);
498 static void rhine_slow_event_task(struct work_struct *work);
499 static void rhine_tx_timeout(struct net_device *dev);
500 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
501 struct net_device *dev);
502 static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
503 static void rhine_tx(struct net_device *dev);
504 static int rhine_rx(struct net_device *dev, int limit);
505 static void rhine_set_rx_mode(struct net_device *dev);
506 static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
507 struct rtnl_link_stats64 *stats);
508 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
509 static const struct ethtool_ops netdev_ethtool_ops;
510 static int rhine_close(struct net_device *dev);
511 static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
512 static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
513 static void rhine_restart_tx(struct net_device *dev);
515 static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
517 void __iomem *ioaddr = rp->base;
520 for (i = 0; i < 1024; i++) {
521 bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
523 if (low ^ has_mask_bits)
528 netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
529 "count: %04d\n", low ? "low" : "high", reg, mask, i);
533 static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
535 rhine_wait_bit(rp, reg, mask, false);
538 static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
540 rhine_wait_bit(rp, reg, mask, true);
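/*
 * Example, from rhine_chip_reset() below: poll until the chip clears its
 * reset bit:
 *
 *	rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
 */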
543 static u32 rhine_get_events(struct rhine_private *rp)
545 void __iomem *ioaddr = rp->base;
548 intr_status = ioread16(ioaddr + IntrStatus);
549 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
550 if (rp->quirks & rqStatusWBRace)
551 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
555 static void rhine_ack_events(struct rhine_private *rp, u32 mask)
557 void __iomem *ioaddr = rp->base;
559 if (rp->quirks & rqStatusWBRace)
560 iowrite8(mask >> 16, ioaddr + IntrStatus2);
561 iowrite16(mask, ioaddr + IntrStatus);
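/*
 * Worked example of the IntrStatus2 mapping: bit 0x08 read from IntrStatus2
 * is shifted left by 16 in rhine_get_events() and shows up as
 * IntrTxDescRace (0x080000) in the combined event word; rhine_ack_events()
 * shifts it back down when acknowledging.
 */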
566 * Get power-related registers into a sane state.
567 * Notify user about past WOL event.
569 static void rhine_power_init(struct net_device *dev)
571 struct rhine_private *rp = netdev_priv(dev);
572 void __iomem *ioaddr = rp->base;
575 if (rp->quirks & rqWOL) {
576 /* Make sure chip is in power state D0 */
577 iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
579 /* Disable "force PME-enable" */
580 iowrite8(0x80, ioaddr + WOLcgClr);
582 /* Clear power-event config bits (WOL) */
583 iowrite8(0xFF, ioaddr + WOLcrClr);
584 /* More recent cards can manage two additional patterns */
585 if (rp->quirks & rq6patterns)
586 iowrite8(0x03, ioaddr + WOLcrClr1);
588 /* Save power-event status bits */
589 wolstat = ioread8(ioaddr + PwrcsrSet);
590 if (rp->quirks & rq6patterns)
591 wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
593 /* Clear power-event status bits */
594 iowrite8(0xFF, ioaddr + PwrcsrClr);
595 if (rp->quirks & rq6patterns)
596 iowrite8(0x03, ioaddr + PwrcsrClr1);
602 reason = "Magic packet";
605 reason = "Link went up";
608 reason = "Link went down";
611 reason = "Unicast packet";
614 reason = "Multicast/broadcast packet";
619 netdev_info(dev, "Woke system up. Reason: %s\n",
625 static void rhine_chip_reset(struct net_device *dev)
627 struct rhine_private *rp = netdev_priv(dev);
628 void __iomem *ioaddr = rp->base;
631 iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
634 if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
635 netdev_info(dev, "Reset not complete yet. Trying harder.\n");
638 if (rp->quirks & rqForceReset)
639 iowrite8(0x40, ioaddr + MiscCmd);
641 /* Reset can take somewhat longer (rare) */
642 rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
645 cmd1 = ioread8(ioaddr + ChipCmd1);
646 netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
647 "failed" : "succeeded");
651 static void enable_mmio(long pioaddr, u32 quirks)
654 if (quirks & rqRhineI) {
655 /* More recent docs say that this bit is reserved ... */
656 n = inb(pioaddr + ConfigA) | 0x20;
657 outb(n, pioaddr + ConfigA);
659 n = inb(pioaddr + ConfigD) | 0x80;
660 outb(n, pioaddr + ConfigD);
666 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
667 * (plus 0x6C for Rhine-I/II)
669 static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
671 struct rhine_private *rp = netdev_priv(dev);
672 void __iomem *ioaddr = rp->base;
675 outb(0x20, pioaddr + MACRegEEcsr);
676 for (i = 0; i < 1024; i++) {
677 if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
681 pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
685 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
686 * MMIO. If reloading EEPROM was done first this could be avoided, but
687 * it is not known if that still works with the "win98-reboot" problem.
689 enable_mmio(pioaddr, rp->quirks);
692 /* Turn off EEPROM-controlled wake-up (magic packet) */
693 if (rp->quirks & rqWOL)
694 iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
698 #ifdef CONFIG_NET_POLL_CONTROLLER
699 static void rhine_poll(struct net_device *dev)
701 struct rhine_private *rp = netdev_priv(dev);
702 const int irq = rp->pdev->irq;
705 rhine_interrupt(irq, dev);
710 static void rhine_kick_tx_threshold(struct rhine_private *rp)
712 if (rp->tx_thresh < 0xe0) {
713 void __iomem *ioaddr = rp->base;
715 rp->tx_thresh += 0x20;
716 BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
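/*
 * Resulting escalation: init_registers() starts tx_thresh at 0x20, so
 * successive Tx underruns raise the FIFO threshold through 0x40, 0x60, ...
 * up to the 0xe0 ceiling checked above.
 */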
720 static void rhine_tx_err(struct rhine_private *rp, u32 status)
722 struct net_device *dev = rp->dev;
724 if (status & IntrTxAborted) {
725 netif_info(rp, tx_err, dev,
726 "Abort %08x, frame dropped\n", status);
729 if (status & IntrTxUnderrun) {
730 rhine_kick_tx_threshold(rp);
731 netif_info(rp, tx_err, dev, "Transmitter underrun, "
732 "Tx threshold now %02x\n", rp->tx_thresh);
735 if (status & IntrTxDescRace)
736 netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
738 if ((status & IntrTxError) &&
739 (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
740 rhine_kick_tx_threshold(rp);
741 netif_info(rp, tx_err, dev, "Unspecified error. "
742 "Tx threshold now %02x\n", rp->tx_thresh);
745 rhine_restart_tx(dev);
748 static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
750 void __iomem *ioaddr = rp->base;
751 struct net_device_stats *stats = &rp->dev->stats;
753 stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
754 stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
757 * Clears the "tally counters" for CRC errors and missed frames(?).
758 * It has been reported that some chips need a write of 0 to clear
759 * these, for others the counters are set to 1 when written to and
760 * instead cleared when read. So we clear them both ways ...
762 iowrite32(0, ioaddr + RxMissed);
763 ioread16(ioaddr + RxCRCErrs);
764 ioread16(ioaddr + RxMissed);
767 #define RHINE_EVENT_NAPI_RX (IntrRxDone | \
775 #define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \
779 #define RHINE_EVENT_NAPI_TX (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
781 #define RHINE_EVENT_NAPI (RHINE_EVENT_NAPI_RX | \
782 RHINE_EVENT_NAPI_TX | \
784 #define RHINE_EVENT_SLOW (IntrPCIErr | IntrLinkChange)
785 #define RHINE_EVENT (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
787 static int rhine_napipoll(struct napi_struct *napi, int budget)
789 struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
790 struct net_device *dev = rp->dev;
791 void __iomem *ioaddr = rp->base;
792 u16 enable_mask = RHINE_EVENT & 0xffff;
796 status = rhine_get_events(rp);
797 rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
799 if (status & RHINE_EVENT_NAPI_RX)
800 work_done += rhine_rx(dev, budget);
802 if (status & RHINE_EVENT_NAPI_TX) {
803 if (status & RHINE_EVENT_NAPI_TX_ERR) {
804 /* Avoid scavenging before the Tx engine is turned off */
805 rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
806 if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
807 netif_warn(rp, tx_err, dev, "Tx still on\n");
812 if (status & RHINE_EVENT_NAPI_TX_ERR)
813 rhine_tx_err(rp, status);
816 if (status & IntrStatsMax) {
817 spin_lock(&rp->lock);
818 rhine_update_rx_crc_and_missed_errord(rp);
819 spin_unlock(&rp->lock);
822 if (status & RHINE_EVENT_SLOW) {
823 enable_mask &= ~RHINE_EVENT_SLOW;
824 schedule_work(&rp->slow_event_task);
827 if (work_done < budget) {
829 iowrite16(enable_mask, ioaddr + IntrEnable);
835 static void rhine_hw_init(struct net_device *dev, long pioaddr)
837 struct rhine_private *rp = netdev_priv(dev);
839 /* Reset the chip to erase previous misconfiguration. */
840 rhine_chip_reset(dev);
842 /* Rhine-I needs extra time to recuperate before EEPROM reload */
843 if (rp->quirks & rqRhineI)
846 /* Reload EEPROM controlled bytes cleared by soft reset */
847 rhine_reload_eeprom(pioaddr, dev);
850 static const struct net_device_ops rhine_netdev_ops = {
851 .ndo_open = rhine_open,
852 .ndo_stop = rhine_close,
853 .ndo_start_xmit = rhine_start_tx,
854 .ndo_get_stats64 = rhine_get_stats64,
855 .ndo_set_rx_mode = rhine_set_rx_mode,
856 .ndo_change_mtu = eth_change_mtu,
857 .ndo_validate_addr = eth_validate_addr,
858 .ndo_set_mac_address = eth_mac_addr,
859 .ndo_do_ioctl = netdev_ioctl,
860 .ndo_tx_timeout = rhine_tx_timeout,
861 .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid,
862 .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid,
863 #ifdef CONFIG_NET_POLL_CONTROLLER
864 .ndo_poll_controller = rhine_poll,
868 static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
870 struct net_device *dev;
871 struct rhine_private *rp;
876 void __iomem *ioaddr;
885 /* when built into the kernel, we only print version if device is found */
887 pr_info_once("%s\n", version);
894 if (pdev->revision < VTunknown0) {
898 else if (pdev->revision >= VT6102) {
899 quirks = rqWOL | rqForceReset;
900 if (pdev->revision < VT6105) {
902 quirks |= rqStatusWBRace; /* Rhine-II exclusive */
905 phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
906 if (pdev->revision >= VT6105_B0)
907 quirks |= rq6patterns;
908 if (pdev->revision < VT6105M)
911 name = "Rhine III (Management Adapter)";
915 rc = pci_enable_device(pdev);
919 /* this should always be supported */
920 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
923 "32-bit PCI DMA addresses not supported by the card!?\n");
928 if ((pci_resource_len(pdev, 0) < io_size) ||
929 (pci_resource_len(pdev, 1) < io_size)) {
931 dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
935 pioaddr = pci_resource_start(pdev, 0);
936 memaddr = pci_resource_start(pdev, 1);
938 pci_set_master(pdev);
940 dev = alloc_etherdev(sizeof(struct rhine_private));
945 SET_NETDEV_DEV(dev, &pdev->dev);
947 rp = netdev_priv(dev);
950 rp->pioaddr = pioaddr;
952 rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
954 rc = pci_request_regions(pdev, DRV_NAME);
956 goto err_out_free_netdev;
958 ioaddr = pci_iomap(pdev, bar, io_size);
962 "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
963 pci_name(pdev), io_size, memaddr);
964 goto err_out_free_res;
968 enable_mmio(pioaddr, quirks);
970 /* Check that selected MMIO registers match the PIO ones */
972 while (mmio_verify_registers[i]) {
973 int reg = mmio_verify_registers[i++];
974 unsigned char a = inb(pioaddr+reg);
975 unsigned char b = readb(ioaddr+reg);
979 "MMIO do not match PIO [%02x] (%02x != %02x)\n",
984 #endif /* USE_MMIO */
988 /* Get chip registers into a sane state */
989 rhine_power_init(dev);
990 rhine_hw_init(dev, pioaddr);
992 for (i = 0; i < 6; i++)
993 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
995 if (!is_valid_ether_addr(dev->dev_addr)) {
996 /* Report it and use a random ethernet address instead */
997 netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
998 eth_hw_addr_random(dev);
999 netdev_info(dev, "Using random MAC address: %pM\n",
1003 /* For Rhine-I/II, phy_id is loaded from EEPROM */
1005 phy_id = ioread8(ioaddr + 0x6C);
1007 spin_lock_init(&rp->lock);
1008 mutex_init(&rp->task_lock);
1009 INIT_WORK(&rp->reset_task, rhine_reset_task);
1010 INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
1012 rp->mii_if.dev = dev;
1013 rp->mii_if.mdio_read = mdio_read;
1014 rp->mii_if.mdio_write = mdio_write;
1015 rp->mii_if.phy_id_mask = 0x1f;
1016 rp->mii_if.reg_num_mask = 0x1f;
1018 /* The chip-specific entries in the device structure. */
1019 dev->netdev_ops = &rhine_netdev_ops;
1020 dev->ethtool_ops = &netdev_ethtool_ops;
1021 dev->watchdog_timeo = TX_TIMEOUT;
1023 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
1025 if (rp->quirks & rqRhineI)
1026 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
1028 if (pdev->revision >= VT6105M)
1029 dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
1030 NETIF_F_HW_VLAN_CTAG_RX |
1031 NETIF_F_HW_VLAN_CTAG_FILTER;
1033 /* dev->name not defined before register_netdev()! */
1034 rc = register_netdev(dev);
1038 netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
1045 dev->dev_addr, pdev->irq);
1047 pci_set_drvdata(pdev, dev);
1051 int mii_status = mdio_read(dev, phy_id, 1);
1052 mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1053 mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1054 if (mii_status != 0xffff && mii_status != 0x0000) {
1055 rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1057 "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1059 mii_status, rp->mii_if.advertising,
1060 mdio_read(dev, phy_id, 5));
1062 /* set IFF_RUNNING */
1063 if (mii_status & BMSR_LSTATUS)
1064 netif_carrier_on(dev);
1066 netif_carrier_off(dev);
1070 rp->mii_if.phy_id = phy_id;
1072 netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1077 pci_iounmap(pdev, ioaddr);
1079 pci_release_regions(pdev);
1080 err_out_free_netdev:
1086 static int alloc_ring(struct net_device* dev)
1088 struct rhine_private *rp = netdev_priv(dev);
1090 dma_addr_t ring_dma;
1092 ring = pci_alloc_consistent(rp->pdev,
1093 RX_RING_SIZE * sizeof(struct rx_desc) +
1094 TX_RING_SIZE * sizeof(struct tx_desc),
1097 netdev_err(dev, "Could not allocate DMA memory\n");
1100 if (rp->quirks & rqRhineI) {
1101 rp->tx_bufs = pci_alloc_consistent(rp->pdev,
1102 PKT_BUF_SZ * TX_RING_SIZE,
1104 if (rp->tx_bufs == NULL) {
1105 pci_free_consistent(rp->pdev,
1106 RX_RING_SIZE * sizeof(struct rx_desc) +
1107 TX_RING_SIZE * sizeof(struct tx_desc),
1114 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1115 rp->rx_ring_dma = ring_dma;
1116 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
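/*
 * Layout sketch of the single DMA allocation above, assuming the usual
 * four 32-bit words per descriptor:
 *
 *	ring / ring_dma
 *	+-------------------------------+-------------------------------+
 *	| RX_RING_SIZE * struct rx_desc | TX_RING_SIZE * struct tx_desc |
 *	+-------------------------------+-------------------------------+
 *	^ rp->rx_ring / rp->rx_ring_dma ^ rp->tx_ring / rp->tx_ring_dma
 */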
1121 static void free_ring(struct net_device* dev)
1123 struct rhine_private *rp = netdev_priv(dev);
1125 pci_free_consistent(rp->pdev,
1126 RX_RING_SIZE * sizeof(struct rx_desc) +
1127 TX_RING_SIZE * sizeof(struct tx_desc),
1128 rp->rx_ring, rp->rx_ring_dma);
1132 pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
1133 rp->tx_bufs, rp->tx_bufs_dma);
1139 static void alloc_rbufs(struct net_device *dev)
1141 struct rhine_private *rp = netdev_priv(dev);
1145 rp->dirty_rx = rp->cur_rx = 0;
1147 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1148 rp->rx_head_desc = &rp->rx_ring[0];
1149 next = rp->rx_ring_dma;
1151 /* Init the ring entries */
1152 for (i = 0; i < RX_RING_SIZE; i++) {
1153 rp->rx_ring[i].rx_status = 0;
1154 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1155 next += sizeof(struct rx_desc);
1156 rp->rx_ring[i].next_desc = cpu_to_le32(next);
1157 rp->rx_skbuff[i] = NULL;
1159 /* Mark the last entry as wrapping the ring. */
1160 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1162 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1163 for (i = 0; i < RX_RING_SIZE; i++) {
1164 struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1165 rp->rx_skbuff[i] = skb;
1169 rp->rx_skbuff_dma[i] =
1170 pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
1171 PCI_DMA_FROMDEVICE);
1173 rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1174 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1176 rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1179 static void free_rbufs(struct net_device* dev)
1181 struct rhine_private *rp = netdev_priv(dev);
1184 /* Free all the skbuffs in the Rx queue. */
1185 for (i = 0; i < RX_RING_SIZE; i++) {
1186 rp->rx_ring[i].rx_status = 0;
1187 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1188 if (rp->rx_skbuff[i]) {
1189 pci_unmap_single(rp->pdev,
1190 rp->rx_skbuff_dma[i],
1191 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1192 dev_kfree_skb(rp->rx_skbuff[i]);
1194 rp->rx_skbuff[i] = NULL;
1198 static void alloc_tbufs(struct net_device* dev)
1200 struct rhine_private *rp = netdev_priv(dev);
1204 rp->dirty_tx = rp->cur_tx = 0;
1205 next = rp->tx_ring_dma;
1206 for (i = 0; i < TX_RING_SIZE; i++) {
1207 rp->tx_skbuff[i] = NULL;
1208 rp->tx_ring[i].tx_status = 0;
1209 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1210 next += sizeof(struct tx_desc);
1211 rp->tx_ring[i].next_desc = cpu_to_le32(next);
1212 if (rp->quirks & rqRhineI)
1213 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1215 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1219 static void free_tbufs(struct net_device* dev)
1221 struct rhine_private *rp = netdev_priv(dev);
1224 for (i = 0; i < TX_RING_SIZE; i++) {
1225 rp->tx_ring[i].tx_status = 0;
1226 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1227 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1228 if (rp->tx_skbuff[i]) {
1229 if (rp->tx_skbuff_dma[i]) {
1230 pci_unmap_single(rp->pdev,
1231 rp->tx_skbuff_dma[i],
1232 rp->tx_skbuff[i]->len,
1235 dev_kfree_skb(rp->tx_skbuff[i]);
1237 rp->tx_skbuff[i] = NULL;
1238 rp->tx_buf[i] = NULL;
1242 static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1244 struct rhine_private *rp = netdev_priv(dev);
1245 void __iomem *ioaddr = rp->base;
1247 mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1249 if (rp->mii_if.full_duplex)
1250 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1253 iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1256 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1257 rp->mii_if.force_media, netif_carrier_ok(dev));
1260 /* Called after status of force_media possibly changed */
1261 static void rhine_set_carrier(struct mii_if_info *mii)
1263 struct net_device *dev = mii->dev;
1264 struct rhine_private *rp = netdev_priv(dev);
1266 if (mii->force_media) {
1267 /* autoneg is off: Link is always assumed to be up */
1268 if (!netif_carrier_ok(dev))
1269 netif_carrier_on(dev);
1270 } else /* Let the MII library update carrier status */
1271 rhine_check_media(dev, 0);
1273 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1274 mii->force_media, netif_carrier_ok(dev));
1278 * rhine_set_cam - set CAM multicast filters
1279 * @ioaddr: register block of this Rhine
1280 * @idx: multicast CAM index [0..MCAM_SIZE-1]
1281 * @addr: multicast address (6 bytes)
1283 * Load addresses into multicast filters.
1285 static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1289 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1292 /* Paranoid -- idx out of range should never happen */
1293 idx &= (MCAM_SIZE - 1);
1295 iowrite8((u8) idx, ioaddr + CamAddr);
1297 for (i = 0; i < 6; i++, addr++)
1298 iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1302 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1305 iowrite8(0, ioaddr + CamCon);
1309 * rhine_set_vlan_cam - set CAM VLAN filters
1310 * @ioaddr: register block of this Rhine
1311 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1312 * @addr: VLAN ID (2 bytes)
1314 * Load addresses into VLAN filters.
1316 static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1318 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1321 /* Paranoid -- idx out of range should never happen */
1322 idx &= (VCAM_SIZE - 1);
1324 iowrite8((u8) idx, ioaddr + CamAddr);
1326 iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1330 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1333 iowrite8(0, ioaddr + CamCon);
1337 * rhine_set_cam_mask - set multicast CAM mask
1338 * @ioaddr: register block of this Rhine
1339 * @mask: multicast CAM mask
1341 * Mask sets multicast filters active/inactive.
1343 static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1345 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1349 iowrite32(mask, ioaddr + CamMask);
1352 iowrite8(0, ioaddr + CamCon);
1356 * rhine_set_vlan_cam_mask - set VLAN CAM mask
1357 * @ioaddr: register block of this Rhine
1358 * @mask: VLAN CAM mask
1360 * Mask sets VLAN filters active/inactive.
1362 static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1364 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1368 iowrite32(mask, ioaddr + CamMask);
1371 iowrite8(0, ioaddr + CamCon);
1375 * rhine_init_cam_filter - initialize CAM filters
1376 * @dev: network device
1378 * Initialize (disable) hardware VLAN and multicast support on this device.
1381 static void rhine_init_cam_filter(struct net_device *dev)
1383 struct rhine_private *rp = netdev_priv(dev);
1384 void __iomem *ioaddr = rp->base;
1386 /* Disable all CAMs */
1387 rhine_set_vlan_cam_mask(ioaddr, 0);
1388 rhine_set_cam_mask(ioaddr, 0);
1390 /* disable hardware VLAN support */
1391 BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1392 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1396 * rhine_update_vcam - update VLAN CAM filters
1397 * @dev: network device
1399 * Update VLAN CAM filters to match configuration change.
1401 static void rhine_update_vcam(struct net_device *dev)
1403 struct rhine_private *rp = netdev_priv(dev);
1404 void __iomem *ioaddr = rp->base;
1406 u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */
1409 for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1410 rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1412 if (++i >= VCAM_SIZE)
1415 rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1418 static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1420 struct rhine_private *rp = netdev_priv(dev);
1422 spin_lock_bh(&rp->lock);
1423 set_bit(vid, rp->active_vlans);
1424 rhine_update_vcam(dev);
1425 spin_unlock_bh(&rp->lock);
1429 static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1431 struct rhine_private *rp = netdev_priv(dev);
1433 spin_lock_bh(&rp->lock);
1434 clear_bit(vid, rp->active_vlans);
1435 rhine_update_vcam(dev);
1436 spin_unlock_bh(&rp->lock);
1440 static void init_registers(struct net_device *dev)
1442 struct rhine_private *rp = netdev_priv(dev);
1443 void __iomem *ioaddr = rp->base;
1446 for (i = 0; i < 6; i++)
1447 iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1449 /* Initialize other registers. */
1450 iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
1451 /* Configure initial FIFO thresholds. */
1452 iowrite8(0x20, ioaddr + TxConfig);
1453 rp->tx_thresh = 0x20;
1454 rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
1456 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1457 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1459 rhine_set_rx_mode(dev);
1461 if (rp->pdev->revision >= VT6105M)
1462 rhine_init_cam_filter(dev);
1464 napi_enable(&rp->napi);
1466 iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1468 iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1470 rhine_check_media(dev, 1);
1473 /* Enable MII link status auto-polling (required for IntrLinkChange) */
1474 static void rhine_enable_linkmon(struct rhine_private *rp)
1476 void __iomem *ioaddr = rp->base;
1478 iowrite8(0, ioaddr + MIICmd);
1479 iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1480 iowrite8(0x80, ioaddr + MIICmd);
1482 rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1484 iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1487 /* Disable MII link status auto-polling (required for MDIO access) */
1488 static void rhine_disable_linkmon(struct rhine_private *rp)
1490 void __iomem *ioaddr = rp->base;
1492 iowrite8(0, ioaddr + MIICmd);
1494 if (rp->quirks & rqRhineI) {
1495 iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
1497 /* Can be called from ISR. Evil. */
1500 /* 0x80 must be set immediately before turning it off */
1501 iowrite8(0x80, ioaddr + MIICmd);
1503 rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1505 /* Heh. Now clear 0x80 again. */
1506 iowrite8(0, ioaddr + MIICmd);
1509 rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1512 /* Read and write over the MII Management Data I/O (MDIO) interface. */
1514 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1516 struct rhine_private *rp = netdev_priv(dev);
1517 void __iomem *ioaddr = rp->base;
1520 rhine_disable_linkmon(rp);
1522 /* rhine_disable_linkmon already cleared MIICmd */
1523 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1524 iowrite8(regnum, ioaddr + MIIRegAddr);
1525 iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */
1526 rhine_wait_bit_low(rp, MIICmd, 0x40);
1527 result = ioread16(ioaddr + MIIData);
1529 rhine_enable_linkmon(rp);
1533 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1535 struct rhine_private *rp = netdev_priv(dev);
1536 void __iomem *ioaddr = rp->base;
1538 rhine_disable_linkmon(rp);
1540 /* rhine_disable_linkmon already cleared MIICmd */
1541 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1542 iowrite8(regnum, ioaddr + MIIRegAddr);
1543 iowrite16(value, ioaddr + MIIData);
1544 iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */
1545 rhine_wait_bit_low(rp, MIICmd, 0x20);
1547 rhine_enable_linkmon(rp);
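/*
 * Example usage, as in rhine_open() below: read the PHY status register
 * over MDIO to log the link state:
 *
 *	mdio_read(dev, rp->mii_if.phy_id, MII_BMSR);
 */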
1550 static void rhine_task_disable(struct rhine_private *rp)
1552 mutex_lock(&rp->task_lock);
1553 rp->task_enable = false;
1554 mutex_unlock(&rp->task_lock);
1556 cancel_work_sync(&rp->slow_event_task);
1557 cancel_work_sync(&rp->reset_task);
1560 static void rhine_task_enable(struct rhine_private *rp)
1562 mutex_lock(&rp->task_lock);
1563 rp->task_enable = true;
1564 mutex_unlock(&rp->task_lock);
1567 static int rhine_open(struct net_device *dev)
1569 struct rhine_private *rp = netdev_priv(dev);
1570 void __iomem *ioaddr = rp->base;
1573 rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
1578 netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
1580 rc = alloc_ring(dev);
1582 free_irq(rp->pdev->irq, dev);
1587 rhine_chip_reset(dev);
1588 rhine_task_enable(rp);
1589 init_registers(dev);
1591 netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1592 __func__, ioread16(ioaddr + ChipCmd),
1593 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1595 netif_start_queue(dev);
1600 static void rhine_reset_task(struct work_struct *work)
1602 struct rhine_private *rp = container_of(work, struct rhine_private,
1604 struct net_device *dev = rp->dev;
1606 mutex_lock(&rp->task_lock);
1608 if (!rp->task_enable)
1611 napi_disable(&rp->napi);
1612 spin_lock_bh(&rp->lock);
1614 /* clear all descriptors */
1620 /* Reinitialize the hardware. */
1621 rhine_chip_reset(dev);
1622 init_registers(dev);
1624 spin_unlock_bh(&rp->lock);
1626 dev->trans_start = jiffies; /* prevent tx timeout */
1627 dev->stats.tx_errors++;
1628 netif_wake_queue(dev);
1631 mutex_unlock(&rp->task_lock);
1634 static void rhine_tx_timeout(struct net_device *dev)
1636 struct rhine_private *rp = netdev_priv(dev);
1637 void __iomem *ioaddr = rp->base;
1639 netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1640 ioread16(ioaddr + IntrStatus),
1641 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1643 schedule_work(&rp->reset_task);
1646 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1647 struct net_device *dev)
1649 struct rhine_private *rp = netdev_priv(dev);
1650 void __iomem *ioaddr = rp->base;
1653 /* Caution: the write order is important here, set the field
1654 with the "ownership" bits last. */
1656 /* Calculate the next Tx descriptor entry. */
1657 entry = rp->cur_tx % TX_RING_SIZE;
1659 if (skb_padto(skb, ETH_ZLEN))
1660 return NETDEV_TX_OK;
1662 rp->tx_skbuff[entry] = skb;
1664 if ((rp->quirks & rqRhineI) &&
1665 (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1666 /* Must use alignment buffer. */
1667 if (skb->len > PKT_BUF_SZ) {
1668 /* packet too long, drop it */
1670 rp->tx_skbuff[entry] = NULL;
1671 dev->stats.tx_dropped++;
1672 return NETDEV_TX_OK;
1675 /* Padding is not copied and so must be redone. */
1676 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1677 if (skb->len < ETH_ZLEN)
1678 memset(rp->tx_buf[entry] + skb->len, 0,
1679 ETH_ZLEN - skb->len);
1680 rp->tx_skbuff_dma[entry] = 0;
1681 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1682 (rp->tx_buf[entry] -
1685 rp->tx_skbuff_dma[entry] =
1686 pci_map_single(rp->pdev, skb->data, skb->len,
1688 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1691 rp->tx_ring[entry].desc_length =
1692 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1694 if (unlikely(vlan_tx_tag_present(skb))) {
1695 rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
1696 /* request tagging */
1697 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1700 rp->tx_ring[entry].tx_status = 0;
1704 rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1709 /* Non-x86 Todo: explicitly flush cache lines here. */
1711 if (vlan_tx_tag_present(skb))
1712 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1713 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1715 /* Wake the potentially-idle transmit channel */
1716 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1720 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1721 netif_stop_queue(dev);
1723 netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1724 rp->cur_tx - 1, entry);
1726 return NETDEV_TX_OK;
1729 static void rhine_irq_disable(struct rhine_private *rp)
1731 iowrite16(0x0000, rp->base + IntrEnable);
1735 /* The interrupt handler does all of the Rx thread work and cleans up
1736 after the Tx thread. */
1737 static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1739 struct net_device *dev = dev_instance;
1740 struct rhine_private *rp = netdev_priv(dev);
1744 status = rhine_get_events(rp);
1746 netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1748 if (status & RHINE_EVENT) {
1751 rhine_irq_disable(rp);
1752 napi_schedule(&rp->napi);
1755 if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1756 netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1760 return IRQ_RETVAL(handled);
1763 /* This routine is logically part of the interrupt handler, but isolated
1765 static void rhine_tx(struct net_device *dev)
1767 struct rhine_private *rp = netdev_priv(dev);
1768 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1770 /* find and cleanup dirty tx descriptors */
1771 while (rp->dirty_tx != rp->cur_tx) {
1772 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1773 netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1775 if (txstatus & DescOwn)
1777 if (txstatus & 0x8000) {
1778 netif_dbg(rp, tx_done, dev,
1779 "Transmit error, Tx status %08x\n", txstatus);
1780 dev->stats.tx_errors++;
1781 if (txstatus & 0x0400)
1782 dev->stats.tx_carrier_errors++;
1783 if (txstatus & 0x0200)
1784 dev->stats.tx_window_errors++;
1785 if (txstatus & 0x0100)
1786 dev->stats.tx_aborted_errors++;
1787 if (txstatus & 0x0080)
1788 dev->stats.tx_heartbeat_errors++;
1789 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1790 (txstatus & 0x0800) || (txstatus & 0x1000)) {
1791 dev->stats.tx_fifo_errors++;
1792 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1793 break; /* Keep the skb - we try again */
1795 /* Transmitter restarted in 'abnormal' handler. */
1797 if (rp->quirks & rqRhineI)
1798 dev->stats.collisions += (txstatus >> 3) & 0x0F;
1800 dev->stats.collisions += txstatus & 0x0F;
1801 netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1802 (txstatus >> 3) & 0xF, txstatus & 0xF);
1804 u64_stats_update_begin(&rp->tx_stats.syncp);
1805 rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
1806 rp->tx_stats.packets++;
1807 u64_stats_update_end(&rp->tx_stats.syncp);
1809 /* Free the original skb. */
1810 if (rp->tx_skbuff_dma[entry]) {
1811 pci_unmap_single(rp->pdev,
1812 rp->tx_skbuff_dma[entry],
1813 rp->tx_skbuff[entry]->len,
1816 dev_kfree_skb(rp->tx_skbuff[entry]);
1817 rp->tx_skbuff[entry] = NULL;
1818 entry = (++rp->dirty_tx) % TX_RING_SIZE;
1820 if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1821 netif_wake_queue(dev);
1825 * rhine_get_vlan_tci - extract TCI from Rx data buffer
1826 * @skb: pointer to sk_buff
1827 * @data_size: used data area of the buffer including CRC
1829 * If hardware VLAN tag extraction is enabled and the chip indicates an 802.1Q
1830 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1831 * aligned following the CRC.
1833 static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1835 u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
1836 return be16_to_cpup((__be16 *)trailer);
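/*
 * Worked example of the offset arithmetic above: with data_size = 65 the
 * 802.1Q header starts at the next 4-byte boundary, (65 + 3) & ~3 = 68;
 * skipping the 2-byte TPID leaves the TCI at skb->data + 70.
 */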
1839 /* Process up to limit frames from receive ring */
1840 static int rhine_rx(struct net_device *dev, int limit)
1842 struct rhine_private *rp = netdev_priv(dev);
1844 int entry = rp->cur_rx % RX_RING_SIZE;
1846 netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
1847 entry, le32_to_cpu(rp->rx_head_desc->rx_status));
1849 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1850 for (count = 0; count < limit; ++count) {
1851 struct rx_desc *desc = rp->rx_head_desc;
1852 u32 desc_status = le32_to_cpu(desc->rx_status);
1853 u32 desc_length = le32_to_cpu(desc->desc_length);
1854 int data_size = desc_status >> 16;
1856 if (desc_status & DescOwn)
1859 netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
1862 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1863 if ((desc_status & RxWholePkt) != RxWholePkt) {
1865 "Oversized Ethernet frame spanned multiple buffers, "
1866 "entry %#x length %d status %08x!\n",
1870 "Oversized Ethernet frame %p vs %p\n",
1872 &rp->rx_ring[entry]);
1873 dev->stats.rx_length_errors++;
1874 } else if (desc_status & RxErr) {
1875 /* There was an error. */
1876 netif_dbg(rp, rx_err, dev,
1877 "%s() Rx error %08x\n", __func__,
1879 dev->stats.rx_errors++;
1880 if (desc_status & 0x0030)
1881 dev->stats.rx_length_errors++;
1882 if (desc_status & 0x0048)
1883 dev->stats.rx_fifo_errors++;
1884 if (desc_status & 0x0004)
1885 dev->stats.rx_frame_errors++;
1886 if (desc_status & 0x0002) {
1887 /* this can also be updated outside the interrupt handler */
1888 spin_lock(&rp->lock);
1889 dev->stats.rx_crc_errors++;
1890 spin_unlock(&rp->lock);
1894 struct sk_buff *skb = NULL;
1895 /* Length should omit the CRC */
1896 int pkt_len = data_size - 4;
1899 /* Check if the packet is long enough to accept without
1900 copying to a minimally-sized skbuff. */
1901 if (pkt_len < rx_copybreak)
1902 skb = netdev_alloc_skb_ip_align(dev, pkt_len);
1904 pci_dma_sync_single_for_cpu(rp->pdev,
1905 rp->rx_skbuff_dma[entry],
1907 PCI_DMA_FROMDEVICE);
1909 skb_copy_to_linear_data(skb,
1910 rp->rx_skbuff[entry]->data,
1912 skb_put(skb, pkt_len);
1913 pci_dma_sync_single_for_device(rp->pdev,
1914 rp->rx_skbuff_dma[entry],
1916 PCI_DMA_FROMDEVICE);
1918 skb = rp->rx_skbuff[entry];
1920 netdev_err(dev, "Inconsistent Rx descriptor chain\n");
1923 rp->rx_skbuff[entry] = NULL;
1924 skb_put(skb, pkt_len);
1925 pci_unmap_single(rp->pdev,
1926 rp->rx_skbuff_dma[entry],
1928 PCI_DMA_FROMDEVICE);
1931 if (unlikely(desc_length & DescTag))
1932 vlan_tci = rhine_get_vlan_tci(skb, data_size);
1934 skb->protocol = eth_type_trans(skb, dev);
1936 if (unlikely(desc_length & DescTag))
1937 __vlan_hwaccel_put_tag(skb, vlan_tci);
1938 netif_receive_skb(skb);
1940 u64_stats_update_begin(&rp->rx_stats.syncp);
1941 rp->rx_stats.bytes += pkt_len;
1942 rp->rx_stats.packets++;
1943 u64_stats_update_end(&rp->rx_stats.syncp);
1945 entry = (++rp->cur_rx) % RX_RING_SIZE;
1946 rp->rx_head_desc = &rp->rx_ring[entry];
1949 /* Refill the Rx ring buffers. */
1950 for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1951 struct sk_buff *skb;
1952 entry = rp->dirty_rx % RX_RING_SIZE;
1953 if (rp->rx_skbuff[entry] == NULL) {
1954 skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1955 rp->rx_skbuff[entry] = skb;
1957 break; /* Better luck next round. */
1958 rp->rx_skbuff_dma[entry] =
1959 pci_map_single(rp->pdev, skb->data,
1961 PCI_DMA_FROMDEVICE);
1962 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
1964 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1970 static void rhine_restart_tx(struct net_device *dev)
{
1971 struct rhine_private *rp = netdev_priv(dev);
1972 void __iomem *ioaddr = rp->base;
1973 int entry = rp->dirty_tx % TX_RING_SIZE;
1977 * If new errors occurred, we need to sort them out before doing Tx.
1978 * In that case the ISR will be back here RSN anyway.
1980 intr_status = rhine_get_events(rp);
1982 if ((intr_status & IntrTxErrSummary) == 0) {
1984 /* We know better than the chip where it should continue. */
1985 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1986 ioaddr + TxRingPtr);
1988 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1991 if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
1992 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1993 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1995 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
2000 /* This should never happen */
2001 netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2007 static void rhine_slow_event_task(struct work_struct *work)
2009 struct rhine_private *rp =
2010 container_of(work, struct rhine_private, slow_event_task);
2011 struct net_device *dev = rp->dev;
2014 mutex_lock(&rp->task_lock);
2016 if (!rp->task_enable)
2019 intr_status = rhine_get_events(rp);
2020 rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2022 if (intr_status & IntrLinkChange)
2023 rhine_check_media(dev, 0);
2025 if (intr_status & IntrPCIErr)
2026 netif_warn(rp, hw, dev, "PCI error\n");
2028 iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
2031 mutex_unlock(&rp->task_lock);
2034 static struct rtnl_link_stats64 *
2035 rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2037 struct rhine_private *rp = netdev_priv(dev);
2040 spin_lock_bh(&rp->lock);
2041 rhine_update_rx_crc_and_missed_errord(rp);
2042 spin_unlock_bh(&rp->lock);
2044 netdev_stats_to_stats64(stats, &dev->stats);
2047 start = u64_stats_fetch_begin_bh(&rp->rx_stats.syncp);
2048 stats->rx_packets = rp->rx_stats.packets;
2049 stats->rx_bytes = rp->rx_stats.bytes;
2050 } while (u64_stats_fetch_retry_bh(&rp->rx_stats.syncp, start));
2053 start = u64_stats_fetch_begin_bh(&rp->tx_stats.syncp);
2054 stats->tx_packets = rp->tx_stats.packets;
2055 stats->tx_bytes = rp->tx_stats.bytes;
2056 } while (u64_stats_fetch_retry_bh(&rp->tx_stats.syncp, start));
2061 static void rhine_set_rx_mode(struct net_device *dev)
2063 struct rhine_private *rp = netdev_priv(dev);
2064 void __iomem *ioaddr = rp->base;
2065 u32 mc_filter[2]; /* Multicast hash filter */
2066 u8 rx_mode = 0x0C; /* Note: 0x02=accept runt, 0x01=accept errs */
2067 struct netdev_hw_addr *ha;
2069 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2071 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2072 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2073 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2074 (dev->flags & IFF_ALLMULTI)) {
2075 /* Too many to match, or accept all multicasts. */
2076 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2077 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2078 } else if (rp->pdev->revision >= VT6105M) {
2080 u32 mCAMmask = 0; /* 32 mCAMs (6105M and better) */
2081 netdev_for_each_mc_addr(ha, dev) {
2084 rhine_set_cam(ioaddr, i, ha->addr);
2088 rhine_set_cam_mask(ioaddr, mCAMmask);
2090 memset(mc_filter, 0, sizeof(mc_filter));
2091 netdev_for_each_mc_addr(ha, dev) {
2092 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2094 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2096 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2097 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2099 /* enable/disable VLAN receive filtering */
2100 if (rp->pdev->revision >= VT6105M) {
2101 if (dev->flags & IFF_PROMISC)
2102 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2104 BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2106 BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2109 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2111 struct rhine_private *rp = netdev_priv(dev);
2113 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2114 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2115 strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
2118 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2120 struct rhine_private *rp = netdev_priv(dev);
2123 mutex_lock(&rp->task_lock);
2124 rc = mii_ethtool_gset(&rp->mii_if, cmd);
2125 mutex_unlock(&rp->task_lock);
2130 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2132 struct rhine_private *rp = netdev_priv(dev);
2135 mutex_lock(&rp->task_lock);
2136 rc = mii_ethtool_sset(&rp->mii_if, cmd);
2137 rhine_set_carrier(&rp->mii_if);
2138 mutex_unlock(&rp->task_lock);
2143 static int netdev_nway_reset(struct net_device *dev)
2145 struct rhine_private *rp = netdev_priv(dev);
2147 return mii_nway_restart(&rp->mii_if);
2150 static u32 netdev_get_link(struct net_device *dev)
2152 struct rhine_private *rp = netdev_priv(dev);
2154 return mii_link_ok(&rp->mii_if);
2157 static u32 netdev_get_msglevel(struct net_device *dev)
2159 struct rhine_private *rp = netdev_priv(dev);
2161 return rp->msg_enable;
2164 static void netdev_set_msglevel(struct net_device *dev, u32 value)
2166 struct rhine_private *rp = netdev_priv(dev);
2168 rp->msg_enable = value;
2171 static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2173 struct rhine_private *rp = netdev_priv(dev);
2175 if (!(rp->quirks & rqWOL))
2178 spin_lock_irq(&rp->lock);
2179 wol->supported = WAKE_PHY | WAKE_MAGIC |
2180 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2181 wol->wolopts = rp->wolopts;
2182 spin_unlock_irq(&rp->lock);
2185 static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2187 struct rhine_private *rp = netdev_priv(dev);
2188 u32 support = WAKE_PHY | WAKE_MAGIC |
2189 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2191 if (!(rp->quirks & rqWOL))
2194 if (wol->wolopts & ~support)
2197 spin_lock_irq(&rp->lock);
2198 rp->wolopts = wol->wolopts;
2199 spin_unlock_irq(&rp->lock);
2204 static const struct ethtool_ops netdev_ethtool_ops = {
2205 .get_drvinfo = netdev_get_drvinfo,
2206 .get_settings = netdev_get_settings,
2207 .set_settings = netdev_set_settings,
2208 .nway_reset = netdev_nway_reset,
2209 .get_link = netdev_get_link,
2210 .get_msglevel = netdev_get_msglevel,
2211 .set_msglevel = netdev_set_msglevel,
2212 .get_wol = rhine_get_wol,
2213 .set_wol = rhine_set_wol,
2216 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2218 struct rhine_private *rp = netdev_priv(dev);
2221 if (!netif_running(dev))
2224 mutex_lock(&rp->task_lock);
2225 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2226 rhine_set_carrier(&rp->mii_if);
2227 mutex_unlock(&rp->task_lock);
2232 static int rhine_close(struct net_device *dev)
2234 struct rhine_private *rp = netdev_priv(dev);
2235 void __iomem *ioaddr = rp->base;
2237 rhine_task_disable(rp);
2238 napi_disable(&rp->napi);
2239 netif_stop_queue(dev);
2241 netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2242 ioread16(ioaddr + ChipCmd));
2244 /* Switch to loopback mode to avoid hardware races. */
2245 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2247 rhine_irq_disable(rp);
2249 /* Stop the chip's Tx and Rx processes. */
2250 iowrite16(CmdStop, ioaddr + ChipCmd);
2252 free_irq(rp->pdev->irq, dev);
2261 static void rhine_remove_one(struct pci_dev *pdev)
2263 struct net_device *dev = pci_get_drvdata(pdev);
2264 struct rhine_private *rp = netdev_priv(dev);
2266 unregister_netdev(dev);
2268 pci_iounmap(pdev, rp->base);
2269 pci_release_regions(pdev);
2272 pci_disable_device(pdev);
2273 pci_set_drvdata(pdev, NULL);
2276 static void rhine_shutdown (struct pci_dev *pdev)
2278 struct net_device *dev = pci_get_drvdata(pdev);
2279 struct rhine_private *rp = netdev_priv(dev);
2280 void __iomem *ioaddr = rp->base;
2282 if (!(rp->quirks & rqWOL))
2283 return; /* Nothing to do for non-WOL adapters */
2285 rhine_power_init(dev);
2287 /* Make sure we use pattern 0, 1 and not 4, 5 */
2288 if (rp->quirks & rq6patterns)
2289 iowrite8(0x04, ioaddr + WOLcgClr);
2291 spin_lock(&rp->lock);
2293 if (rp->wolopts & WAKE_MAGIC) {
2294 iowrite8(WOLmagic, ioaddr + WOLcrSet);
2296 * Turn EEPROM-controlled wake-up back on -- some hardware may
2297 * not cooperate otherwise.
2299 iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2302 if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2303 iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2305 if (rp->wolopts & WAKE_PHY)
2306 iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2308 if (rp->wolopts & WAKE_UCAST)
2309 iowrite8(WOLucast, ioaddr + WOLcrSet);
2312 /* Enable legacy WOL (for old motherboards) */
2313 iowrite8(0x01, ioaddr + PwcfgSet);
2314 iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2317 spin_unlock(&rp->lock);
2319 if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
2320 iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2322 pci_wake_from_d3(pdev, true);
2323 pci_set_power_state(pdev, PCI_D3hot);
2327 #ifdef CONFIG_PM_SLEEP
2328 static int rhine_suspend(struct device *device)
2330 struct pci_dev *pdev = to_pci_dev(device);
2331 struct net_device *dev = pci_get_drvdata(pdev);
2332 struct rhine_private *rp = netdev_priv(dev);
2334 if (!netif_running(dev))
2337 rhine_task_disable(rp);
2338 rhine_irq_disable(rp);
2339 napi_disable(&rp->napi);
2341 netif_device_detach(dev);
2343 rhine_shutdown(pdev);
2348 static int rhine_resume(struct device *device)
2350 struct pci_dev *pdev = to_pci_dev(device);
2351 struct net_device *dev = pci_get_drvdata(pdev);
2352 struct rhine_private *rp = netdev_priv(dev);
2354 if (!netif_running(dev))
2358 enable_mmio(rp->pioaddr, rp->quirks);
2360 rhine_power_init(dev);
2365 rhine_task_enable(rp);
2366 spin_lock_bh(&rp->lock);
2367 init_registers(dev);
2368 spin_unlock_bh(&rp->lock);
2370 netif_device_attach(dev);
2375 static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2376 #define RHINE_PM_OPS (&rhine_pm_ops)
2380 #define RHINE_PM_OPS NULL
2382 #endif /* !CONFIG_PM_SLEEP */
2384 static struct pci_driver rhine_driver = {
2386 .id_table = rhine_pci_tbl,
2387 .probe = rhine_init_one,
2388 .remove = rhine_remove_one,
2389 .shutdown = rhine_shutdown,
2390 .driver.pm = RHINE_PM_OPS,
2393 static struct dmi_system_id __initdata rhine_dmi_table[] = {
2397 DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2398 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2404 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2405 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2411 static int __init rhine_init(void)
2413 /* when a module, this is printed whether or not devices are found in probe */
2415 pr_info("%s\n", version);
2417 if (dmi_check_system(rhine_dmi_table)) {
2418 /* these BIOSes fail at PXE boot if chip is in D3 */
2420 pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2423 pr_info("avoid_D3 set\n");
2425 return pci_register_driver(&rhine_driver);
2429 static void __exit rhine_cleanup(void)
2431 pci_unregister_driver(&rhine_driver);
2435 module_init(rhine_init);
2436 module_exit(rhine_cleanup);