1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
3 Written 1999-2000 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
19 [link no longer provides useful info -jgarzik]
20 Archives of the mailing list are still available at
21 http://www.beowulf.org/pipermail/netdrivers/
25 #define DRV_NAME "sundance"
26 #define DRV_VERSION "1.2"
27 #define DRV_RELDATE "11-Sep-2006"
30 /* The user-configurable values.
31 These may be modified when a driver module is loaded.*/
32 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
33 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
34 Typical is a 64 element hash table based on the Ethernet CRC. */
35 static const int multicast_filter_limit = 32;
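/* Illustrative sketch only (it mirrors what set_rx_mode() below actually
   does and is not itself compiled): the 6-bit filter index for a multicast
   address is taken from the little-endian Ethernet CRC, one bit at a time
   from the CRC's high end:

	u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
	int index = 0, bit;

	for (bit = 0; bit < 6; bit++, crc <<= 1)
		if (crc & 0x80000000)
			index |= 1 << bit;
	// index selects one of 64 bits: 16-bit word index/16, bit index%16
	mc_filter[index/16] |= 1 << (index % 16);
*/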
37 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
38 Setting to > 1518 effectively disables this feature.
39 This chip can receive into offset buffers, so the Alpha does not
40 need a copy-align. */
41 static int rx_copybreak;
42 static int flowctrl=1;
44 /* media[] specifies the media type the NIC operates at.
45 autosense Autosensing active media.
46 10mbps_hd 10Mbps half duplex.
47 10mbps_fd 10Mbps full duplex.
48 100mbps_hd 100Mbps half duplex.
49 100mbps_fd 100Mbps full duplex.
50 0	Autosensing active media.
51 1	10Mbps half duplex.
52 2	10Mbps full duplex.
53 3	100Mbps half duplex.
54 4	100Mbps full duplex.
57 static char *media[MAX_UNITS];
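/* Example usage as a module (a sketch, not the only accepted spelling):
   forcing the first two NICs to 100Mbps full duplex while keeping the
   default copybreak could look like

	modprobe sundance media=100mbps_fd,100mbps_fd flowctrl=1

   The numeric aliases listed above ("4" for 100mbps_fd, etc.) are parsed
   the same way; see the media[] handling in sundance_probe1() below. */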
60 /* Operational parameters that are set at compile time. */
62 /* Keep the ring sizes a power of two for compile efficiency.
63 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
64 Making the Tx ring too large decreases the effectiveness of channel
65 bonding and packet priority, and more than 128 requires modifying the
66 Tx error recovery.
67 Large receive rings merely waste memory. */
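/* For example, with TX_RING_SIZE = 32 an index expression such as
   np->cur_tx % TX_RING_SIZE compiles down to np->cur_tx & 31, a single AND.
   That is why cur_tx/dirty_tx (and their Rx counterparts) below are
   free-running counters that are only reduced modulo the ring size when a
   descriptor is actually addressed. */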
68 #define TX_RING_SIZE 32
69 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
70 #define RX_RING_SIZE 64
72 #define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct netdev_desc))
73 #define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct netdev_desc))
75 /* Operational parameters that usually are not changed. */
76 /* Time in jiffies before concluding the transmitter is hung. */
77 #define TX_TIMEOUT (4*HZ)
78 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
80 /* Include files, designed to support most kernel versions 2.0.0 and later. */
81 #include <linux/module.h>
82 #include <linux/kernel.h>
83 #include <linux/string.h>
84 #include <linux/timer.h>
85 #include <linux/errno.h>
86 #include <linux/ioport.h>
87 #include <linux/interrupt.h>
88 #include <linux/pci.h>
89 #include <linux/netdevice.h>
90 #include <linux/etherdevice.h>
91 #include <linux/skbuff.h>
92 #include <linux/init.h>
93 #include <linux/bitops.h>
94 #include <asm/uaccess.h>
95 #include <asm/processor.h> /* Processor type for cache alignment. */
97 #include <linux/delay.h>
98 #include <linux/spinlock.h>
99 #include <linux/dma-mapping.h>
100 #include <linux/crc32.h>
101 #include <linux/ethtool.h>
102 #include <linux/mii.h>
104 /* These identify the driver base version and may not be removed. */
105 static const char version[] =
106 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
107 " Written by Donald Becker\n";
109 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
110 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
111 MODULE_LICENSE("GPL");
113 module_param(debug, int, 0);
114 module_param(rx_copybreak, int, 0);
115 module_param_array(media, charp, NULL, 0);
116 module_param(flowctrl, int, 0);
117 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
118 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
119 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
124 I. Board Compatibility
126 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
128 II. Board-specific settings
130 III. Driver operation
134 This driver uses two statically allocated fixed-size descriptor lists
135 formed into rings by a branch from the final descriptor to the beginning of
136 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
137 Some chips explicitly use only 2^N sized rings, while others use a
138 'next descriptor' pointer that the driver forms into rings.
140 IIIb/c. Transmit/Receive Structure
142 This driver uses a zero-copy receive and transmit scheme.
143 The driver allocates full frame size skbuffs for the Rx ring buffers at
144 open() time and passes the skb->data field to the chip as receive data
145 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
146 a fresh skbuff is allocated and the frame is copied to the new skbuff.
147 When the incoming frame is larger, the skbuff is passed directly up the
148 protocol stack. Buffers consumed this way are replaced by newly allocated
149 skbuffs in a later phase of receives.
151 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
152 using a full-sized skbuff for small frames vs. the copying costs of larger
153 frames. New boards are typically used in generously configured machines
154 and the underfilled buffers have negligible impact compared to the benefit of
155 a single allocation size, so the default value of zero results in never
156 copying packets. When copying is done, the cost is usually mitigated by using
157 a combined copy/checksum routine. Copying also preloads the cache, which is
158 most useful with small frames.
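As a sketch (names as used by this driver's rx_poll() below; DMA syncs and
error handling omitted), the per-frame decision is:

	if (pkt_len < rx_copybreak &&
	    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);	// align the IP header
		skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data,
					pkt_len);
		skb_put(skb, pkt_len);	// ring buffer stays in place
	} else {
		skb_put(skb = np->rx_skbuff[entry], pkt_len);
		np->rx_skbuff[entry] = NULL;	// replaced by refill_rx()
	}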
160 A subtle aspect of the operation is that the IP header at offset 14 in an
161 ethernet frame isn't longword aligned for further processing.
162 Unaligned buffers are permitted by the Sundance hardware, so
163 frames are received into the skbuff at an offset of "+2", 16-byte aligning
164 the IP header.
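Concretely, init_ring() below sets up each Rx buffer as

	skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
	skb_reserve(skb, 2);	// 2 + 14 byte Ethernet header = IP at +16

so the frame starts two bytes into the buffer and the IP header lands on
a 16-byte boundary.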
166 IIId. Synchronization
168 The driver runs as two independent, single-threaded flows of control. One
169 is the send-packet routine, which enforces single-threaded use by the
170 dev->tbusy flag. The other thread is the interrupt handler, which is single
171 threaded by the hardware and interrupt handling software.
173 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
174 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
175 queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
176 the 'lp->tx_full' flag.
178 The interrupt handler has exclusive control over the Rx ring and records stats
179 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
180 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
181 clears both the tx_full and tbusy flags.
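In this driver the same accounting is done with free-running indices:
cur_tx - dirty_tx is the number of descriptors in flight. start_tx()
stops the queue once that difference reaches TX_QUEUE_LEN - 1, and
intr_handler() wakes it again when it drops below TX_QUEUE_LEN - 4.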
187 The Sundance ST201 datasheet, preliminary version.
188 The Kendin KS8723 datasheet, preliminary version.
189 The ICplus IP100 datasheet, preliminary version.
190 http://www.scyld.com/expert/100mbps.html
191 http://www.scyld.com/expert/NWay.html
197 /* Work-around for Kendin chip bugs. */
198 #ifndef CONFIG_SUNDANCE_MMIO
202 static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
203 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
204 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
205 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
206 { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
207 { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
208 { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
209 { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
212 MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
221 static const struct pci_id_info pci_id_tbl[] = {
222 {"D-Link DFE-550TX FAST Ethernet Adapter"},
223 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
224 {"D-Link DFE-580TX 4 port Server Adapter"},
225 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
226 {"D-Link DL10050-based FAST Ethernet Adapter"},
227 {"Sundance Technology Alta"},
228 {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
229 { } /* terminate list. */
232 /* This driver was written to use PCI memory space, however x86-oriented
233 hardware often uses I/O space accesses. */
235 /* Offsets to the device registers.
236 Unlike software-only systems, device drivers interact with complex hardware.
237 It's not useful to define symbolic names for every register bit in the
238 device. The name can only partially document the semantics and make
239 the driver longer and more difficult to read.
240 In general, only the important configuration values or bits changed
241 multiple times should be defined symbolically.
246 TxDMABurstThresh = 0x08,
247 TxDMAUrgentThresh = 0x09,
248 TxDMAPollPeriod = 0x0a,
253 RxDMABurstThresh = 0x14,
254 RxDMAUrgentThresh = 0x15,
255 RxDMAPollPeriod = 0x16,
275 MulticastFilter0 = 0x60,
276 MulticastFilter1 = 0x64,
283 StatsCarrierError = 0x74,
284 StatsLateColl = 0x75,
285 StatsMultiColl = 0x76,
289 StatsTxXSDefer = 0x7a,
295 /* Aliased and bogus values! */
299 #define ASIC_HI_WORD(x) ((x) + 2)
301 enum ASICCtrl_HiWord_bit {
302 GlobalReset = 0x0001,
307 NetworkReset = 0x0020,
312 /* Bits in the interrupt status/mask registers. */
313 enum intr_status_bits {
314 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
315 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
317 StatsMax=0x0080, LinkChange=0x0100,
318 IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
321 /* Bits in the RxMode register. */
323 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
324 AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
326 /* Bits in MACCtrl. */
327 enum mac_ctrl0_bits {
328 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
329 EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
331 enum mac_ctrl1_bits {
332 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
333 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
334 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
337 /* Bits in WakeEvent register. */
338 enum wake_event_bits {
339 WakePktEnable = 0x01,
340 MagicPktEnable = 0x02,
341 LinkEventEnable = 0x04,
345 /* The Rx and Tx buffer descriptors. */
346 /* Note that using only 32 bit fields simplifies conversion to big-endian
347 architectures. */
351 struct desc_frag { __le32 addr, length; } frag[1];
354 /* Bits in netdev_desc.status */
355 enum desc_status_bits {
357 DescEndPacket=0x4000,
361 DescIntrOnDMADone=0x80000000,
362 DisableAlign = 0x00000001,
365 #define PRIV_ALIGN 15 /* Required alignment mask */
366 /* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
367 within the structure. */
369 struct netdev_private {
370 /* Descriptor rings first for alignment. */
371 struct netdev_desc *rx_ring;
372 struct netdev_desc *tx_ring;
373 struct sk_buff* rx_skbuff[RX_RING_SIZE];
374 struct sk_buff* tx_skbuff[TX_RING_SIZE];
375 dma_addr_t tx_ring_dma;
376 dma_addr_t rx_ring_dma;
377 struct timer_list timer; /* Media monitoring timer. */
378 /* ethtool extra stats */
380 u64 tx_multiple_collisions;
381 u64 tx_single_collisions;
382 u64 tx_late_collisions;
384 u64 tx_deferred_excessive;
391 /* Frequently used values: keep some adjacent for cache effect. */
395 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
396 unsigned int rx_buf_sz; /* Based on MTU+slack. */
397 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
398 unsigned int cur_tx, dirty_tx;
399 /* These values keep track of the transceiver/media in use. */
400 unsigned int flowctrl:1;
401 unsigned int default_port:4; /* Last dev->if_port value. */
402 unsigned int an_enable:1;
404 unsigned int wol_enabled:1; /* Wake on LAN enabled */
405 struct tasklet_struct rx_tasklet;
406 struct tasklet_struct tx_tasklet;
409 /* Multicast and receive mode. */
410 spinlock_t mcastlock; /* SMP lock multicast updates. */
412 /* MII transceiver section. */
413 struct mii_if_info mii_if;
414 int mii_preamble_required;
415 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
416 struct pci_dev *pci_dev;
421 /* The station address location in the EEPROM. */
422 #define EEPROM_SA_OFFSET 0x10
423 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
424 IntrDrvRqst | IntrTxDone | StatsMax | \
427 static int change_mtu(struct net_device *dev, int new_mtu);
428 static int eeprom_read(void __iomem *ioaddr, int location);
429 static int mdio_read(struct net_device *dev, int phy_id, int location);
430 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
431 static int mdio_wait_link(struct net_device *dev, int wait);
432 static int netdev_open(struct net_device *dev);
433 static void check_duplex(struct net_device *dev);
434 static void netdev_timer(unsigned long data);
435 static void tx_timeout(struct net_device *dev);
436 static void init_ring(struct net_device *dev);
437 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
438 static int reset_tx (struct net_device *dev);
439 static irqreturn_t intr_handler(int irq, void *dev_instance);
440 static void rx_poll(unsigned long data);
441 static void tx_poll(unsigned long data);
442 static void refill_rx (struct net_device *dev);
443 static void netdev_error(struct net_device *dev, int intr_status);
445 static void set_rx_mode(struct net_device *dev);
446 static int __set_mac_addr(struct net_device *dev);
447 static int sundance_set_mac_addr(struct net_device *dev, void *data);
448 static struct net_device_stats *get_stats(struct net_device *dev);
449 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
450 static int netdev_close(struct net_device *dev);
451 static const struct ethtool_ops ethtool_ops;
453 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
455 struct netdev_private *np = netdev_priv(dev);
456 void __iomem *ioaddr = np->base + ASICCtrl;
459 /* ST201 documentation states ASICCtrl is a 32-bit register */
460 iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
461 /* ST201 documentation states reset can take up to 1 ms */
463 while (ioread32 (ioaddr) & (ResetBusy << 16)) {
464 if (--countdown == 0) {
465 printk(KERN_WARNING "%s: reset not completed!\n", dev->name);
472 #ifdef CONFIG_NET_POLL_CONTROLLER
473 static void sundance_poll_controller(struct net_device *dev)
475 struct netdev_private *np = netdev_priv(dev);
477 disable_irq(np->pci_dev->irq);
478 intr_handler(np->pci_dev->irq, dev);
479 enable_irq(np->pci_dev->irq);
483 static const struct net_device_ops netdev_ops = {
484 .ndo_open = netdev_open,
485 .ndo_stop = netdev_close,
486 .ndo_start_xmit = start_tx,
487 .ndo_get_stats = get_stats,
488 .ndo_set_rx_mode = set_rx_mode,
489 .ndo_do_ioctl = netdev_ioctl,
490 .ndo_tx_timeout = tx_timeout,
491 .ndo_change_mtu = change_mtu,
492 .ndo_set_mac_address = sundance_set_mac_addr,
493 .ndo_validate_addr = eth_validate_addr,
494 #ifdef CONFIG_NET_POLL_CONTROLLER
495 .ndo_poll_controller = sundance_poll_controller,
499 static int sundance_probe1(struct pci_dev *pdev,
500 const struct pci_device_id *ent)
502 struct net_device *dev;
503 struct netdev_private *np;
505 int chip_idx = ent->driver_data;
508 void __iomem *ioaddr;
517 int phy, phy_end, phy_idx = 0;
519 /* when built into the kernel, we only print version if device is found */
521 static int printed_version;
522 if (!printed_version++)
526 if (pci_enable_device(pdev))
528 pci_set_master(pdev);
532 dev = alloc_etherdev(sizeof(*np));
535 SET_NETDEV_DEV(dev, &pdev->dev);
537 if (pci_request_regions(pdev, DRV_NAME))
540 ioaddr = pci_iomap(pdev, bar, netdev_io_size);
544 for (i = 0; i < 3; i++)
545 ((__le16 *)dev->dev_addr)[i] =
546 cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
548 np = netdev_priv(dev);
551 np->chip_id = chip_idx;
552 np->msg_enable = (1 << debug) - 1;
553 spin_lock_init(&np->lock);
554 spin_lock_init(&np->statlock);
555 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
556 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
558 ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
559 &ring_dma, GFP_KERNEL);
561 goto err_out_cleardev;
562 np->tx_ring = (struct netdev_desc *)ring_space;
563 np->tx_ring_dma = ring_dma;
565 ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
566 &ring_dma, GFP_KERNEL);
568 goto err_out_unmap_tx;
569 np->rx_ring = (struct netdev_desc *)ring_space;
570 np->rx_ring_dma = ring_dma;
572 np->mii_if.dev = dev;
573 np->mii_if.mdio_read = mdio_read;
574 np->mii_if.mdio_write = mdio_write;
575 np->mii_if.phy_id_mask = 0x1f;
576 np->mii_if.reg_num_mask = 0x1f;
578 /* The chip-specific entries in the device structure. */
579 dev->netdev_ops = &netdev_ops;
580 SET_ETHTOOL_OPS(dev, &ethtool_ops);
581 dev->watchdog_timeo = TX_TIMEOUT;
583 pci_set_drvdata(pdev, dev);
585 i = register_netdev(dev);
587 goto err_out_unmap_rx;
589 printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
590 dev->name, pci_id_tbl[chip_idx].name, ioaddr,
593 np->phys[0] = 1; /* Default setting */
594 np->mii_preamble_required++;
597 * It seems some PHYs don't deal well with address 0 being accessed
600 if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
605 phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */
607 for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
608 int phyx = phy & 0x1f;
609 int mii_status = mdio_read(dev, phyx, MII_BMSR);
610 if (mii_status != 0xffff && mii_status != 0x0000) {
611 np->phys[phy_idx++] = phyx;
612 np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
613 if ((mii_status & 0x0040) == 0)
614 np->mii_preamble_required++;
615 printk(KERN_INFO "%s: MII PHY found at address %d, status "
616 "0x%4.4x advertising %4.4x.\n",
617 dev->name, phyx, mii_status, np->mii_if.advertising);
620 np->mii_preamble_required--;
623 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
624 dev->name, ioread32(ioaddr + ASICCtrl));
625 goto err_out_unregister;
628 np->mii_if.phy_id = np->phys[0];
630 /* Parse override configuration */
632 if (card_idx < MAX_UNITS) {
633 if (media[card_idx] != NULL) {
635 if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
636 strcmp (media[card_idx], "4") == 0) {
638 np->mii_if.full_duplex = 1;
639 } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
640 strcmp (media[card_idx], "3") == 0) {
642 np->mii_if.full_duplex = 0;
643 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
644 strcmp (media[card_idx], "2") == 0) {
646 np->mii_if.full_duplex = 1;
647 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
648 strcmp (media[card_idx], "1") == 0) {
650 np->mii_if.full_duplex = 0;
660 if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
661 /* Default 100Mbps Full */
664 np->mii_if.full_duplex = 1;
669 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
671 /* If flow control enabled, we need to advertise it.*/
673 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
674 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
675 /* Force media type */
676 if (!np->an_enable) {
678 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
679 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
680 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
681 printk (KERN_INFO "Override speed=%d, %s duplex\n",
682 np->speed, np->mii_if.full_duplex ? "Full" : "Half");
686 /* Perhaps move the reset here? */
687 /* Reset the chip to erase previous misconfiguration. */
688 if (netif_msg_hw(np))
689 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
690 sundance_reset(dev, 0x00ff << 16);
691 if (netif_msg_hw(np))
692 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
698 unregister_netdev(dev);
700 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
701 np->rx_ring, np->rx_ring_dma);
703 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
704 np->tx_ring, np->tx_ring_dma);
706 pci_set_drvdata(pdev, NULL);
707 pci_iounmap(pdev, ioaddr);
709 pci_release_regions(pdev);
715 static int change_mtu(struct net_device *dev, int new_mtu)
717 if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
719 if (netif_running(dev))
725 #define eeprom_delay(ee_addr) ioread32(ee_addr)
726 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
727 static int eeprom_read(void __iomem *ioaddr, int location)
729 int boguscnt = 10000; /* Typical 1900 ticks. */
730 iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
732 eeprom_delay(ioaddr + EECtrl);
733 if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
734 return ioread16(ioaddr + EEData);
736 } while (--boguscnt > 0);
740 /* MII transceiver control section.
741 Read and write the MII registers using software-generated serial
742 MDIO protocol. See the MII specifications or DP83840A data sheet
745 The maximum data clock rate is 2.5 MHz. The minimum timing is usually
746 met by back-to-back 33 MHz PCI cycles. */
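/* For reference, the 16-bit read command shifted out below is, MSB first,
   <11 01 10 PPPPP RRRRR>: two idle ones, the 01 start and 10 read-opcode
   bits, then the 5-bit PHY and register addresses. That is exactly what
   (0xf6 << 10) | (phy_id << 5) | location encodes in mdio_read(), with the
   turnaround and data bits clocked in afterwards. */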
747 #define mdio_delay() ioread8(mdio_addr)
750 MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
752 #define MDIO_EnbIn (0)
753 #define MDIO_WRITE0 (MDIO_EnbOutput)
754 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
756 /* Generate the preamble required for initial synchronization and
757 a few older transceivers. */
758 static void mdio_sync(void __iomem *mdio_addr)
762 /* Establish sync by sending at least 32 logic ones. */
763 while (--bits >= 0) {
764 iowrite8(MDIO_WRITE1, mdio_addr);
766 iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
771 static int mdio_read(struct net_device *dev, int phy_id, int location)
773 struct netdev_private *np = netdev_priv(dev);
774 void __iomem *mdio_addr = np->base + MIICtrl;
775 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
778 if (np->mii_preamble_required)
779 mdio_sync(mdio_addr);
781 /* Shift the read command bits out. */
782 for (i = 15; i >= 0; i--) {
783 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
785 iowrite8(dataval, mdio_addr);
787 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
790 /* Read the two transition, 16 data, and wire-idle bits. */
791 for (i = 19; i > 0; i--) {
792 iowrite8(MDIO_EnbIn, mdio_addr);
794 retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
795 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
798 return (retval>>1) & 0xffff;
801 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
803 struct netdev_private *np = netdev_priv(dev);
804 void __iomem *mdio_addr = np->base + MIICtrl;
805 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
808 if (np->mii_preamble_required)
809 mdio_sync(mdio_addr);
811 /* Shift the command bits out. */
812 for (i = 31; i >= 0; i--) {
813 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
815 iowrite8(dataval, mdio_addr);
817 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
820 /* Clear out extra bits. */
821 for (i = 2; i > 0; i--) {
822 iowrite8(MDIO_EnbIn, mdio_addr);
824 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
829 static int mdio_wait_link(struct net_device *dev, int wait)
833 struct netdev_private *np;
835 np = netdev_priv(dev);
836 phy_id = np->phys[0];
839 bmsr = mdio_read(dev, phy_id, MII_BMSR);
843 } while (--wait > 0);
847 static int netdev_open(struct net_device *dev)
849 struct netdev_private *np = netdev_priv(dev);
850 void __iomem *ioaddr = np->base;
851 const int irq = np->pci_dev->irq;
855 sundance_reset(dev, 0x00ff << 16);
857 i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
861 if (netif_msg_ifup(np))
862 printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
866 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
867 /* The Tx list pointer is written as packets are queued. */
869 /* Initialize other registers. */
871 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
872 iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
874 iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
877 iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
879 /* Configure the PCI bus bursts and FIFO thresholds. */
881 if (dev->if_port == 0)
882 dev->if_port = np->default_port;
884 spin_lock_init(&np->mcastlock);
887 iowrite16(0, ioaddr + IntrEnable);
888 iowrite16(0, ioaddr + DownCounter);
889 /* Set the chip to poll every N*320nsec. */
890 iowrite8(100, ioaddr + RxDMAPollPeriod);
891 iowrite8(127, ioaddr + TxDMAPollPeriod);
892 /* Fix DFE-580TX packet drop issue */
893 if (np->pci_dev->revision >= 0x14)
894 iowrite8(0x01, ioaddr + DebugCtrl1);
895 netif_start_queue(dev);
897 spin_lock_irqsave(&np->lock, flags);
899 spin_unlock_irqrestore(&np->lock, flags);
901 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
904 iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent);
907 if (netif_msg_ifup(np))
908 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
909 "MAC Control %x, %4.4x %4.4x.\n",
910 dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
911 ioread32(ioaddr + MACCtrl0),
912 ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
914 /* Set the timer to check for link beat. */
915 init_timer(&np->timer);
916 np->timer.expires = jiffies + 3*HZ;
917 np->timer.data = (unsigned long)dev;
918 np->timer.function = netdev_timer; /* timer handler */
919 add_timer(&np->timer);
921 /* Enable interrupts by setting the interrupt mask. */
922 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
927 static void check_duplex(struct net_device *dev)
929 struct netdev_private *np = netdev_priv(dev);
930 void __iomem *ioaddr = np->base;
931 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
932 int negotiated = mii_lpa & np->mii_if.advertising;
936 if (!np->an_enable || mii_lpa == 0xffff) {
937 if (np->mii_if.full_duplex)
938 iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
943 /* Autonegotiation */
944 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
945 if (np->mii_if.full_duplex != duplex) {
946 np->mii_if.full_duplex = duplex;
947 if (netif_msg_link(np))
948 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
949 "negotiated capability %4.4x.\n", dev->name,
950 duplex ? "full" : "half", np->phys[0], negotiated);
951 iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
955 static void netdev_timer(unsigned long data)
957 struct net_device *dev = (struct net_device *)data;
958 struct netdev_private *np = netdev_priv(dev);
959 void __iomem *ioaddr = np->base;
960 int next_tick = 10*HZ;
962 if (netif_msg_timer(np)) {
963 printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
965 dev->name, ioread16(ioaddr + IntrEnable),
966 ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
969 np->timer.expires = jiffies + next_tick;
970 add_timer(&np->timer);
973 static void tx_timeout(struct net_device *dev)
975 struct netdev_private *np = netdev_priv(dev);
976 void __iomem *ioaddr = np->base;
979 netif_stop_queue(dev);
980 tasklet_disable(&np->tx_tasklet);
981 iowrite16(0, ioaddr + IntrEnable);
982 printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
984 " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
985 ioread8(ioaddr + TxFrameId));
989 for (i=0; i<TX_RING_SIZE; i++) {
990 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
991 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
992 le32_to_cpu(np->tx_ring[i].next_desc),
993 le32_to_cpu(np->tx_ring[i].status),
994 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
995 le32_to_cpu(np->tx_ring[i].frag[0].addr),
996 le32_to_cpu(np->tx_ring[i].frag[0].length));
998 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
999 ioread32(np->base + TxListPtr),
1000 netif_queue_stopped(dev));
1001 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
1002 np->cur_tx, np->cur_tx % TX_RING_SIZE,
1003 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
1004 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
1005 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
1007 spin_lock_irqsave(&np->lock, flag);
1009 /* Stop and restart the chip's Tx processes. */
1011 spin_unlock_irqrestore(&np->lock, flag);
1015 dev->trans_start = jiffies; /* prevent tx timeout */
1016 dev->stats.tx_errors++;
1017 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1018 netif_wake_queue(dev);
1020 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1021 tasklet_enable(&np->tx_tasklet);
1025 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1026 static void init_ring(struct net_device *dev)
1028 struct netdev_private *np = netdev_priv(dev);
1031 np->cur_rx = np->cur_tx = 0;
1032 np->dirty_rx = np->dirty_tx = 0;
1035 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1037 /* Initialize all Rx descriptors. */
1038 for (i = 0; i < RX_RING_SIZE; i++) {
1039 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1040 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1041 np->rx_ring[i].status = 0;
1042 np->rx_ring[i].frag[0].length = 0;
1043 np->rx_skbuff[i] = NULL;
1046 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1047 for (i = 0; i < RX_RING_SIZE; i++) {
1048 struct sk_buff *skb =
1049 netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1050 np->rx_skbuff[i] = skb;
1053 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1054 np->rx_ring[i].frag[0].addr = cpu_to_le32(
1055 dma_map_single(&np->pci_dev->dev, skb->data,
1056 np->rx_buf_sz, DMA_FROM_DEVICE));
1057 if (dma_mapping_error(&np->pci_dev->dev,
1058 np->rx_ring[i].frag[0].addr)) {
1060 np->rx_skbuff[i] = NULL;
1063 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1065 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1067 for (i = 0; i < TX_RING_SIZE; i++) {
1068 np->tx_skbuff[i] = NULL;
1069 np->tx_ring[i].status = 0;
1073 static void tx_poll (unsigned long data)
1075 struct net_device *dev = (struct net_device *)data;
1076 struct netdev_private *np = netdev_priv(dev);
1077 unsigned head = np->cur_task % TX_RING_SIZE;
1078 struct netdev_desc *txdesc =
1079 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1081 /* Chain the next pointer */
1082 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1083 int entry = np->cur_task % TX_RING_SIZE;
1084 txdesc = &np->tx_ring[entry];
1086 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1087 entry*sizeof(struct netdev_desc));
1089 np->last_tx = txdesc;
1091 /* Indicate the latest descriptor of tx ring */
1092 txdesc->status |= cpu_to_le32(DescIntrOnTx);
1094 if (ioread32 (np->base + TxListPtr) == 0)
1095 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1096 np->base + TxListPtr);
1100 start_tx (struct sk_buff *skb, struct net_device *dev)
1102 struct netdev_private *np = netdev_priv(dev);
1103 struct netdev_desc *txdesc;
1106 /* Calculate the next Tx descriptor entry. */
1107 entry = np->cur_tx % TX_RING_SIZE;
1108 np->tx_skbuff[entry] = skb;
1109 txdesc = &np->tx_ring[entry];
1111 txdesc->next_desc = 0;
1112 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1113 txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1114 skb->data, skb->len, DMA_TO_DEVICE));
1115 if (dma_mapping_error(&np->pci_dev->dev,
1116 txdesc->frag[0].addr))
1118 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1120 /* Increment cur_tx before tasklet_schedule() */
1123 /* Schedule a tx_poll() task */
1124 tasklet_schedule(&np->tx_tasklet);
1126 /* On some architectures: explicitly flush cache lines here. */
1127 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1128 !netif_queue_stopped(dev)) {
1131 netif_stop_queue (dev);
1133 if (netif_msg_tx_queued(np)) {
1135 "%s: Transmit frame #%d queued in slot %d.\n",
1136 dev->name, np->cur_tx, entry);
1138 return NETDEV_TX_OK;
1142 np->tx_skbuff[entry] = NULL;
1143 dev->stats.tx_dropped++;
1144 return NETDEV_TX_OK;
1147 /* Reset the hardware Tx and free all queued Tx buffers */
1149 reset_tx (struct net_device *dev)
1151 struct netdev_private *np = netdev_priv(dev);
1152 void __iomem *ioaddr = np->base;
1153 struct sk_buff *skb;
1156 /* Reset Tx logic, TxListPtr will be cleared */
1157 iowrite16 (TxDisable, ioaddr + MACCtrl1);
1158 sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1160 /* free all tx skbuff */
1161 for (i = 0; i < TX_RING_SIZE; i++) {
1162 np->tx_ring[i].next_desc = 0;
1164 skb = np->tx_skbuff[i];
1166 dma_unmap_single(&np->pci_dev->dev,
1167 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1168 skb->len, DMA_TO_DEVICE);
1169 dev_kfree_skb_any(skb);
1170 np->tx_skbuff[i] = NULL;
1171 dev->stats.tx_dropped++;
1174 np->cur_tx = np->dirty_tx = 0;
1178 iowrite8(127, ioaddr + TxDMAPollPeriod);
1180 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1184 /* The interrupt handler cleans up after the Tx thread,
1185 and schedules the Rx work as a tasklet */
1186 static irqreturn_t intr_handler(int irq, void *dev_instance)
1188 struct net_device *dev = (struct net_device *)dev_instance;
1189 struct netdev_private *np = netdev_priv(dev);
1190 void __iomem *ioaddr = np->base;
1199 int intr_status = ioread16(ioaddr + IntrStatus);
1200 iowrite16(intr_status, ioaddr + IntrStatus);
1202 if (netif_msg_intr(np))
1203 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1204 dev->name, intr_status);
1206 if (!(intr_status & DEFAULT_INTR))
1211 if (intr_status & (IntrRxDMADone)) {
1212 iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1213 ioaddr + IntrEnable);
1215 np->budget = RX_BUDGET;
1216 tasklet_schedule(&np->rx_tasklet);
1218 if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1219 tx_status = ioread16 (ioaddr + TxStatus);
1220 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1221 if (netif_msg_tx_done(np))
1223 ("%s: Transmit status is %2.2x.\n",
1224 dev->name, tx_status);
1225 if (tx_status & 0x1e) {
1226 if (netif_msg_tx_err(np))
1227 printk("%s: Transmit error status %4.4x.\n",
1228 dev->name, tx_status);
1229 dev->stats.tx_errors++;
1230 if (tx_status & 0x10)
1231 dev->stats.tx_fifo_errors++;
1232 if (tx_status & 0x08)
1233 dev->stats.collisions++;
1234 if (tx_status & 0x04)
1235 dev->stats.tx_fifo_errors++;
1236 if (tx_status & 0x02)
1237 dev->stats.tx_window_errors++;
1240 ** This reset has been verified on
1241 ** DFE-580TX boards ! phdm@macqel.be.
1243 if (tx_status & 0x10) { /* TxUnderrun */
1244 /* Restart Tx FIFO and transmitter */
1245 sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1246 /* No need to reset the Tx pointer here */
1248 /* Restart the Tx. Need to make sure Tx is enabled */
1251 iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1252 if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1257 /* Yup, this is a documentation bug. It cost me *hours*. */
1258 iowrite16 (0, ioaddr + TxStatus);
1260 iowrite32(5000, ioaddr + DownCounter);
1263 tx_status = ioread16 (ioaddr + TxStatus);
1265 hw_frame_id = (tx_status >> 8) & 0xff;
1267 hw_frame_id = ioread8(ioaddr + TxFrameId);
1270 if (np->pci_dev->revision >= 0x14) {
1271 spin_lock(&np->lock);
1272 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1273 int entry = np->dirty_tx % TX_RING_SIZE;
1274 struct sk_buff *skb;
1276 sw_frame_id = (le32_to_cpu(
1277 np->tx_ring[entry].status) >> 2) & 0xff;
1278 if (sw_frame_id == hw_frame_id &&
1279 !(le32_to_cpu(np->tx_ring[entry].status)
1282 if (sw_frame_id == (hw_frame_id + 1) %
1285 skb = np->tx_skbuff[entry];
1286 /* Free the original skb. */
1287 dma_unmap_single(&np->pci_dev->dev,
1288 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1289 skb->len, DMA_TO_DEVICE);
1290 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1291 np->tx_skbuff[entry] = NULL;
1292 np->tx_ring[entry].frag[0].addr = 0;
1293 np->tx_ring[entry].frag[0].length = 0;
1295 spin_unlock(&np->lock);
1297 spin_lock(&np->lock);
1298 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1299 int entry = np->dirty_tx % TX_RING_SIZE;
1300 struct sk_buff *skb;
1301 if (!(le32_to_cpu(np->tx_ring[entry].status)
1304 skb = np->tx_skbuff[entry];
1305 /* Free the original skb. */
1306 dma_unmap_single(&np->pci_dev->dev,
1307 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1308 skb->len, DMA_TO_DEVICE);
1309 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1310 np->tx_skbuff[entry] = NULL;
1311 np->tx_ring[entry].frag[0].addr = 0;
1312 np->tx_ring[entry].frag[0].length = 0;
1314 spin_unlock(&np->lock);
1317 if (netif_queue_stopped(dev) &&
1318 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1319 /* The ring is no longer full, clear busy flag. */
1320 netif_wake_queue (dev);
1322 /* Abnormal error summary/uncommon events handlers. */
1323 if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1324 netdev_error(dev, intr_status);
1326 if (netif_msg_intr(np))
1327 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1328 dev->name, ioread16(ioaddr + IntrStatus));
1329 return IRQ_RETVAL(handled);
1332 static void rx_poll(unsigned long data)
1334 struct net_device *dev = (struct net_device *)data;
1335 struct netdev_private *np = netdev_priv(dev);
1336 int entry = np->cur_rx % RX_RING_SIZE;
1337 int boguscnt = np->budget;
1338 void __iomem *ioaddr = np->base;
1341 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1343 struct netdev_desc *desc = &(np->rx_ring[entry]);
1344 u32 frame_status = le32_to_cpu(desc->status);
1347 if (--boguscnt < 0) {
1350 if (!(frame_status & DescOwn))
1352 pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1353 if (netif_msg_rx_status(np))
1354 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1356 if (frame_status & 0x001f4000) {
1357 /* There was an error. */
1358 if (netif_msg_rx_err(np))
1359 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1361 dev->stats.rx_errors++;
1362 if (frame_status & 0x00100000)
1363 dev->stats.rx_length_errors++;
1364 if (frame_status & 0x00010000)
1365 dev->stats.rx_fifo_errors++;
1366 if (frame_status & 0x00060000)
1367 dev->stats.rx_frame_errors++;
1368 if (frame_status & 0x00080000)
1369 dev->stats.rx_crc_errors++;
1370 if (frame_status & 0x00100000) {
1371 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1373 dev->name, frame_status);
1376 struct sk_buff *skb;
1377 #ifndef final_version
1378 if (netif_msg_rx_status(np))
1379 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1380 ", bogus_cnt %d.\n",
1383 /* Check if the packet is long enough to accept without copying
1384 to a minimally-sized skbuff. */
1385 if (pkt_len < rx_copybreak &&
1386 (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1387 skb_reserve(skb, 2); /* 16 byte align the IP header */
1388 dma_sync_single_for_cpu(&np->pci_dev->dev,
1389 le32_to_cpu(desc->frag[0].addr),
1390 np->rx_buf_sz, DMA_FROM_DEVICE);
1391 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1392 dma_sync_single_for_device(&np->pci_dev->dev,
1393 le32_to_cpu(desc->frag[0].addr),
1394 np->rx_buf_sz, DMA_FROM_DEVICE);
1395 skb_put(skb, pkt_len);
1397 dma_unmap_single(&np->pci_dev->dev,
1398 le32_to_cpu(desc->frag[0].addr),
1399 np->rx_buf_sz, DMA_FROM_DEVICE);
1400 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1401 np->rx_skbuff[entry] = NULL;
1403 skb->protocol = eth_type_trans(skb, dev);
1404 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1407 entry = (entry + 1) % RX_RING_SIZE;
1412 np->budget -= received;
1413 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1421 np->budget -= received;
1422 if (np->budget <= 0)
1423 np->budget = RX_BUDGET;
1424 tasklet_schedule(&np->rx_tasklet);
1427 static void refill_rx (struct net_device *dev)
1429 struct netdev_private *np = netdev_priv(dev);
1433 /* Refill the Rx ring buffers. */
1434 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1435 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1436 struct sk_buff *skb;
1437 entry = np->dirty_rx % RX_RING_SIZE;
1438 if (np->rx_skbuff[entry] == NULL) {
1439 skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1440 np->rx_skbuff[entry] = skb;
1442 break; /* Better luck next round. */
1443 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1444 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1445 dma_map_single(&np->pci_dev->dev, skb->data,
1446 np->rx_buf_sz, DMA_FROM_DEVICE));
1447 if (dma_mapping_error(&np->pci_dev->dev,
1448 np->rx_ring[entry].frag[0].addr)) {
1449 dev_kfree_skb_irq(skb);
1450 np->rx_skbuff[entry] = NULL;
1454 /* Perhaps we need not reset this field. */
1455 np->rx_ring[entry].frag[0].length =
1456 cpu_to_le32(np->rx_buf_sz | LastFrag);
1457 np->rx_ring[entry].status = 0;
1461 static void netdev_error(struct net_device *dev, int intr_status)
1463 struct netdev_private *np = netdev_priv(dev);
1464 void __iomem *ioaddr = np->base;
1465 u16 mii_ctl, mii_advertise, mii_lpa;
1468 if (intr_status & LinkChange) {
1469 if (mdio_wait_link(dev, 10) == 0) {
1470 printk(KERN_INFO "%s: Link up\n", dev->name);
1471 if (np->an_enable) {
1472 mii_advertise = mdio_read(dev, np->phys[0],
1474 mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1475 mii_advertise &= mii_lpa;
1476 printk(KERN_INFO "%s: Link changed: ",
1478 if (mii_advertise & ADVERTISE_100FULL) {
1480 printk("100Mbps, full duplex\n");
1481 } else if (mii_advertise & ADVERTISE_100HALF) {
1483 printk("100Mbps, half duplex\n");
1484 } else if (mii_advertise & ADVERTISE_10FULL) {
1486 printk("10Mbps, full duplex\n");
1487 } else if (mii_advertise & ADVERTISE_10HALF) {
1489 printk("10Mbps, half duplex\n");
1494 mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1495 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1497 printk(KERN_INFO "%s: Link changed: %dMbps ,",
1499 printk("%s duplex.\n",
1500 (mii_ctl & BMCR_FULLDPLX) ?
1504 if (np->flowctrl && np->mii_if.full_duplex) {
1505 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1506 ioaddr + MulticastFilter1+2);
1507 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1510 netif_carrier_on(dev);
1512 printk(KERN_INFO "%s: Link down\n", dev->name);
1513 netif_carrier_off(dev);
1516 if (intr_status & StatsMax) {
1519 if (intr_status & IntrPCIErr) {
1520 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1521 dev->name, intr_status);
1522 /* We must do a global reset of DMA to continue. */
1526 static struct net_device_stats *get_stats(struct net_device *dev)
1528 struct netdev_private *np = netdev_priv(dev);
1529 void __iomem *ioaddr = np->base;
1530 unsigned long flags;
1531 u8 late_coll, single_coll, mult_coll;
1533 spin_lock_irqsave(&np->statlock, flags);
1534 /* The chip only needs to report frames it silently dropped. */
1535 dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1536 dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1537 dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1538 dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1540 mult_coll = ioread8(ioaddr + StatsMultiColl);
1541 np->xstats.tx_multiple_collisions += mult_coll;
1542 single_coll = ioread8(ioaddr + StatsOneColl);
1543 np->xstats.tx_single_collisions += single_coll;
1544 late_coll = ioread8(ioaddr + StatsLateColl);
1545 np->xstats.tx_late_collisions += late_coll;
1546 dev->stats.collisions += mult_coll + single_coll + late_coll;
1550 np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
1551 np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
1552 np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
1553 np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
1554 np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
1555 np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
1556 np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
1558 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1559 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1560 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1561 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1563 spin_unlock_irqrestore(&np->statlock, flags);
1568 static void set_rx_mode(struct net_device *dev)
1570 struct netdev_private *np = netdev_priv(dev);
1571 void __iomem *ioaddr = np->base;
1572 u16 mc_filter[4]; /* Multicast hash filter */
1576 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1577 memset(mc_filter, 0xff, sizeof(mc_filter));
1578 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1579 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1580 (dev->flags & IFF_ALLMULTI)) {
1581 /* Too many to match, or accept all multicasts. */
1582 memset(mc_filter, 0xff, sizeof(mc_filter));
1583 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1584 } else if (!netdev_mc_empty(dev)) {
1585 struct netdev_hw_addr *ha;
1589 memset (mc_filter, 0, sizeof (mc_filter));
1590 netdev_for_each_mc_addr(ha, dev) {
1591 crc = ether_crc_le(ETH_ALEN, ha->addr);
1592 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1593 if (crc & 0x80000000) index |= 1 << bit;
1594 mc_filter[index/16] |= (1 << (index % 16));
1596 rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1598 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1601 if (np->mii_if.full_duplex && np->flowctrl)
1602 mc_filter[3] |= 0x0200;
1604 for (i = 0; i < 4; i++)
1605 iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1606 iowrite8(rx_mode, ioaddr + RxMode);
1609 static int __set_mac_addr(struct net_device *dev)
1611 struct netdev_private *np = netdev_priv(dev);
1614 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1615 iowrite16(addr16, np->base + StationAddr);
1616 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1617 iowrite16(addr16, np->base + StationAddr+2);
1618 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1619 iowrite16(addr16, np->base + StationAddr+4);
1623 /* Invoked with rtnl_lock held */
1624 static int sundance_set_mac_addr(struct net_device *dev, void *data)
1626 const struct sockaddr *addr = data;
1628 if (!is_valid_ether_addr(addr->sa_data))
1629 return -EADDRNOTAVAIL;
1630 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1631 __set_mac_addr(dev);
1636 static const struct {
1637 const char name[ETH_GSTRING_LEN];
1638 } sundance_stats[] = {
1639 { "tx_multiple_collisions" },
1640 { "tx_single_collisions" },
1641 { "tx_late_collisions" },
1643 { "tx_deferred_excessive" },
1651 static int check_if_running(struct net_device *dev)
1653 if (!netif_running(dev))
1658 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1660 struct netdev_private *np = netdev_priv(dev);
1661 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1662 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1663 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1666 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1668 struct netdev_private *np = netdev_priv(dev);
1669 spin_lock_irq(&np->lock);
1670 mii_ethtool_gset(&np->mii_if, ecmd);
1671 spin_unlock_irq(&np->lock);
1675 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1677 struct netdev_private *np = netdev_priv(dev);
1679 spin_lock_irq(&np->lock);
1680 res = mii_ethtool_sset(&np->mii_if, ecmd);
1681 spin_unlock_irq(&np->lock);
1685 static int nway_reset(struct net_device *dev)
1687 struct netdev_private *np = netdev_priv(dev);
1688 return mii_nway_restart(&np->mii_if);
1691 static u32 get_link(struct net_device *dev)
1693 struct netdev_private *np = netdev_priv(dev);
1694 return mii_link_ok(&np->mii_if);
1697 static u32 get_msglevel(struct net_device *dev)
1699 struct netdev_private *np = netdev_priv(dev);
1700 return np->msg_enable;
1703 static void set_msglevel(struct net_device *dev, u32 val)
1705 struct netdev_private *np = netdev_priv(dev);
1706 np->msg_enable = val;
1709 static void get_strings(struct net_device *dev, u32 stringset,
1712 if (stringset == ETH_SS_STATS)
1713 memcpy(data, sundance_stats, sizeof(sundance_stats));
1716 static int get_sset_count(struct net_device *dev, int sset)
1720 return ARRAY_SIZE(sundance_stats);
1726 static void get_ethtool_stats(struct net_device *dev,
1727 struct ethtool_stats *stats, u64 *data)
1729 struct netdev_private *np = netdev_priv(dev);
1733 data[i++] = np->xstats.tx_multiple_collisions;
1734 data[i++] = np->xstats.tx_single_collisions;
1735 data[i++] = np->xstats.tx_late_collisions;
1736 data[i++] = np->xstats.tx_deferred;
1737 data[i++] = np->xstats.tx_deferred_excessive;
1738 data[i++] = np->xstats.tx_aborted;
1739 data[i++] = np->xstats.tx_bcasts;
1740 data[i++] = np->xstats.rx_bcasts;
1741 data[i++] = np->xstats.tx_mcasts;
1742 data[i++] = np->xstats.rx_mcasts;
1747 static void sundance_get_wol(struct net_device *dev,
1748 struct ethtool_wolinfo *wol)
1750 struct netdev_private *np = netdev_priv(dev);
1751 void __iomem *ioaddr = np->base;
1756 wol->supported = (WAKE_PHY | WAKE_MAGIC);
1757 if (!np->wol_enabled)
1760 wol_bits = ioread8(ioaddr + WakeEvent);
1761 if (wol_bits & MagicPktEnable)
1762 wol->wolopts |= WAKE_MAGIC;
1763 if (wol_bits & LinkEventEnable)
1764 wol->wolopts |= WAKE_PHY;
1767 static int sundance_set_wol(struct net_device *dev,
1768 struct ethtool_wolinfo *wol)
1770 struct netdev_private *np = netdev_priv(dev);
1771 void __iomem *ioaddr = np->base;
1774 if (!device_can_wakeup(&np->pci_dev->dev))
1777 np->wol_enabled = !!(wol->wolopts);
1778 wol_bits = ioread8(ioaddr + WakeEvent);
1779 wol_bits &= ~(WakePktEnable | MagicPktEnable |
1780 LinkEventEnable | WolEnable);
1782 if (np->wol_enabled) {
1783 if (wol->wolopts & WAKE_MAGIC)
1784 wol_bits |= (MagicPktEnable | WolEnable);
1785 if (wol->wolopts & WAKE_PHY)
1786 wol_bits |= (LinkEventEnable | WolEnable);
1788 iowrite8(wol_bits, ioaddr + WakeEvent);
1790 device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
1795 #define sundance_get_wol NULL
1796 #define sundance_set_wol NULL
1797 #endif /* CONFIG_PM */
1799 static const struct ethtool_ops ethtool_ops = {
1800 .begin = check_if_running,
1801 .get_drvinfo = get_drvinfo,
1802 .get_settings = get_settings,
1803 .set_settings = set_settings,
1804 .nway_reset = nway_reset,
1805 .get_link = get_link,
1806 .get_wol = sundance_get_wol,
1807 .set_wol = sundance_set_wol,
1808 .get_msglevel = get_msglevel,
1809 .set_msglevel = set_msglevel,
1810 .get_strings = get_strings,
1811 .get_sset_count = get_sset_count,
1812 .get_ethtool_stats = get_ethtool_stats,
1815 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1817 struct netdev_private *np = netdev_priv(dev);
1820 if (!netif_running(dev))
1823 spin_lock_irq(&np->lock);
1824 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1825 spin_unlock_irq(&np->lock);
1830 static int netdev_close(struct net_device *dev)
1832 struct netdev_private *np = netdev_priv(dev);
1833 void __iomem *ioaddr = np->base;
1834 struct sk_buff *skb;
1837 /* Wait for the tasklets to finish, then kill them */
1838 tasklet_kill(&np->rx_tasklet);
1839 tasklet_kill(&np->tx_tasklet);
1845 netif_stop_queue(dev);
1847 if (netif_msg_ifdown(np)) {
1848 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1849 "Rx %4.4x Int %2.2x.\n",
1850 dev->name, ioread8(ioaddr + TxStatus),
1851 ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1852 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1853 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1856 /* Disable interrupts by clearing the interrupt mask. */
1857 iowrite16(0x0000, ioaddr + IntrEnable);
1859 /* Disable Rx and Tx DMA so resources can be released safely */
1860 iowrite32(0x500, ioaddr + DMACtrl);
1862 /* Stop the chip's Tx and Rx processes. */
1863 iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1865 for (i = 2000; i > 0; i--) {
1866 if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1871 iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1872 ioaddr + ASIC_HI_WORD(ASICCtrl));
1874 for (i = 2000; i > 0; i--) {
1875 if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
1881 if (netif_msg_hw(np)) {
1882 printk(KERN_DEBUG " Tx ring at %8.8x:\n",
1883 (int)(np->tx_ring_dma));
1884 for (i = 0; i < TX_RING_SIZE; i++)
1885 printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1886 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1887 np->tx_ring[i].frag[0].length);
1888 printk(KERN_DEBUG " Rx ring %8.8x:\n",
1889 (int)(np->rx_ring_dma));
1890 for (i = 0; i < /*RX_RING_SIZE*/4; i++) {
1891 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1892 i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1893 np->rx_ring[i].frag[0].length);
1896 #endif /* __i386__ debugging only */
1898 free_irq(np->pci_dev->irq, dev);
1900 del_timer_sync(&np->timer);
1902 /* Free all the skbuffs in the Rx queue. */
1903 for (i = 0; i < RX_RING_SIZE; i++) {
1904 np->rx_ring[i].status = 0;
1905 skb = np->rx_skbuff[i];
1907 dma_unmap_single(&np->pci_dev->dev,
1908 le32_to_cpu(np->rx_ring[i].frag[0].addr),
1909 np->rx_buf_sz, DMA_FROM_DEVICE);
1911 np->rx_skbuff[i] = NULL;
1913 np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1915 for (i = 0; i < TX_RING_SIZE; i++) {
1916 np->tx_ring[i].next_desc = 0;
1917 skb = np->tx_skbuff[i];
1919 dma_unmap_single(&np->pci_dev->dev,
1920 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1921 skb->len, DMA_TO_DEVICE);
1923 np->tx_skbuff[i] = NULL;
1930 static void sundance_remove1(struct pci_dev *pdev)
1932 struct net_device *dev = pci_get_drvdata(pdev);
1935 struct netdev_private *np = netdev_priv(dev);
1936 unregister_netdev(dev);
1937 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
1938 np->rx_ring, np->rx_ring_dma);
1939 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
1940 np->tx_ring, np->tx_ring_dma);
1941 pci_iounmap(pdev, np->base);
1942 pci_release_regions(pdev);
1944 pci_set_drvdata(pdev, NULL);
1950 static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
1952 struct net_device *dev = pci_get_drvdata(pci_dev);
1953 struct netdev_private *np = netdev_priv(dev);
1954 void __iomem *ioaddr = np->base;
1956 if (!netif_running(dev))
1960 netif_device_detach(dev);
1962 pci_save_state(pci_dev);
1963 if (np->wol_enabled) {
1964 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1965 iowrite16(RxEnable, ioaddr + MACCtrl1);
1967 pci_enable_wake(pci_dev, pci_choose_state(pci_dev, state),
1969 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
1974 static int sundance_resume(struct pci_dev *pci_dev)
1976 struct net_device *dev = pci_get_drvdata(pci_dev);
1979 if (!netif_running(dev))
1982 pci_set_power_state(pci_dev, PCI_D0);
1983 pci_restore_state(pci_dev);
1984 pci_enable_wake(pci_dev, PCI_D0, 0);
1986 err = netdev_open(dev);
1988 printk(KERN_ERR "%s: Can't resume interface!\n",
1993 netif_device_attach(dev);
1999 #endif /* CONFIG_PM */
2001 static struct pci_driver sundance_driver = {
2003 .id_table = sundance_pci_tbl,
2004 .probe = sundance_probe1,
2005 .remove = sundance_remove1,
2007 .suspend = sundance_suspend,
2008 .resume = sundance_resume,
2009 #endif /* CONFIG_PM */
2012 static int __init sundance_init(void)
2014 /* when a module, this is printed whether or not devices are found in probe */
2018 return pci_register_driver(&sundance_driver);
2021 static void __exit sundance_exit(void)
2023 pci_unregister_driver(&sundance_driver);
2026 module_init(sundance_init);
2027 module_exit(sundance_exit);