1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
3 Written 1999-2000 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
19 [link no longer provides useful info -jgarzik]
20 Archives of the mailing list are still available at
21 http://www.beowulf.org/pipermail/netdrivers/
25 #define DRV_NAME "sundance"
26 #define DRV_VERSION "1.2"
27 #define DRV_RELDATE "11-Sep-2006"
30 /* The user-configurable values.
31 These may be modified when a driver module is loaded.*/
32 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
33 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
34 Typical is a 64 element hash table based on the Ethernet CRC. */
35 static const int multicast_filter_limit = 32;
37 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
38 Setting to > 1518 effectively disables this feature.
39 This chip can receive into offset buffers, so the Alpha does not
40 need a copy-align. */
41 static int rx_copybreak;
42 static int flowctrl=1;
44 /* media[] specifies the media type the NIC operates at.
45 autosense Autosensing active media.
46 10mbps_hd 10Mbps half duplex.
47 10mbps_fd 10Mbps full duplex.
48 100mbps_hd 100Mbps half duplex.
49 100mbps_fd 100Mbps full duplex.
50 0 Autosensing active media.
51 1 10Mbps half duplex.
52 2 10Mbps full duplex.
53 3 100Mbps half duplex.
54 4 100Mbps full duplex.
57 static char *media[MAX_UNITS];
60 /* Operational parameters that are set at compile time. */
62 /* Keep the ring sizes a power of two for compile efficiency.
63 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
64 Making the Tx ring too large decreases the effectiveness of channel
65 bonding and packet priority, and more than 128 requires modifying the
66 Tx error recovery.
67 Large receive rings merely waste memory. */
68 #define TX_RING_SIZE 32
69 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
70 #define RX_RING_SIZE 64
72 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
73 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
75 /* Operational parameters that usually are not changed. */
76 /* Time in jiffies before concluding the transmitter is hung. */
77 #define TX_TIMEOUT (4*HZ)
78 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
80 /* Include files, designed to support most kernel versions 2.0.0 and later. */
81 #include <linux/module.h>
82 #include <linux/kernel.h>
83 #include <linux/string.h>
84 #include <linux/timer.h>
85 #include <linux/errno.h>
86 #include <linux/ioport.h>
87 #include <linux/interrupt.h>
88 #include <linux/pci.h>
89 #include <linux/netdevice.h>
90 #include <linux/etherdevice.h>
91 #include <linux/skbuff.h>
92 #include <linux/init.h>
93 #include <linux/bitops.h>
94 #include <asm/uaccess.h>
95 #include <asm/processor.h> /* Processor type for cache alignment. */
97 #include <linux/delay.h>
98 #include <linux/spinlock.h>
99 #ifndef _COMPAT_WITH_OLD_KERNEL
100 #include <linux/crc32.h>
101 #include <linux/ethtool.h>
102 #include <linux/mii.h>
110 /* These identify the driver base version and may not be removed. */
111 static const char version[] __devinitconst =
112 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
113 " Written by Donald Becker\n";
115 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
116 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
117 MODULE_LICENSE("GPL");
119 module_param(debug, int, 0);
120 module_param(rx_copybreak, int, 0);
121 module_param_array(media, charp, NULL, 0);
122 module_param(flowctrl, int, 0);
123 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
124 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
125 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
130 I. Board Compatibility
132 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
134 II. Board-specific settings
136 III. Driver operation
140 This driver uses two statically allocated fixed-size descriptor lists
141 formed into rings by a branch from the final descriptor to the beginning of
142 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
143 Some chips explicitly use only 2^N sized rings, while others use a
144 'next descriptor' pointer that the driver forms into rings.
146 IIIb/c. Transmit/Receive Structure
148 This driver uses a zero-copy receive and transmit scheme.
149 The driver allocates full frame size skbuffs for the Rx ring buffers at
150 open() time and passes the skb->data field to the chip as receive data
151 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
152 a fresh skbuff is allocated and the frame is copied to the new skbuff.
153 When the incoming frame is larger, the skbuff is passed directly up the
154 protocol stack. Buffers consumed this way are replaced by newly allocated
155 skbuffs in a later phase of receives.
157 The RX_COPYBREAK value is chosen to trade off the memory wasted by
158 using a full-sized skbuff for small frames vs. the copying costs of larger
159 frames. New boards are typically used in generously configured machines
160 and the underfilled buffers have negligible impact compared to the benefit of
161 a single allocation size, so the default value of zero results in never
162 copying packets. When copying is done, the cost is usually mitigated by using
163 a combined copy/checksum routine. Copying also preloads the cache, which is
164 most useful with small frames.
166 A subtle aspect of the operation is that the IP header at offset 14 in an
167 ethernet frame isn't longword aligned for further processing.
168 Unaligned buffers are permitted by the Sundance hardware, so
169 frames are received into the skbuff at an offset of "+2", 16-byte aligning
170 the IP header.
172 IIId. Synchronization
174 The driver runs as two independent, single-threaded flows of control. One
175 is the send-packet routine, which enforces single-threaded use by the
176 dev->tbusy flag. The other thread is the interrupt handler, which is single
177 threaded by the hardware and interrupt handling software.
179 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
180 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
181 queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
182 the 'lp->tx_full' flag.
184 The interrupt handler has exclusive control over the Rx ring and records stats
185 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
186 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
187 clears both the tx_full and tbusy flags.
193 The Sundance ST201 datasheet, preliminary version.
194 The Kendin KS8723 datasheet, preliminary version.
195 The ICplus IP100 datasheet, preliminary version.
196 http://www.scyld.com/expert/100mbps.html
197 http://www.scyld.com/expert/NWay.html
203 /* Work-around for Kendin chip bugs. */
204 #ifndef CONFIG_SUNDANCE_MMIO
208 static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
209 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
210 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
211 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
212 { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
213 { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
214 { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
215 { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
218 MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
227 static const struct pci_id_info pci_id_tbl[] __devinitdata = {
228 {"D-Link DFE-550TX FAST Ethernet Adapter"},
229 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
230 {"D-Link DFE-580TX 4 port Server Adapter"},
231 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
232 {"D-Link DL10050-based FAST Ethernet Adapter"},
233 {"Sundance Technology Alta"},
234 {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
235 { } /* terminate list. */
238 /* This driver was written to use PCI memory space; however, x86-oriented
239 hardware often uses I/O space accesses. */
241 /* Offsets to the device registers.
242 Unlike software-only systems, device drivers interact with complex hardware.
243 It's not useful to define symbolic names for every register bit in the
244 device. Such names can only partially document the semantics and make
245 the driver longer and more difficult to read.
246 In general, only the important configuration values or bits changed
247 multiple times should be defined symbolically.
252 TxDMABurstThresh = 0x08,
253 TxDMAUrgentThresh = 0x09,
254 TxDMAPollPeriod = 0x0a,
259 RxDMABurstThresh = 0x14,
260 RxDMAUrgentThresh = 0x15,
261 RxDMAPollPeriod = 0x16,
280 MulticastFilter0 = 0x60,
281 MulticastFilter1 = 0x64,
288 StatsCarrierError = 0x74,
289 StatsLateColl = 0x75,
290 StatsMultiColl = 0x76,
294 StatsTxXSDefer = 0x7a,
300 /* Aliased and bogus values! */
303 enum ASICCtrl_HiWord_bit {
304 GlobalReset = 0x0001,
309 NetworkReset = 0x0020,
314 /* Bits in the interrupt status/mask registers. */
315 enum intr_status_bits {
316 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
317 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
319 StatsMax=0x0080, LinkChange=0x0100,
320 IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
323 /* Bits in the RxMode register. */
325 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
326 AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
328 /* Bits in MACCtrl. */
329 enum mac_ctrl0_bits {
330 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
331 EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
333 enum mac_ctrl1_bits {
334 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
335 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
336 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
339 /* The Rx and Tx buffer descriptors. */
340 /* Note that using only 32 bit fields simplifies conversion to big-endian
345 struct desc_frag { __le32 addr, length; } frag[1];
348 /* Bits in netdev_desc.status */
349 enum desc_status_bits {
351 DescEndPacket=0x4000,
355 DescIntrOnDMADone=0x80000000,
356 DisableAlign = 0x00000001,
359 #define PRIV_ALIGN 15 /* Required alignment mask */
360 /* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
361 within the structure. */
363 struct netdev_private {
364 /* Descriptor rings first for alignment. */
365 struct netdev_desc *rx_ring;
366 struct netdev_desc *tx_ring;
367 struct sk_buff* rx_skbuff[RX_RING_SIZE];
368 struct sk_buff* tx_skbuff[TX_RING_SIZE];
369 dma_addr_t tx_ring_dma;
370 dma_addr_t rx_ring_dma;
371 struct timer_list timer; /* Media monitoring timer. */
372 /* Frequently used values: keep some adjacent for cache effect. */
374 spinlock_t rx_lock; /* Group with Tx control cache line. */
377 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
378 unsigned int rx_buf_sz; /* Based on MTU+slack. */
379 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
380 unsigned int cur_tx, dirty_tx;
381 /* These values keep track of the transceiver/media in use. */
382 unsigned int flowctrl:1;
383 unsigned int default_port:4; /* Last dev->if_port value. */
384 unsigned int an_enable:1;
386 struct tasklet_struct rx_tasklet;
387 struct tasklet_struct tx_tasklet;
390 /* Multicast and receive mode. */
391 spinlock_t mcastlock; /* SMP lock multicast updates. */
393 /* MII transceiver section. */
394 struct mii_if_info mii_if;
395 int mii_preamble_required;
396 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
397 struct pci_dev *pci_dev;
401 /* The station address location in the EEPROM. */
402 #define EEPROM_SA_OFFSET 0x10
403 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
404 IntrDrvRqst | IntrTxDone | StatsMax | \
407 static int change_mtu(struct net_device *dev, int new_mtu);
408 static int eeprom_read(void __iomem *ioaddr, int location);
409 static int mdio_read(struct net_device *dev, int phy_id, int location);
410 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
411 static int mdio_wait_link(struct net_device *dev, int wait);
412 static int netdev_open(struct net_device *dev);
413 static void check_duplex(struct net_device *dev);
414 static void netdev_timer(unsigned long data);
415 static void tx_timeout(struct net_device *dev);
416 static void init_ring(struct net_device *dev);
417 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
418 static int reset_tx (struct net_device *dev);
419 static irqreturn_t intr_handler(int irq, void *dev_instance);
420 static void rx_poll(unsigned long data);
421 static void tx_poll(unsigned long data);
422 static void refill_rx (struct net_device *dev);
423 static void netdev_error(struct net_device *dev, int intr_status);
425 static void set_rx_mode(struct net_device *dev);
426 static int __set_mac_addr(struct net_device *dev);
427 static struct net_device_stats *get_stats(struct net_device *dev);
428 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
429 static int netdev_close(struct net_device *dev);
430 static const struct ethtool_ops ethtool_ops;
432 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
434 struct netdev_private *np = netdev_priv(dev);
435 void __iomem *ioaddr = np->base + ASICCtrl;
438 /* ST201 documentation states ASICCtrl is a 32bit register */
439 iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
440 /* ST201 documentation states reset can take up to 1 ms */
442 while (ioread32 (ioaddr) & (ResetBusy << 16)) {
443 if (--countdown == 0) {
444 printk(KERN_WARNING "%s: reset not completed!\n", dev->name);
451 static const struct net_device_ops netdev_ops = {
452 .ndo_open = netdev_open,
453 .ndo_stop = netdev_close,
454 .ndo_start_xmit = start_tx,
455 .ndo_get_stats = get_stats,
456 .ndo_set_multicast_list = set_rx_mode,
457 .ndo_do_ioctl = netdev_ioctl,
458 .ndo_tx_timeout = tx_timeout,
459 .ndo_change_mtu = change_mtu,
460 .ndo_set_mac_address = eth_mac_addr,
461 .ndo_validate_addr = eth_validate_addr,
464 static int __devinit sundance_probe1 (struct pci_dev *pdev,
465 const struct pci_device_id *ent)
467 struct net_device *dev;
468 struct netdev_private *np;
470 int chip_idx = ent->driver_data;
473 void __iomem *ioaddr;
482 int phy, phy_end, phy_idx = 0;
484 /* when built into the kernel, we only print version if device is found */
486 static int printed_version;
487 if (!printed_version++)
491 if (pci_enable_device(pdev))
493 pci_set_master(pdev);
497 dev = alloc_etherdev(sizeof(*np));
500 SET_NETDEV_DEV(dev, &pdev->dev);
502 if (pci_request_regions(pdev, DRV_NAME))
505 ioaddr = pci_iomap(pdev, bar, netdev_io_size);
509 for (i = 0; i < 3; i++)
510 ((__le16 *)dev->dev_addr)[i] =
511 cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
512 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
514 dev->base_addr = (unsigned long)ioaddr;
517 np = netdev_priv(dev);
520 np->chip_id = chip_idx;
521 np->msg_enable = (1 << debug) - 1;
522 spin_lock_init(&np->lock);
523 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
524 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
526 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
528 goto err_out_cleardev;
529 np->tx_ring = (struct netdev_desc *)ring_space;
530 np->tx_ring_dma = ring_dma;
532 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
534 goto err_out_unmap_tx;
535 np->rx_ring = (struct netdev_desc *)ring_space;
536 np->rx_ring_dma = ring_dma;
538 np->mii_if.dev = dev;
539 np->mii_if.mdio_read = mdio_read;
540 np->mii_if.mdio_write = mdio_write;
541 np->mii_if.phy_id_mask = 0x1f;
542 np->mii_if.reg_num_mask = 0x1f;
544 /* The chip-specific entries in the device structure. */
545 dev->netdev_ops = &netdev_ops;
546 SET_ETHTOOL_OPS(dev, &ethtool_ops);
547 dev->watchdog_timeo = TX_TIMEOUT;
549 pci_set_drvdata(pdev, dev);
551 i = register_netdev(dev);
553 goto err_out_unmap_rx;
555 printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
556 dev->name, pci_id_tbl[chip_idx].name, ioaddr,
559 np->phys[0] = 1; /* Default setting */
560 np->mii_preamble_required++;
563 * It seems some PHYs don't deal well with address 0 being accessed
566 if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
571 phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */
573 for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
574 int phyx = phy & 0x1f;
575 int mii_status = mdio_read(dev, phyx, MII_BMSR);
576 if (mii_status != 0xffff && mii_status != 0x0000) {
577 np->phys[phy_idx++] = phyx;
578 np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
579 if ((mii_status & 0x0040) == 0)
580 np->mii_preamble_required++;
581 printk(KERN_INFO "%s: MII PHY found at address %d, status "
582 "0x%4.4x advertising %4.4x.\n",
583 dev->name, phyx, mii_status, np->mii_if.advertising);
586 np->mii_preamble_required--;
589 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
590 dev->name, ioread32(ioaddr + ASICCtrl));
591 goto err_out_unregister;
594 np->mii_if.phy_id = np->phys[0];
596 /* Parse override configuration */
598 if (card_idx < MAX_UNITS) {
599 if (media[card_idx] != NULL) {
601 if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
602 strcmp (media[card_idx], "4") == 0) {
604 np->mii_if.full_duplex = 1;
605 } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
606 strcmp (media[card_idx], "3") == 0) {
608 np->mii_if.full_duplex = 0;
609 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
610 strcmp (media[card_idx], "2") == 0) {
612 np->mii_if.full_duplex = 1;
613 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
614 strcmp (media[card_idx], "1") == 0) {
616 np->mii_if.full_duplex = 0;
626 if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
627 /* Default 100Mbps Full */
630 np->mii_if.full_duplex = 1;
635 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
637 /* If flow control enabled, we need to advertise it.*/
639 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
640 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
641 /* Force media type */
642 if (!np->an_enable) {
644 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
645 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
646 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
647 printk (KERN_INFO "Override speed=%d, %s duplex\n",
648 np->speed, np->mii_if.full_duplex ? "Full" : "Half");
652 /* Perhaps move the reset here? */
653 /* Reset the chip to erase previous misconfiguration. */
654 if (netif_msg_hw(np))
655 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
656 sundance_reset(dev, 0x00ff << 16);
657 if (netif_msg_hw(np))
658 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
664 unregister_netdev(dev);
666 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
668 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
670 pci_set_drvdata(pdev, NULL);
671 pci_iounmap(pdev, ioaddr);
673 pci_release_regions(pdev);
679 static int change_mtu(struct net_device *dev, int new_mtu)
681 if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
683 if (netif_running(dev))
689 #define eeprom_delay(ee_addr) ioread32(ee_addr)
690 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
691 static int __devinit eeprom_read(void __iomem *ioaddr, int location)
693 int boguscnt = 10000; /* Typical 1900 ticks. */
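/* Writing 0x0200 plus the word address to EECtrl starts a read of that
   EEPROM word; poll until the busy bit (0x8000) clears, then the result
   is available in EEData. */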
694 iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
696 eeprom_delay(ioaddr + EECtrl);
697 if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
698 return ioread16(ioaddr + EEData);
700 } while (--boguscnt > 0);
704 /* MII transceiver control section.
705 Read and write the MII registers using software-generated serial
706 MDIO protocol. See the MII specifications or DP83840A data sheet
709 The maximum data clock rate is 2.5 MHz. The minimum timing is usually
710 met by back-to-back 33 MHz PCI cycles. */
711 #define mdio_delay() ioread8(mdio_addr)
714 MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
716 #define MDIO_EnbIn (0)
717 #define MDIO_WRITE0 (MDIO_EnbOutput)
718 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
720 /* Generate the preamble required for initial synchronization and
721 a few older transceivers. */
722 static void mdio_sync(void __iomem *mdio_addr)
726 /* Establish sync by sending at least 32 logic ones. */
727 while (--bits >= 0) {
728 iowrite8(MDIO_WRITE1, mdio_addr);
730 iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
735 static int mdio_read(struct net_device *dev, int phy_id, int location)
737 struct netdev_private *np = netdev_priv(dev);
738 void __iomem *mdio_addr = np->base + MIICtrl;
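/* The 16-bit command shifted out MSB-first below carries two filler '1'
   bits, the 01 start bits, the 10 read opcode, the 5-bit PHY address and
   the 5-bit register number; 0xf6 << 10 supplies the leading six bits. */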
739 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
742 if (np->mii_preamble_required)
743 mdio_sync(mdio_addr);
745 /* Shift the read command bits out. */
746 for (i = 15; i >= 0; i--) {
747 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
749 iowrite8(dataval, mdio_addr);
751 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
754 /* Read the two transition, 16 data, and wire-idle bits. */
755 for (i = 19; i > 0; i--) {
756 iowrite8(MDIO_EnbIn, mdio_addr);
758 retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
759 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
762 return (retval>>1) & 0xffff;
765 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
767 struct netdev_private *np = netdev_priv(dev);
768 void __iomem *mdio_addr = np->base + MIICtrl;
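/* The 32-bit command shifted out MSB-first below carries the 01 start
   bits, the 01 write opcode, the 5-bit PHY address, the 5-bit register
   number, a 10 turnaround pattern and then the 16 data bits; 0x5002 << 16
   supplies the framing bits around the address fields. */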
769 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
772 if (np->mii_preamble_required)
773 mdio_sync(mdio_addr);
775 /* Shift the command bits out. */
776 for (i = 31; i >= 0; i--) {
777 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
779 iowrite8(dataval, mdio_addr);
781 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
784 /* Clear out extra bits. */
785 for (i = 2; i > 0; i--) {
786 iowrite8(MDIO_EnbIn, mdio_addr);
788 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
793 static int mdio_wait_link(struct net_device *dev, int wait)
797 struct netdev_private *np;
799 np = netdev_priv(dev);
800 phy_id = np->phys[0];
803 bmsr = mdio_read(dev, phy_id, MII_BMSR);
807 } while (--wait > 0);
811 static int netdev_open(struct net_device *dev)
813 struct netdev_private *np = netdev_priv(dev);
814 void __iomem *ioaddr = np->base;
818 /* Do we need to reset the chip??? */
820 i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
824 if (netif_msg_ifup(np))
825 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
826 dev->name, dev->irq);
829 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
830 /* The Tx list pointer is written as packets are queued. */
832 /* Initialize other registers. */
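/* MaxFrameSize is the MTU plus the 14-byte Ethernet header, with 4 more
   bytes allowed for an 802.1Q VLAN tag when VLAN support is configured. */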
834 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
835 iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
837 iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
840 iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
842 /* Configure the PCI bus bursts and FIFO thresholds. */
844 if (dev->if_port == 0)
845 dev->if_port = np->default_port;
847 spin_lock_init(&np->mcastlock);
850 iowrite16(0, ioaddr + IntrEnable);
851 iowrite16(0, ioaddr + DownCounter);
852 /* Set the chip to poll every N*320nsec. */
853 iowrite8(100, ioaddr + RxDMAPollPeriod);
854 iowrite8(127, ioaddr + TxDMAPollPeriod);
855 /* Fix DFE-580TX packet drop issue */
856 if (np->pci_dev->revision >= 0x14)
857 iowrite8(0x01, ioaddr + DebugCtrl1);
858 netif_start_queue(dev);
860 spin_lock_irqsave(&np->lock, flags);
862 spin_unlock_irqrestore(&np->lock, flags);
864 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
866 if (netif_msg_ifup(np))
867 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
868 "MAC Control %x, %4.4x %4.4x.\n",
869 dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
870 ioread32(ioaddr + MACCtrl0),
871 ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
873 /* Set the timer to check for link beat. */
874 init_timer(&np->timer);
875 np->timer.expires = jiffies + 3*HZ;
876 np->timer.data = (unsigned long)dev;
877 np->timer.function = &netdev_timer; /* timer handler */
878 add_timer(&np->timer);
880 /* Enable interrupts by setting the interrupt mask. */
881 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
886 static void check_duplex(struct net_device *dev)
888 struct netdev_private *np = netdev_priv(dev);
889 void __iomem *ioaddr = np->base;
890 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
891 int negotiated = mii_lpa & np->mii_if.advertising;
895 if (!np->an_enable || mii_lpa == 0xffff) {
896 if (np->mii_if.full_duplex)
897 iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
902 /* Autonegotiation */
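/* LPA bit 0x0100 is 100BASE-TX full duplex; masking with 0x01C0 and
   comparing against 0x0040 catches the case where 10BASE-T full duplex
   is the best mode both link partners have in common. */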
903 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
904 if (np->mii_if.full_duplex != duplex) {
905 np->mii_if.full_duplex = duplex;
906 if (netif_msg_link(np))
907 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
908 "negotiated capability %4.4x.\n", dev->name,
909 duplex ? "full" : "half", np->phys[0], negotiated);
910 iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
914 static void netdev_timer(unsigned long data)
916 struct net_device *dev = (struct net_device *)data;
917 struct netdev_private *np = netdev_priv(dev);
918 void __iomem *ioaddr = np->base;
919 int next_tick = 10*HZ;
921 if (netif_msg_timer(np)) {
922 printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
924 dev->name, ioread16(ioaddr + IntrEnable),
925 ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
928 np->timer.expires = jiffies + next_tick;
929 add_timer(&np->timer);
932 static void tx_timeout(struct net_device *dev)
934 struct netdev_private *np = netdev_priv(dev);
935 void __iomem *ioaddr = np->base;
938 netif_stop_queue(dev);
939 tasklet_disable(&np->tx_tasklet);
940 iowrite16(0, ioaddr + IntrEnable);
941 printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
943 " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
944 ioread8(ioaddr + TxFrameId));
948 for (i=0; i<TX_RING_SIZE; i++) {
949 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
950 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
951 le32_to_cpu(np->tx_ring[i].next_desc),
952 le32_to_cpu(np->tx_ring[i].status),
953 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
954 le32_to_cpu(np->tx_ring[i].frag[0].addr),
955 le32_to_cpu(np->tx_ring[i].frag[0].length));
957 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
958 ioread32(np->base + TxListPtr),
959 netif_queue_stopped(dev));
960 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
961 np->cur_tx, np->cur_tx % TX_RING_SIZE,
962 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
963 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
964 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
966 spin_lock_irqsave(&np->lock, flag);
968 /* Stop and restart the chip's Tx processes. */
970 spin_unlock_irqrestore(&np->lock, flag);
974 dev->trans_start = jiffies; /* prevent tx timeout */
975 dev->stats.tx_errors++;
976 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
977 netif_wake_queue(dev);
979 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
980 tasklet_enable(&np->tx_tasklet);
984 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
985 static void init_ring(struct net_device *dev)
987 struct netdev_private *np = netdev_priv(dev);
990 np->cur_rx = np->cur_tx = 0;
991 np->dirty_rx = np->dirty_tx = 0;
994 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
996 /* Initialize all Rx descriptors. */
997 for (i = 0; i < RX_RING_SIZE; i++) {
998 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
999 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1000 np->rx_ring[i].status = 0;
1001 np->rx_ring[i].frag[0].length = 0;
1002 np->rx_skbuff[i] = NULL;
1005 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1006 for (i = 0; i < RX_RING_SIZE; i++) {
1007 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1008 np->rx_skbuff[i] = skb;
1011 skb->dev = dev; /* Mark as being used by this device. */
1012 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1013 np->rx_ring[i].frag[0].addr = cpu_to_le32(
1014 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
1015 PCI_DMA_FROMDEVICE));
1016 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1018 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
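/* dirty_rx now trails cur_rx by the number of ring slots that still lack
   a buffer, so refill_rx() can retry any allocations that failed above. */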
1020 for (i = 0; i < TX_RING_SIZE; i++) {
1021 np->tx_skbuff[i] = NULL;
1022 np->tx_ring[i].status = 0;
1026 static void tx_poll (unsigned long data)
1028 struct net_device *dev = (struct net_device *)data;
1029 struct netdev_private *np = netdev_priv(dev);
1030 unsigned head = np->cur_task % TX_RING_SIZE;
1031 struct netdev_desc *txdesc =
1032 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1034 /* Chain the next pointer */
1035 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1036 int entry = np->cur_task % TX_RING_SIZE;
1037 txdesc = &np->tx_ring[entry];
1039 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1040 entry*sizeof(struct netdev_desc));
1042 np->last_tx = txdesc;
1044 /* Mark the latest descriptor in the Tx ring */
1045 txdesc->status |= cpu_to_le32(DescIntrOnTx);
1047 if (ioread32 (np->base + TxListPtr) == 0)
1048 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1049 np->base + TxListPtr);
1053 start_tx (struct sk_buff *skb, struct net_device *dev)
1055 struct netdev_private *np = netdev_priv(dev);
1056 struct netdev_desc *txdesc;
1059 /* Calculate the next Tx descriptor entry. */
1060 entry = np->cur_tx % TX_RING_SIZE;
1061 np->tx_skbuff[entry] = skb;
1062 txdesc = &np->tx_ring[entry];
1064 txdesc->next_desc = 0;
1065 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1066 txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
1069 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1071 /* Increment cur_tx before tasklet_schedule() */
1074 /* Schedule a tx_poll() task */
1075 tasklet_schedule(&np->tx_tasklet);
1077 /* On some architectures: explicitly flush cache lines here. */
1078 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1079 !netif_queue_stopped(dev)) {
1082 netif_stop_queue (dev);
1084 if (netif_msg_tx_queued(np)) {
1086 "%s: Transmit frame #%d queued in slot %d.\n",
1087 dev->name, np->cur_tx, entry);
1089 return NETDEV_TX_OK;
1092 /* Reset the hardware Tx path and free all Tx buffers */
1094 reset_tx (struct net_device *dev)
1096 struct netdev_private *np = netdev_priv(dev);
1097 void __iomem *ioaddr = np->base;
1098 struct sk_buff *skb;
1100 int irq = in_interrupt();
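/* Remember whether we are in interrupt context so the matching
   dev_kfree_skb variant is used when the Tx skbs are freed below. */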
1102 /* Reset Tx logic; TxListPtr will be cleared */
1103 iowrite16 (TxDisable, ioaddr + MACCtrl1);
1104 sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1106 /* Free all Tx skbuffs */
1107 for (i = 0; i < TX_RING_SIZE; i++) {
1108 np->tx_ring[i].next_desc = 0;
1110 skb = np->tx_skbuff[i];
1112 pci_unmap_single(np->pci_dev,
1113 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1114 skb->len, PCI_DMA_TODEVICE);
1116 dev_kfree_skb_irq (skb);
1118 dev_kfree_skb (skb);
1119 np->tx_skbuff[i] = NULL;
1120 dev->stats.tx_dropped++;
1123 np->cur_tx = np->dirty_tx = 0;
1127 iowrite8(127, ioaddr + TxDMAPollPeriod);
1129 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1133 /* The interrupt handler cleans up after the Tx thread,
1134 and schedules Rx work via the rx_poll tasklet. */
1135 static irqreturn_t intr_handler(int irq, void *dev_instance)
1137 struct net_device *dev = (struct net_device *)dev_instance;
1138 struct netdev_private *np = netdev_priv(dev);
1139 void __iomem *ioaddr = np->base;
1148 int intr_status = ioread16(ioaddr + IntrStatus);
1149 iowrite16(intr_status, ioaddr + IntrStatus);
1151 if (netif_msg_intr(np))
1152 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1153 dev->name, intr_status);
1155 if (!(intr_status & DEFAULT_INTR))
1160 if (intr_status & (IntrRxDMADone)) {
1161 iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1162 ioaddr + IntrEnable);
1164 np->budget = RX_BUDGET;
1165 tasklet_schedule(&np->rx_tasklet);
1167 if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1168 tx_status = ioread16 (ioaddr + TxStatus);
1169 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1170 if (netif_msg_tx_done(np))
1172 ("%s: Transmit status is %2.2x.\n",
1173 dev->name, tx_status);
1174 if (tx_status & 0x1e) {
1175 if (netif_msg_tx_err(np))
1176 printk("%s: Transmit error status %4.4x.\n",
1177 dev->name, tx_status);
1178 dev->stats.tx_errors++;
1179 if (tx_status & 0x10)
1180 dev->stats.tx_fifo_errors++;
1181 if (tx_status & 0x08)
1182 dev->stats.collisions++;
1183 if (tx_status & 0x04)
1184 dev->stats.tx_fifo_errors++;
1185 if (tx_status & 0x02)
1186 dev->stats.tx_window_errors++;
1189 ** This reset has been verified on
1190 ** DFE-580TX boards! phdm@macqel.be.
1192 if (tx_status & 0x10) { /* TxUnderrun */
1193 /* Restart Tx FIFO and transmitter */
1194 sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1195 /* No need to reset the Tx pointer here */
1197 /* Restart the Tx; need to make sure Tx is enabled first */
1200 iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1201 if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1206 /* Yup, this is a documentation bug. It cost me *hours*. */
1207 iowrite16 (0, ioaddr + TxStatus);
1209 iowrite32(5000, ioaddr + DownCounter);
1212 tx_status = ioread16 (ioaddr + TxStatus);
1214 hw_frame_id = (tx_status >> 8) & 0xff;
1216 hw_frame_id = ioread8(ioaddr + TxFrameId);
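/* Revisions >= 0x14 report the frame ID of the last completed transmit,
   so finished descriptors are reaped by matching each entry's software
   frame ID (stored in its status word) against the hardware value. */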
1219 if (np->pci_dev->revision >= 0x14) {
1220 spin_lock(&np->lock);
1221 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1222 int entry = np->dirty_tx % TX_RING_SIZE;
1223 struct sk_buff *skb;
1225 sw_frame_id = (le32_to_cpu(
1226 np->tx_ring[entry].status) >> 2) & 0xff;
1227 if (sw_frame_id == hw_frame_id &&
1228 !(le32_to_cpu(np->tx_ring[entry].status)
1231 if (sw_frame_id == (hw_frame_id + 1) %
1234 skb = np->tx_skbuff[entry];
1235 /* Free the original skb. */
1236 pci_unmap_single(np->pci_dev,
1237 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1238 skb->len, PCI_DMA_TODEVICE);
1239 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1240 np->tx_skbuff[entry] = NULL;
1241 np->tx_ring[entry].frag[0].addr = 0;
1242 np->tx_ring[entry].frag[0].length = 0;
1244 spin_unlock(&np->lock);
1246 spin_lock(&np->lock);
1247 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1248 int entry = np->dirty_tx % TX_RING_SIZE;
1249 struct sk_buff *skb;
1250 if (!(le32_to_cpu(np->tx_ring[entry].status)
1253 skb = np->tx_skbuff[entry];
1254 /* Free the original skb. */
1255 pci_unmap_single(np->pci_dev,
1256 le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1257 skb->len, PCI_DMA_TODEVICE);
1258 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1259 np->tx_skbuff[entry] = NULL;
1260 np->tx_ring[entry].frag[0].addr = 0;
1261 np->tx_ring[entry].frag[0].length = 0;
1263 spin_unlock(&np->lock);
1266 if (netif_queue_stopped(dev) &&
1267 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1268 /* The ring is no longer full, clear busy flag. */
1269 netif_wake_queue (dev);
1271 /* Abnormal error summary/uncommon events handlers. */
1272 if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1273 netdev_error(dev, intr_status);
1275 if (netif_msg_intr(np))
1276 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1277 dev->name, ioread16(ioaddr + IntrStatus));
1278 return IRQ_RETVAL(handled);
1281 static void rx_poll(unsigned long data)
1283 struct net_device *dev = (struct net_device *)data;
1284 struct netdev_private *np = netdev_priv(dev);
1285 int entry = np->cur_rx % RX_RING_SIZE;
1286 int boguscnt = np->budget;
1287 void __iomem *ioaddr = np->base;
1290 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1292 struct netdev_desc *desc = &(np->rx_ring[entry]);
1293 u32 frame_status = le32_to_cpu(desc->status);
1296 if (--boguscnt < 0) {
1299 if (!(frame_status & DescOwn))
1301 pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1302 if (netif_msg_rx_status(np))
1303 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1305 if (frame_status & 0x001f4000) {
1306 /* There was an error. */
1307 if (netif_msg_rx_err(np))
1308 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1310 dev->stats.rx_errors++;
1311 if (frame_status & 0x00100000)
1312 dev->stats.rx_length_errors++;
1313 if (frame_status & 0x00010000)
1314 dev->stats.rx_fifo_errors++;
1315 if (frame_status & 0x00060000)
1316 dev->stats.rx_frame_errors++;
1317 if (frame_status & 0x00080000)
1318 dev->stats.rx_crc_errors++;
1319 if (frame_status & 0x00100000) {
1320 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1322 dev->name, frame_status);
1325 struct sk_buff *skb;
1326 #ifndef final_version
1327 if (netif_msg_rx_status(np))
1328 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1329 ", bogus_cnt %d.\n",
1332 /* Check if the packet is long enough to accept without copying
1333 to a minimally-sized skbuff. */
1334 if (pkt_len < rx_copybreak &&
1335 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1336 skb_reserve(skb, 2); /* 16 byte align the IP header */
1337 pci_dma_sync_single_for_cpu(np->pci_dev,
1338 le32_to_cpu(desc->frag[0].addr),
1340 PCI_DMA_FROMDEVICE);
1342 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1343 pci_dma_sync_single_for_device(np->pci_dev,
1344 le32_to_cpu(desc->frag[0].addr),
1346 PCI_DMA_FROMDEVICE);
1347 skb_put(skb, pkt_len);
1349 pci_unmap_single(np->pci_dev,
1350 le32_to_cpu(desc->frag[0].addr),
1352 PCI_DMA_FROMDEVICE);
1353 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1354 np->rx_skbuff[entry] = NULL;
1356 skb->protocol = eth_type_trans(skb, dev);
1357 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1360 entry = (entry + 1) % RX_RING_SIZE;
1365 np->budget -= received;
1366 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1374 np->budget -= received;
1375 if (np->budget <= 0)
1376 np->budget = RX_BUDGET;
1377 tasklet_schedule(&np->rx_tasklet);
1380 static void refill_rx (struct net_device *dev)
1382 struct netdev_private *np = netdev_priv(dev);
1386 /* Refill the Rx ring buffers. */
1387 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1388 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1389 struct sk_buff *skb;
1390 entry = np->dirty_rx % RX_RING_SIZE;
1391 if (np->rx_skbuff[entry] == NULL) {
1392 skb = dev_alloc_skb(np->rx_buf_sz);
1393 np->rx_skbuff[entry] = skb;
1395 break; /* Better luck next round. */
1396 skb->dev = dev; /* Mark as being used by this device. */
1397 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1398 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1399 pci_map_single(np->pci_dev, skb->data,
1400 np->rx_buf_sz, PCI_DMA_FROMDEVICE));
1402 /* Perhaps we need not reset this field. */
1403 np->rx_ring[entry].frag[0].length =
1404 cpu_to_le32(np->rx_buf_sz | LastFrag);
1405 np->rx_ring[entry].status = 0;
1409 static void netdev_error(struct net_device *dev, int intr_status)
1411 struct netdev_private *np = netdev_priv(dev);
1412 void __iomem *ioaddr = np->base;
1413 u16 mii_ctl, mii_advertise, mii_lpa;
1416 if (intr_status & LinkChange) {
1417 if (mdio_wait_link(dev, 10) == 0) {
1418 printk(KERN_INFO "%s: Link up\n", dev->name);
1419 if (np->an_enable) {
1420 mii_advertise = mdio_read(dev, np->phys[0],
1422 mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1423 mii_advertise &= mii_lpa;
1424 printk(KERN_INFO "%s: Link changed: ",
1426 if (mii_advertise & ADVERTISE_100FULL) {
1428 printk("100Mbps, full duplex\n");
1429 } else if (mii_advertise & ADVERTISE_100HALF) {
1431 printk("100Mbps, half duplex\n");
1432 } else if (mii_advertise & ADVERTISE_10FULL) {
1434 printk("10Mbps, full duplex\n");
1435 } else if (mii_advertise & ADVERTISE_10HALF) {
1437 printk("10Mbps, half duplex\n");
1442 mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1443 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1445 printk(KERN_INFO "%s: Link changed: %dMbps, ",
1447 printk("%s duplex.\n",
1448 (mii_ctl & BMCR_FULLDPLX) ?
1452 if (np->flowctrl && np->mii_if.full_duplex) {
1453 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1454 ioaddr + MulticastFilter1+2);
1455 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1458 netif_carrier_on(dev);
1460 printk(KERN_INFO "%s: Link down\n", dev->name);
1461 netif_carrier_off(dev);
1464 if (intr_status & StatsMax) {
1467 if (intr_status & IntrPCIErr) {
1468 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1469 dev->name, intr_status);
1470 /* We must do a global reset of DMA to continue. */
1474 static struct net_device_stats *get_stats(struct net_device *dev)
1476 struct netdev_private *np = netdev_priv(dev);
1477 void __iomem *ioaddr = np->base;
1480 /* We should lock this segment of code for SMP eventually, although
1481 the vulnerability window is very small and statistics are
1482 non-critical. */
1483 /* The chip only needs to report frames it silently dropped. */
1484 dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1485 dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1486 dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1487 dev->stats.collisions += ioread8(ioaddr + StatsLateColl);
1488 dev->stats.collisions += ioread8(ioaddr + StatsMultiColl);
1489 dev->stats.collisions += ioread8(ioaddr + StatsOneColl);
1490 dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1491 ioread8(ioaddr + StatsTxDefer);
1492 for (i = StatsTxDefer; i <= StatsMcastRx; i++)
1493 ioread8(ioaddr + i);
1494 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1495 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1496 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1497 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1502 static void set_rx_mode(struct net_device *dev)
1504 struct netdev_private *np = netdev_priv(dev);
1505 void __iomem *ioaddr = np->base;
1506 u16 mc_filter[4]; /* Multicast hash filter */
1510 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1511 memset(mc_filter, 0xff, sizeof(mc_filter));
1512 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1513 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1514 (dev->flags & IFF_ALLMULTI)) {
1515 /* Too many to match, or accept all multicasts. */
1516 memset(mc_filter, 0xff, sizeof(mc_filter));
1517 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1518 } else if (!netdev_mc_empty(dev)) {
1519 struct netdev_hw_addr *ha;
1523 memset (mc_filter, 0, sizeof (mc_filter));
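/* Hash each address with the little-endian Ethernet CRC and use six of
   its high-order bits to index the 64-entry filter; each 16-bit
   MulticastFilter word holds 16 of those hash buckets. */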
1524 netdev_for_each_mc_addr(ha, dev) {
1525 crc = ether_crc_le(ETH_ALEN, ha->addr);
1526 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1527 if (crc & 0x80000000) index |= 1 << bit;
1528 mc_filter[index/16] |= (1 << (index % 16));
1530 rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1532 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
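/* On a full-duplex link with flow control, also set hash bit 0x0200 of
   MulticastFilter1 (presumably the bucket for the 802.3x PAUSE multicast
   address) so that PAUSE frames are accepted. */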
1535 if (np->mii_if.full_duplex && np->flowctrl)
1536 mc_filter[3] |= 0x0200;
1538 for (i = 0; i < 4; i++)
1539 iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1540 iowrite8(rx_mode, ioaddr + RxMode);
1543 static int __set_mac_addr(struct net_device *dev)
1545 struct netdev_private *np = netdev_priv(dev);
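/* Load the station address into the chip as three little-endian 16-bit
   words at StationAddr, StationAddr+2 and StationAddr+4. */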
1548 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1549 iowrite16(addr16, np->base + StationAddr);
1550 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1551 iowrite16(addr16, np->base + StationAddr+2);
1552 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1553 iowrite16(addr16, np->base + StationAddr+4);
1557 static int check_if_running(struct net_device *dev)
1559 if (!netif_running(dev))
1564 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1566 struct netdev_private *np = netdev_priv(dev);
1567 strcpy(info->driver, DRV_NAME);
1568 strcpy(info->version, DRV_VERSION);
1569 strcpy(info->bus_info, pci_name(np->pci_dev));
1572 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1574 struct netdev_private *np = netdev_priv(dev);
1575 spin_lock_irq(&np->lock);
1576 mii_ethtool_gset(&np->mii_if, ecmd);
1577 spin_unlock_irq(&np->lock);
1581 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1583 struct netdev_private *np = netdev_priv(dev);
1585 spin_lock_irq(&np->lock);
1586 res = mii_ethtool_sset(&np->mii_if, ecmd);
1587 spin_unlock_irq(&np->lock);
1591 static int nway_reset(struct net_device *dev)
1593 struct netdev_private *np = netdev_priv(dev);
1594 return mii_nway_restart(&np->mii_if);
1597 static u32 get_link(struct net_device *dev)
1599 struct netdev_private *np = netdev_priv(dev);
1600 return mii_link_ok(&np->mii_if);
1603 static u32 get_msglevel(struct net_device *dev)
1605 struct netdev_private *np = netdev_priv(dev);
1606 return np->msg_enable;
1609 static void set_msglevel(struct net_device *dev, u32 val)
1611 struct netdev_private *np = netdev_priv(dev);
1612 np->msg_enable = val;
1615 static const struct ethtool_ops ethtool_ops = {
1616 .begin = check_if_running,
1617 .get_drvinfo = get_drvinfo,
1618 .get_settings = get_settings,
1619 .set_settings = set_settings,
1620 .nway_reset = nway_reset,
1621 .get_link = get_link,
1622 .get_msglevel = get_msglevel,
1623 .set_msglevel = set_msglevel,
1626 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1628 struct netdev_private *np = netdev_priv(dev);
1631 if (!netif_running(dev))
1634 spin_lock_irq(&np->lock);
1635 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1636 spin_unlock_irq(&np->lock);
1641 static int netdev_close(struct net_device *dev)
1643 struct netdev_private *np = netdev_priv(dev);
1644 void __iomem *ioaddr = np->base;
1645 struct sk_buff *skb;
1648 /* Wait for pending work and kill the Rx/Tx tasklets */
1649 tasklet_kill(&np->rx_tasklet);
1650 tasklet_kill(&np->tx_tasklet);
1656 netif_stop_queue(dev);
1658 if (netif_msg_ifdown(np)) {
1659 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1660 "Rx %4.4x Int %2.2x.\n",
1661 dev->name, ioread8(ioaddr + TxStatus),
1662 ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1663 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1664 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1667 /* Disable interrupts by clearing the interrupt mask. */
1668 iowrite16(0x0000, ioaddr + IntrEnable);
1670 /* Disable Rx and Tx DMA so resources can be released safely */
1671 iowrite32(0x500, ioaddr + DMACtrl);
1673 /* Stop the chip's Tx and Rx processes. */
1674 iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1676 for (i = 2000; i > 0; i--) {
1677 if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1682 iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1683 ioaddr + ASICCtrl + 2);
1685 for (i = 2000; i > 0; i--) {
1686 if ((ioread16(ioaddr + ASICCtrl +2) & ResetBusy) == 0)
1692 if (netif_msg_hw(np)) {
1693 printk(KERN_DEBUG " Tx ring at %8.8x:\n",
1694 (int)(np->tx_ring_dma));
1695 for (i = 0; i < TX_RING_SIZE; i++)
1696 printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1697 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1698 np->tx_ring[i].frag[0].length);
1699 printk(KERN_DEBUG " Rx ring %8.8x:\n",
1700 (int)(np->rx_ring_dma));
1701 for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1702 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1703 i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1704 np->rx_ring[i].frag[0].length);
1707 #endif /* __i386__ debugging only */
1709 free_irq(dev->irq, dev);
1711 del_timer_sync(&np->timer);
1713 /* Free all the skbuffs in the Rx queue. */
1714 for (i = 0; i < RX_RING_SIZE; i++) {
1715 np->rx_ring[i].status = 0;
1716 skb = np->rx_skbuff[i];
1718 pci_unmap_single(np->pci_dev,
1719 le32_to_cpu(np->rx_ring[i].frag[0].addr),
1720 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1722 np->rx_skbuff[i] = NULL;
1724 np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1726 for (i = 0; i < TX_RING_SIZE; i++) {
1727 np->tx_ring[i].next_desc = 0;
1728 skb = np->tx_skbuff[i];
1730 pci_unmap_single(np->pci_dev,
1731 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1732 skb->len, PCI_DMA_TODEVICE);
1734 np->tx_skbuff[i] = NULL;
1741 static void __devexit sundance_remove1 (struct pci_dev *pdev)
1743 struct net_device *dev = pci_get_drvdata(pdev);
1746 struct netdev_private *np = netdev_priv(dev);
1748 unregister_netdev(dev);
1749 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
1751 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
1753 pci_iounmap(pdev, np->base);
1754 pci_release_regions(pdev);
1756 pci_set_drvdata(pdev, NULL);
1760 static struct pci_driver sundance_driver = {
1762 .id_table = sundance_pci_tbl,
1763 .probe = sundance_probe1,
1764 .remove = __devexit_p(sundance_remove1),
1767 static int __init sundance_init(void)
1769 /* when a module, this is printed whether or not devices are found in probe */
1773 return pci_register_driver(&sundance_driver);
1776 static void __exit sundance_exit(void)
1778 pci_unregister_driver(&sundance_driver);
1781 module_init(sundance_init);
1782 module_exit(sundance_exit);