/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>

#include <asm/cacheflush.h>
#ifndef CONFIG_ARCH_MXC
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#endif

#include "fec.h"

#ifdef CONFIG_ARCH_MXC
#include <mach/hardware.h>
#define FEC_ALIGNMENT	0xf
#else
#define FEC_ALIGNMENT	0x3
#endif
/*
 * Define the fixed address of the FEC hardware.
 */
#if defined(CONFIG_M5272)

static unsigned char fec_mac_default[] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define FEC_FLASHMAC	0xf0020000
#elif defined(CONFIG_M5272C3)
#define FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define FEC_FLASHMAC	0xffc0406b
#else
#define FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */
/* The number of Tx and Rx buffers. These are allocated from the page
 * pool. The code may assume these are powers of two, so it is best
 * to keep them that size.
 * We don't need to allocate pages for the transmitter. We just use
 * the skbuffer directly.
 */
#define FEC_ENET_RX_PAGES	8
#define FEC_ENET_RX_FRSIZE	2048
#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE	2048
#define FEC_ENET_TX_FRPPG	(PAGE_SIZE / FEC_ENET_TX_FRSIZE)
#define TX_RING_SIZE		16	/* Must be power of two */
#define TX_RING_MOD_MASK	15	/*   for this to work */
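
/* Because the ring sizes are powers of two, the transmit path below can
 * wrap its skb indices with a cheap mask instead of a modulo, e.g.
 * (15 + 1) & TX_RING_MOD_MASK == 0.
 */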
#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
#error "FEC: descriptor ring size constants too large"
#endif
/* Interrupt events/masks. */
#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
#define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
#define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */

#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
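
/* Note: only the whole-frame (TXF/RXF) and MII events are unmasked by
 * default; the per-buffer TXB/RXB events would add nothing here, since
 * this driver puts each frame in a single buffer descriptor.
 */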
/* The FEC stores dest/src/type, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE		1518
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1520
/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
#define OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE	0
#endif
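
/* The << 16 places PKT_MAXBUF_SIZE in the maximum-frame-size field in the
 * upper half of FEC_R_CNTRL on the parts listed above; elsewhere that
 * field does not exist and OPT_FRAME_SIZE must stay zero.
 */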
/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
 * tx_bd_base always point to the base of the buffer descriptors. The
 * cur_rx and cur_tx point to the currently available buffer.
 * The dirty_tx tracks the current buffer that is being sent by the
 * controller. The cur_tx and dirty_tx are equal under both completely
 * empty and completely full conditions. The empty/ready indicator in
 * the buffer descriptor determines the actual condition.
 */
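
/* Worked example: with TX_RING_SIZE == 16, after 16 transmits with no
 * completions cur_tx wraps around and meets dirty_tx again; only the
 * READY bits in the descriptors (and fep->tx_full) distinguish this
 * full ring from an empty one.
 */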
struct fec_enet_private {
	/* Hardware registers of the FEC device */
	void __iomem *hwp;

	struct net_device *netdev;

	struct clk *clk;

	/* The saved address of a sent-in-place packet/buffer, for kfree_skb(). */
	unsigned char *tx_bounce[TX_RING_SIZE];
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	ushort skb_cur;
	ushort skb_dirty;

	/* CPM dual port RAM relative addresses */
	dma_addr_t bd_dma;
	/* Address of Rx and Tx buffers */
	struct bufdesc *rx_bd_base;
	struct bufdesc *tx_bd_base;
	/* The next free ring entry */
	struct bufdesc *cur_rx, *cur_tx;
	/* The ring entries to be free()ed */
	struct bufdesc *dirty_tx;

	uint tx_full;
	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
	spinlock_t hw_lock;

	struct platform_device *pdev;

	int opened;

	/* Phylib and MDIO interface */
	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	int mii_timeout;
	uint phy_speed;
	phy_interface_t phy_interface;
	int index;
	int link;
	int full_duplex;
	struct completion mdio_done;
};
static irqreturn_t fec_enet_interrupt(int irq, void *dev_id);
static void fec_enet_tx(struct net_device *dev);
static void fec_enet_rx(struct net_device *dev);
static int fec_enet_close(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);
/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)
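
/* Example: a clause-22 read of the BMSR (register 1) on the PHY at
 * address 2 would compose the management frame as
 *
 *	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
 *		FEC_MMFR_PA(2) | FEC_MMFR_RA(1) |
 *		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
 *
 * i.e. start (01), opcode (10 = read), PHY address, register address and
 * turnaround (10), exactly as fec_enet_mdio_read() does below.
 */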
#define FEC_MII_TIMEOUT		1000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)
static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *bdp;
	void *bufaddr;
	unsigned short status;
	unsigned long flags;

	if (!fep->link) {
		/* Link is down or autonegotiation is in progress. */
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&fep->hw_lock, flags);
	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

	status = bdp->cbd_sc;

	if (status & BD_ENET_TX_READY) {
		/* Ooops. All transmit buffers are full. Bail out.
		 * This should not happen, since dev->tbusy should be set.
		 */
		printk("%s: tx queue full!\n", dev->name);
		spin_unlock_irqrestore(&fep->hw_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* Clear all of the status flags */
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	bdp->cbd_datlen = skb->len;

	/*
	 * On some FEC implementations data must be aligned on
	 * 4-byte boundaries. Use bounce buffers to copy data
	 * and get it aligned. Ugh.
	 */
	if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
		unsigned int index;
		index = bdp - fep->tx_bd_base;
		memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
		bufaddr = fep->tx_bounce[index];
	}

	/* Save skb pointer */
	fep->tx_skbuff[fep->skb_cur] = skb;

	dev->stats.tx_bytes += skb->len;
	fep->skb_cur = (fep->skb_cur + 1) & TX_RING_MOD_MASK;

	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 */
	bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr,
			FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);

	/* Send it on its way. Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	/* Trigger transmission start */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE);

	/* If this was the last BD in the ring, start at the beginning again. */
	if (status & BD_ENET_TX_WRAP)
		bdp = fep->tx_bd_base;
	else
		bdp++;

	if (bdp == fep->dirty_tx) {
		fep->tx_full = 1;
		netif_stop_queue(dev);
	}

	fep->cur_tx = bdp;

	spin_unlock_irqrestore(&fep->hw_lock, flags);

	return NETDEV_TX_OK;
}
static void
fec_timeout(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	dev->stats.tx_errors++;

	fec_restart(dev, fep->full_duplex);
	netif_wake_queue(dev);
}
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fec_enet_private *fep = netdev_priv(dev);
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	do {
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);
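		/* Writing back the event bits we just read acknowledges
		 * exactly those events; anything that fires while we are
		 * in the handler keeps the do/while loop going.
		 */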
		if (int_events & FEC_ENET_RXF) {
			ret = IRQ_HANDLED;
			fec_enet_rx(dev);
		}

		/* Transmit OK, or non-fatal error. Update the buffer
		 * descriptors. FEC handles all errors, we just discover
		 * them as part of the transmit process.
		 */
		if (int_events & FEC_ENET_TXF) {
			ret = IRQ_HANDLED;
			fec_enet_tx(dev);
		}

		if (int_events & FEC_ENET_MII) {
			ret = IRQ_HANDLED;
			complete(&fep->mdio_done);
		}
	} while (int_events);

	return ret;
}
static void
fec_enet_tx(struct net_device *dev)
{
	struct fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;

	fep = netdev_priv(dev);
	spin_lock(&fep->hw_lock);
	bdp = fep->dirty_tx;

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
		if (bdp == fep->cur_tx && fep->tx_full == 0)
			break;

		dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
		bdp->cbd_bufaddr = 0;

		skb = fep->tx_skbuff[fep->skb_dirty];
		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				BD_ENET_TX_RL | BD_ENET_TX_UN |
				BD_ENET_TX_CSL)) {
			dev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				dev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				dev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				dev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				dev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				dev->stats.tx_carrier_errors++;
		} else {
			dev->stats.tx_packets++;
		}

		if (status & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			dev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted */
		if (status & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp++;

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (netif_queue_stopped(dev))
				netif_wake_queue(dev);
		}
	}
	fep->dirty_tx = bdp;
	spin_unlock(&fep->hw_lock);
}
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static void
fec_enet_rx(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	ushort pkt_len;
	__u8 *data;

	spin_lock(&fep->hw_lock);

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			dev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				dev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				dev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				dev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				dev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer. So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			dev->stats.rx_errors++;
			dev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		dev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		dev->stats.rx_bytes += pkt_len;
		data = (__u8 *)__va(bdp->cbd_bufaddr);

		dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
				DMA_FROM_DEVICE);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);

		if (unlikely(!skb)) {
			printk("%s: Memory squeeze, dropping packet.\n",
					dev->name);
			dev->stats.rx_dropped++;
		} else {
			skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len - 4);	/* Make room */
			skb_copy_to_linear_data(skb, data, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
		}

		bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen,
			DMA_FROM_DEVICE);
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		/* Update BD pointer to next entry */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp++;

		/* Doing this here will keep the FEC running while we process
		 * incoming frames. On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
	}
	fep->cur_rx = bdp;

	spin_unlock(&fep->hw_lock);
}
/* ------------------------------------------------------------------------- */
#ifdef CONFIG_M5272
static void __inline__ fec_get_mac(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	unsigned char *iap, tmpaddr[ETH_ALEN];

	if (FEC_FLASHMAC) {
		/*
		 * Get MAC address from FLASH.
		 * If it is all 1's or 0's, use the default.
		 */
		iap = (unsigned char *)FEC_FLASHMAC;
		if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
		    (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
			iap = fec_mac_default;
		if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
		    (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
			iap = fec_mac_default;
	} else {
		*((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
		*((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
		iap = &tmpaddr[0];
	}

	memcpy(dev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using default MAC address */
	if (iap == fec_mac_default)
		dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
}
#endif

/* ------------------------------------------------------------------------- */
static void fec_enet_adjust_link(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct phy_device *phy_dev = fep->phy_dev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&fep->hw_lock, flags);

	/* Prevent a state halted on mii error */
	if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
		phy_dev->state = PHY_RESUMING;
		goto spin_unlock;
	}

	/* Duplex link change */
	if (phy_dev->link) {
		if (fep->full_duplex != phy_dev->duplex) {
			fec_restart(dev, phy_dev->duplex);
			status_change = 1;
		}
	}

	/* Link on or off change */
	if (phy_dev->link != fep->link) {
		fep->link = phy_dev->link;
		if (phy_dev->link)
			fec_restart(dev, phy_dev->duplex);
		else
			fec_stop(dev);
		status_change = 1;
	}

spin_unlock:
	spin_unlock_irqrestore(&fep->hw_lock, flags);

	if (status_change)
		phy_print_status(phy_dev);
}
static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct fec_enet_private *fep = bus->priv;
	unsigned long time_left;

	fep->mii_timeout = 0;
	init_completion(&fep->mdio_done);

	/* start a read op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		printk(KERN_ERR "FEC: MDIO read timeout\n");
		return -ETIMEDOUT;
	}

	/* return value */
	return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
}
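
/* Note for both MDIO ops: FEC_MII_TIMEOUT is given in microseconds and
 * usecs_to_jiffies() rounds up, so on a HZ=100 kernel the effective
 * timeout is at least one jiffy (10 ms), not literally 1000 us.
 */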
static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct fec_enet_private *fep = bus->priv;
	unsigned long time_left;

	fep->mii_timeout = 0;
	init_completion(&fep->mdio_done);

	/* start a write op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA | FEC_MMFR_DATA(value),
		fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		printk(KERN_ERR "FEC: MDIO write timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static int fec_enet_mdio_reset(struct mii_bus *bus)
{
	return 0;
}
static int fec_enet_mii_probe(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct phy_device *phy_dev = NULL;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	char phy_name[MII_BUS_ID_SIZE + 3];
	int phy_id;

	fep->phy_dev = NULL;

	/* check for attached phy */
	for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
		if ((fep->mii_bus->phy_mask & (1 << phy_id)))
			continue;
		if (fep->mii_bus->phy_map[phy_id] == NULL)
			continue;
		if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
			continue;
		strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
		break;
	}

	if (phy_id >= PHY_MAX_ADDR) {
		printk(KERN_INFO "%s: no PHY, assuming direct connection "
			"to switch\n", dev->name);
		strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
		phy_id = 0;
	}

	snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
	phy_dev = phy_connect(dev, phy_name, &fec_enet_adjust_link, 0,
		PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phy_dev)) {
		printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
		return PTR_ERR(phy_dev);
	}

	/* mask with MAC supported features */
	phy_dev->supported &= PHY_BASIC_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	fep->phy_dev = phy_dev;
	fep->link = 0;
	fep->full_duplex = 0;

	printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
		"(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
		fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
		fep->phy_dev->irq);

	return 0;
}
static int fec_enet_mii_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(dev);
	int err = -ENXIO, i;

	fep->mii_timeout = 0;

	/*
	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
	 */
	fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000) << 1;
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
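	/* Worked example: a 50 MHz fec_clk gives
	 * DIV_ROUND_UP(50000000, 5000000) << 1 == 20, i.e. an MDC rate of
	 * 50 MHz / 20 = 2.5 MHz; the round-up only ever lowers the rate,
	 * keeping MDC at or below the 2.5 MHz limit.
	 */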
	fep->mii_bus = mdiobus_alloc();
	if (fep->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	fep->mii_bus->name = "fec_enet_mii_bus";
	fep->mii_bus->read = fec_enet_mdio_read;
	fep->mii_bus->write = fec_enet_mdio_write;
	fep->mii_bus->reset = fec_enet_mdio_reset;
	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
	fep->mii_bus->priv = fep;
	fep->mii_bus->parent = &pdev->dev;

	fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!fep->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_free_mdiobus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		fep->mii_bus->irq[i] = PHY_POLL;

	platform_set_drvdata(dev, fep->mii_bus);

	if (mdiobus_register(fep->mii_bus))
		goto err_out_free_mdio_irq;

	return 0;

err_out_free_mdio_irq:
	kfree(fep->mii_bus->irq);
err_out_free_mdiobus:
	mdiobus_free(fep->mii_bus);
err_out:
	return err;
}
static void fec_enet_mii_remove(struct fec_enet_private *fep)
{
	if (fep->phy_dev)
		phy_disconnect(fep->phy_dev);
	mdiobus_unregister(fep->mii_bus);
	kfree(fep->mii_bus->irq);
	mdiobus_free(fep->mii_bus);
}
static int fec_enet_get_settings(struct net_device *dev,
				  struct ethtool_cmd *cmd)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = fep->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_gset(phydev, cmd);
}

static int fec_enet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = fep->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_sset(phydev, cmd);
}
static void fec_enet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	strcpy(info->driver, fep->pdev->dev.driver->name);
	strcpy(info->version, "Revision: 1.0");
	strcpy(info->bus_info, dev_name(&dev->dev));
}
static struct ethtool_ops fec_enet_ethtool_ops = {
	.get_settings	= fec_enet_get_settings,
	.set_settings	= fec_enet_set_settings,
	.get_drvinfo	= fec_enet_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};
static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = fep->phy_dev;

	if (!netif_running(dev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}
static void fec_enet_free_buffers(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;
	struct sk_buff *skb;
	struct bufdesc *bdp;

	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = fep->rx_skbuff[i];

		if (bdp->cbd_bufaddr)
			dma_unmap_single(&dev->dev, bdp->cbd_bufaddr,
					FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
		if (skb)
			dev_kfree_skb(skb);
		bdp++;
	}

	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++)
		kfree(fep->tx_bounce[i]);
}
static int fec_enet_alloc_buffers(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;
	struct sk_buff *skb;
	struct bufdesc *bdp;

	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
		if (!skb) {
			fec_enet_free_buffers(dev);
			return -ENOMEM;
		}
		fep->rx_skbuff[i] = skb;

		bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
				FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
		bdp->cbd_sc = BD_ENET_RX_EMPTY;
		bdp++;
	}

	/* Set the last buffer to wrap. */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++) {
		fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);

		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap. */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	return 0;
}
static int
fec_enet_open(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int ret;

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */

	ret = fec_enet_alloc_buffers(dev);
	if (ret)
		return ret;

	/* Probe and connect to PHY when opening the interface */
	ret = fec_enet_mii_probe(dev);
	if (ret) {
		fec_enet_free_buffers(dev);
		return ret;
	}
	phy_start(fep->phy_dev);
	netif_start_queue(dev);
	fep->opened = 1;
	return 0;
}
static int
fec_enet_close(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* Don't know what to do yet. */
	fep->opened = 0;
	netif_stop_queue(dev);
	fec_stop(dev);

	if (fep->phy_dev)
		phy_disconnect(fep->phy_dev);

	fec_enet_free_buffers(dev);

	return 0;
}
/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering. Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not. I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define HASH_BITS	6		/* #bits in hash */
#define CRC32_POLY	0xEDB88320
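
/* Hash example: the CRC-32 below is computed LSB-first with the
 * bit-reversed polynomial, its top HASH_BITS (6) bits select one of 64
 * filter bits, and hash values 32-63 land in GRP_HASH_TABLE_HIGH while
 * 0-31 land in GRP_HASH_TABLE_LOW.
 */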
static void set_multicast_list(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	unsigned int i, bit, data, crc, tmp;
	unsigned char hash;

	if (dev->flags & IFF_PROMISC) {
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
		return;
	}

	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

	if (dev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
		return;
	}

	/* Clear filter and add the addresses in hash register
	 */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

	netdev_for_each_mc_addr(ha, dev) {
		/* Only support group multicast for now */
		if (!(ha->addr[0] & 1))
			continue;

		/* calculate crc32 value of mac address */
		crc = 0xffffffff;

		for (i = 0; i < dev->addr_len; i++) {
			data = ha->addr[i];
			for (bit = 0; bit < 8; bit++, data >>= 1) {
				crc = (crc >> 1) ^
				    (((crc ^ data) & 1) ? CRC32_POLY : 0);
			}
		}

		/* only the upper 6 bits (HASH_BITS) are used
		 * which point to a specific bit in the hash registers
		 */
		hash = (crc >> (32 - HASH_BITS)) & 0x3f;

		if (hash > 31) {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
			tmp |= 1 << (hash - 32);
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		} else {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
			tmp |= 1 << hash;
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
		}
	}
}
/* Set a MAC change in hardware. */
static int
fec_set_mac_address(struct net_device *dev, void *p)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
		(dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
		fep->hwp + FEC_ADDR_LOW);
	writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
		fep->hwp + FEC_ADDR_HIGH);
	return 0;
}
static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_set_multicast_list	= set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
	.ndo_do_ioctl		= fec_enet_ioctl,
};
/*
 * XXX: We need to clean up on failure exits here.
 *
 * index is only used in legacy code
 */
static int fec_enet_init(struct net_device *dev, int index)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *cbd_base;
	struct bufdesc *bdp;
	int i;

	/* Allocate memory for buffer descriptors. */
	cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
			GFP_KERNEL);
	if (!cbd_base) {
		printk("FEC: allocate descriptor memory failed?\n");
		return -ENOMEM;
	}

	spin_lock_init(&fep->hw_lock);

	fep->index = index;
	fep->hwp = (void __iomem *)dev->base_addr;
	fep->netdev = dev;

	/* Set the Ethernet address */
#ifdef CONFIG_M5272
	fec_get_mac(dev);
#else
	{
		unsigned long l;
		l = readl(fep->hwp + FEC_ADDR_LOW);
		dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
		dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
		dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
		dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
		l = readl(fep->hwp + FEC_ADDR_HIGH);
		dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
		dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
	}
#endif

	/* Set receive and transmit descriptor base. */
	fep->rx_bd_base = cbd_base;
	fep->tx_bd_base = cbd_base + RX_RING_SIZE;

	/* The FEC Ethernet specific entries in the device structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->netdev_ops = &fec_netdev_ops;
	dev->ethtool_ops = &fec_enet_ethtool_ops;

	/* Initialize the receive buffer descriptors. */
	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;
		bdp++;
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* ...and the same for transmit */
	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++) {
		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	fec_restart(dev, 0);

	return 0;
}
/* This function is called to start or restart the FEC during a link
 * change. This only happens when switching between half and full
 * duplex.
 */
static void
fec_restart(struct net_device *dev, int duplex)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	/* Whack a reset. We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	/* Clear any outstanding interrupt. */
	writel(0xffc00000, fep->hwp + FEC_IEVENT);

	/* Reset all multicast. */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	/* Set maximum receive buffer size. */
	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);

	/* Set receive and transmit descriptor base. */
	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
	writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
			fep->hwp + FEC_X_DES_START);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	/* Reset SKB transmit buffers. */
	fep->skb_cur = fep->skb_dirty = 0;
	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
		if (fep->tx_skbuff[i]) {
			dev_kfree_skb_any(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
	}

	/* Enable MII mode */
	if (duplex) {
		/* MII enable / FD enable */
		writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* MII enable / No Rcv on Xmit */
		writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}
	fep->full_duplex = duplex;

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#ifdef FEC_MIIGSK_ENR
	if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
		/* disable the gasket and wait */
		writel(0, fep->hwp + FEC_MIIGSK_ENR);
		while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
			udelay(1);

		/* configure the gasket: RMII, 50 MHz, no loopback, no echo */
		writel(1, fep->hwp + FEC_MIIGSK_CFGR);

		/* re-enable the gasket */
		writel(2, fep->hwp + FEC_MIIGSK_ENR);
	}
#endif

	/* And last, enable the transmit and receive processing */
	writel(2, fep->hwp + FEC_ECNTRL);
	writel(0, fep->hwp + FEC_R_DES_ACTIVE);

	/* Enable interrupts we wish to service */
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}
static void
fec_stop(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			printk("fec_stop: Graceful transmit stop did not complete!\n");
	}

	/* Whack a reset. We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}
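
/* Probe brings the device up in this order: MMIO region, up to three
 * IRQs, the "fec_clk" clock, MAC init, the MDIO bus, and finally netdev
 * registration; the failure labels below unwind it in reverse.
 */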
static int __devinit
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct fec_platform_data *pdata;
	struct net_device *ndev;
	int i, irq, ret = 0;
	struct resource *r;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENXIO;

	r = request_mem_region(r->start, resource_size(r), pdev->name);
	if (!r)
		return -EBUSY;

	/* Init network device */
	ndev = alloc_etherdev(sizeof(struct fec_enet_private));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(ndev);
	memset(fep, 0, sizeof(*fep));

	ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
	fep->pdev = pdev;

	if (!ndev->base_addr) {
		ret = -ENOMEM;
		goto failed_ioremap;
	}

	platform_set_drvdata(pdev, ndev);

	pdata = pdev->dev.platform_data;
	if (pdata)
		fep->phy_interface = pdata->phy;

	/* This device has up to three irqs on some platforms */
	for (i = 0; i < 3; i++) {
		irq = platform_get_irq(pdev, i);
		if (i && irq < 0)
			break;
		ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
		if (ret) {
			while (--i >= 0) {
				irq = platform_get_irq(pdev, i);
				free_irq(irq, ndev);
			}
			goto failed_irq;
		}
	}

	fep->clk = clk_get(&pdev->dev, "fec_clk");
	if (IS_ERR(fep->clk)) {
		ret = PTR_ERR(fep->clk);
		goto failed_clk;
	}
	clk_enable(fep->clk);

	ret = fec_enet_init(ndev, 0);
	if (ret)
		goto failed_init;

	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(ndev);

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	return 0;

failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_init:
	clk_disable(fep->clk);
	clk_put(fep->clk);
failed_clk:
	for (i = 0; i < 3; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq > 0)
			free_irq(irq, ndev);
	}
failed_irq:
	iounmap((void __iomem *)ndev->base_addr);
failed_ioremap:
	free_netdev(ndev);

	return ret;
}
static int __devexit
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	platform_set_drvdata(pdev, NULL);

	fec_stop(ndev);
	fec_enet_mii_remove(fep);
	clk_disable(fep->clk);
	clk_put(fep->clk);
	iounmap((void __iomem *)ndev->base_addr);
	unregister_netdev(ndev);
	free_netdev(ndev);

	return 0;
}
static int
fec_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep;

	if (ndev) {
		fep = netdev_priv(ndev);
		if (netif_running(ndev))
			fec_enet_close(ndev);
		clk_disable(fep->clk);
	}
	return 0;
}

static int
fec_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep;

	if (ndev) {
		fep = netdev_priv(ndev);
		clk_enable(fep->clk);
		if (netif_running(ndev))
			fec_enet_open(ndev);
	}
	return 0;
}
static const struct dev_pm_ops fec_pm_ops = {
	.suspend	= fec_suspend,
	.resume		= fec_resume,
	.freeze		= fec_suspend,
	.thaw		= fec_resume,
	.poweroff	= fec_suspend,
	.restore	= fec_resume,
};
static struct platform_driver fec_driver = {
	.driver = {
		.name	= "fec",
		.owner	= THIS_MODULE,
		.pm	= &fec_pm_ops,
	},
	.probe	= fec_probe,
	.remove	= __devexit_p(fec_drv_remove),
};
static int __init
fec_enet_module_init(void)
{
	printk(KERN_INFO "FEC Ethernet Driver\n");

	return platform_driver_register(&fec_driver);
}

static void __exit
fec_enet_cleanup(void)
{
	platform_driver_unregister(&fec_driver);
}

module_exit(fec_enet_cleanup);
module_init(fec_enet_module_init);

MODULE_LICENSE("GPL");