2 * Ethernet driver for the WIZnet W5100 chip.
4 * Copyright (C) 2006-2008 WIZnet Co.,Ltd.
5 * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
7 * Licensed under the GPL-2 or later.
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/kconfig.h>
13 #include <linux/netdevice.h>
14 #include <linux/etherdevice.h>
15 #include <linux/platform_device.h>
16 #include <linux/platform_data/wiznet.h>
17 #include <linux/ethtool.h>
18 #include <linux/skbuff.h>
19 #include <linux/types.h>
20 #include <linux/errno.h>
21 #include <linux/delay.h>
22 #include <linux/slab.h>
23 #include <linux/spinlock.h>
25 #include <linux/ioport.h>
26 #include <linux/interrupt.h>
27 #include <linux/irq.h>
28 #include <linux/gpio.h>
32 #define DRV_NAME "w5100"
33 #define DRV_VERSION "2012-04-04"
35 MODULE_DESCRIPTION("WIZnet W5100 Ethernet driver v"DRV_VERSION);
36 MODULE_AUTHOR("Mike Sinkovsky <msink@permonline.ru>");
37 MODULE_ALIAS("platform:"DRV_NAME);
38 MODULE_LICENSE("GPL");
43 #define W5100_COMMON_REGS 0x0000
44 #define W5100_MR 0x0000 /* Mode Register */
45 #define MR_RST 0x80 /* S/W reset */
46 #define MR_PB 0x10 /* Ping block */
47 #define MR_AI 0x02 /* Address Auto-Increment */
48 #define MR_IND 0x01 /* Indirect mode */
49 #define W5100_SHAR 0x0009 /* Source MAC address */
50 #define W5100_IR 0x0015 /* Interrupt Register */
51 #define W5100_IMR 0x0016 /* Interrupt Mask Register */
52 #define IR_S0 0x01 /* S0 interrupt */
53 #define W5100_RTR 0x0017 /* Retry Time-value Register */
54 #define RTR_DEFAULT 2000 /* =0x07d0 (2000) */
55 #define W5100_RMSR 0x001a /* Receive Memory Size */
56 #define W5100_TMSR 0x001b /* Transmit Memory Size */
57 #define W5100_COMMON_REGS_LEN 0x0040
59 #define W5100_S0_REGS 0x0400
60 #define W5100_S0_MR 0x0400 /* S0 Mode Register */
61 #define S0_MR_MACRAW 0x04 /* MAC RAW mode (promiscuous) */
62 #define S0_MR_MACRAW_MF 0x44 /* MAC RAW mode (filtered) */
63 #define W5100_S0_CR 0x0401 /* S0 Command Register */
64 #define S0_CR_OPEN 0x01 /* OPEN command */
65 #define S0_CR_CLOSE 0x10 /* CLOSE command */
66 #define S0_CR_SEND 0x20 /* SEND command */
67 #define S0_CR_RECV 0x40 /* RECV command */
68 #define W5100_S0_IR 0x0402 /* S0 Interrupt Register */
69 #define S0_IR_SENDOK 0x10 /* complete sending */
70 #define S0_IR_RECV 0x04 /* receiving data */
71 #define W5100_S0_SR 0x0403 /* S0 Status Register */
72 #define S0_SR_MACRAW 0x42 /* mac raw mode */
73 #define W5100_S0_TX_FSR 0x0420 /* S0 Transmit free memory size */
74 #define W5100_S0_TX_RD 0x0422 /* S0 Transmit memory read pointer */
75 #define W5100_S0_TX_WR 0x0424 /* S0 Transmit memory write pointer */
76 #define W5100_S0_RX_RSR 0x0426 /* S0 Receive free memory size */
77 #define W5100_S0_RX_RD 0x0428 /* S0 Receive memory read pointer */
78 #define W5100_S0_REGS_LEN 0x0040
80 #define W5100_TX_MEM_START 0x4000
81 #define W5100_TX_MEM_SIZE 0x2000
82 #define W5100_RX_MEM_START 0x6000
83 #define W5100_RX_MEM_SIZE 0x2000
86 * Device driver private data structure
90 const struct w5100_ops *ops;
95 struct napi_struct napi;
96 struct net_device *ndev;
100 struct workqueue_struct *xfer_wq;
101 struct work_struct rx_work;
102 struct sk_buff *tx_skb;
103 struct work_struct tx_work;
104 struct work_struct setrx_work;
105 struct work_struct restart_work;
108 /************************************************************************
110 * Lowlevel I/O functions
112 ***********************************************************************/
114 struct w5100_mmio_priv {
116 /* Serialize access in indirect address mode */
/* Return the bus-specific (MMIO) private area that is appended after
 * struct w5100_priv in the netdev private data (see w5100_ops_priv()).
 */
120 static inline struct w5100_mmio_priv *w5100_mmio_priv(struct net_device *dev)
122 return w5100_ops_priv(dev);
/* Shorthand for the ioremapped base address of the chip's register window. */
125 static inline void __iomem *w5100_mmio(struct net_device *ndev)
127 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
129 return mmio_priv->base;
133 * In direct address mode host system can directly access W5100 registers
134 * after mapping to Memory-Mapped I/O space.
136 * 0x8000 bytes are required for memory space.
/* Read one byte at chip address @addr.  The offset is scaled by
 * CONFIG_WIZNET_BUS_SHIFT to match how the chip's address lines are
 * wired to the host bus.
 */
138 static inline int w5100_read_direct(struct net_device *ndev, u16 addr)
140 return ioread8(w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT));
/* Raw one-byte write at @addr; internal helper without the w5100_ops
 * return convention (the public wrapper is w5100_write_direct() below).
 */
143 static inline int __w5100_write_direct(struct net_device *ndev, u16 addr,
146 iowrite8(data, w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT));
/* One-byte write matching the w5100_ops .write signature. */
151 static inline int w5100_write_direct(struct net_device *ndev, u16 addr, u8 data)
153 __w5100_write_direct(ndev, addr, data);
/* 16-bit accessors: multi-byte W5100 registers are big-endian on the
 * chip (high byte at the lower address), hence the explicit byte-by-byte
 * assembly/disassembly below.
 */
159 static int w5100_read16_direct(struct net_device *ndev, u16 addr)
162 data = w5100_read_direct(ndev, addr) << 8;
163 data |= w5100_read_direct(ndev, addr + 1);
167 static int w5100_write16_direct(struct net_device *ndev, u16 addr, u16 data)
169 __w5100_write_direct(ndev, addr, data >> 8);
170 __w5100_write_direct(ndev, addr + 1, data);
/* Bulk copy from chip memory into @buf, one byte per bus access. */
176 static int w5100_readbulk_direct(struct net_device *ndev, u16 addr, u8 *buf,
181 for (i = 0; i < len; i++, addr++)
182 *buf++ = w5100_read_direct(ndev, addr);
/* Bulk copy from @buf into chip memory, one byte per bus access. */
187 static int w5100_writebulk_direct(struct net_device *ndev, u16 addr,
188 const u8 *buf, int len)
192 for (i = 0; i < len; i++, addr++)
193 __w5100_write_direct(ndev, addr, *buf++);
/* One-time MMIO setup, invoked from w5100_probe() through ops->init:
 * initialize the indirect-mode register lock, map the platform MEM
 * resource (devm-managed, so no explicit unmap on remove) and log the
 * base address and IRQ.
 */
200 static int w5100_mmio_init(struct net_device *ndev)
202 struct platform_device *pdev = to_platform_device(ndev->dev.parent);
203 struct w5100_priv *priv = netdev_priv(ndev);
204 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
205 struct resource *mem;
207 spin_lock_init(&mmio_priv->reg_lock);
209 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
210 mmio_priv->base = devm_ioremap_resource(&pdev->dev, mem);
211 if (IS_ERR(mmio_priv->base))
212 return PTR_ERR(mmio_priv->base);
214 netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, priv->irq);
/* Accessor table for boards where the full register/buffer window is
 * directly memory-mapped.  No .reset hook: the generic software reset in
 * w5100_reset() is sufficient in direct mode.
 */
219 static const struct w5100_ops w5100_mmio_direct_ops = {
220 .read = w5100_read_direct,
221 .write = w5100_write_direct,
222 .read16 = w5100_read16_direct,
223 .write16 = w5100_write16_direct,
224 .readbulk = w5100_readbulk_direct,
225 .writebulk = w5100_writebulk_direct,
226 .init = w5100_mmio_init,
230 * In indirect address mode host system indirectly accesses registers by
231 * using Indirect Mode Address Register (IDM_AR) and Indirect Mode Data
232 * Register (IDM_DR), which are directly mapped to Memory-Mapped I/O space.
233 * Mode Register (MR) is directly accessible.
235 * Only 0x04 bytes are required for memory space.
237 #define W5100_IDM_AR 0x01 /* Indirect Mode Address Register */
238 #define W5100_IDM_DR 0x03 /* Indirect Mode Data Register */
/* Indirect accessors: latch the target address into IDM_AR, then move
 * data through IDM_DR.  reg_lock serializes the AR-write + DR-access
 * pair so concurrent callers cannot interleave and corrupt the address.
 */
240 static int w5100_read_indirect(struct net_device *ndev, u16 addr)
242 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
246 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
247 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
248 data = w5100_read_direct(ndev, W5100_IDM_DR);
249 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
/* Single-byte indirect write (AR then DR), under reg_lock. */
254 static int w5100_write_indirect(struct net_device *ndev, u16 addr, u8 data)
256 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
259 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
260 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
261 w5100_write_direct(ndev, W5100_IDM_DR, data);
262 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
/* 16-bit indirect read: one AR write, two consecutive DR reads.  The
 * second read hits addr+1 because auto-increment (MR_AI) is enabled by
 * w5100_reset_indirect() below.
 */
267 static int w5100_read16_indirect(struct net_device *ndev, u16 addr)
269 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
273 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
274 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
275 data = w5100_read_direct(ndev, W5100_IDM_DR) << 8;
276 data |= w5100_read_direct(ndev, W5100_IDM_DR);
277 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
/* 16-bit indirect write, high byte first (chip registers are big-endian),
 * relying on MR_AI auto-increment for the second byte.
 */
282 static int w5100_write16_indirect(struct net_device *ndev, u16 addr, u16 data)
284 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
287 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
288 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
289 __w5100_write_direct(ndev, W5100_IDM_DR, data >> 8);
290 w5100_write_direct(ndev, W5100_IDM_DR, data);
291 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
/* Bulk indirect read: one AR write, then repeated DR reads walk the
 * address range via auto-increment.  Whole transfer done under reg_lock.
 */
296 static int w5100_readbulk_indirect(struct net_device *ndev, u16 addr, u8 *buf,
299 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
303 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
304 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
306 for (i = 0; i < len; i++)
307 *buf++ = w5100_read_direct(ndev, W5100_IDM_DR);
310 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
/* Bulk indirect write, symmetric to the bulk read above. */
315 static int w5100_writebulk_indirect(struct net_device *ndev, u16 addr,
316 const u8 *buf, int len)
318 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
322 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
323 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
325 for (i = 0; i < len; i++)
326 __w5100_write_direct(ndev, W5100_IDM_DR, *buf++);
329 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
/* Indirect-mode reset: issue the software reset, then re-enter indirect
 * mode with auto-increment and ping-block enabled (the reset clears MR,
 * which would otherwise drop the chip back to direct addressing).
 */
334 static int w5100_reset_indirect(struct net_device *ndev)
336 w5100_write_direct(ndev, W5100_MR, MR_RST);
338 w5100_write_direct(ndev, W5100_MR, MR_PB | MR_AI | MR_IND);
/* Accessor table for boards exposing only the 4-byte indirect window;
 * needs the custom .reset above to restore MR_IND after a soft reset.
 */
343 static const struct w5100_ops w5100_mmio_indirect_ops = {
344 .read = w5100_read_indirect,
345 .write = w5100_write_indirect,
346 .read16 = w5100_read16_indirect,
347 .write16 = w5100_write16_indirect,
348 .readbulk = w5100_readbulk_indirect,
349 .writebulk = w5100_writebulk_indirect,
350 .init = w5100_mmio_init,
351 .reset = w5100_reset_indirect,
/* Compile-time bus dispatch.  When the bus type is fixed in Kconfig
 * (DIRECT or INDIRECT) the w5100_read/write family below compiles down
 * to direct calls into the matching accessor set; with
 * CONFIG_WIZNET_BUS_ANY the calls go through the w5100_ops table that
 * was selected at probe time.
 */
354 #if defined(CONFIG_WIZNET_BUS_DIRECT)
356 static int w5100_read(struct w5100_priv *priv, u16 addr)
358 return w5100_read_direct(priv->ndev, addr);
361 static int w5100_write(struct w5100_priv *priv, u16 addr, u8 data)
363 return w5100_write_direct(priv->ndev, addr, data);
366 static int w5100_read16(struct w5100_priv *priv, u16 addr)
368 return w5100_read16_direct(priv->ndev, addr);
371 static int w5100_write16(struct w5100_priv *priv, u16 addr, u16 data)
373 return w5100_write16_direct(priv->ndev, addr, data);
376 static int w5100_readbulk(struct w5100_priv *priv, u16 addr, u8 *buf, int len)
378 return w5100_readbulk_direct(priv->ndev, addr, buf, len);
381 static int w5100_writebulk(struct w5100_priv *priv, u16 addr, const u8 *buf,
384 return w5100_writebulk_direct(priv->ndev, addr, buf, len);
387 #elif defined(CONFIG_WIZNET_BUS_INDIRECT)
389 static int w5100_read(struct w5100_priv *priv, u16 addr)
391 return w5100_read_indirect(priv->ndev, addr);
394 static int w5100_write(struct w5100_priv *priv, u16 addr, u8 data)
396 return w5100_write_indirect(priv->ndev, addr, data);
399 static int w5100_read16(struct w5100_priv *priv, u16 addr)
401 return w5100_read16_indirect(priv->ndev, addr);
404 static int w5100_write16(struct w5100_priv *priv, u16 addr, u16 data)
406 return w5100_write16_indirect(priv->ndev, addr, data);
409 static int w5100_readbulk(struct w5100_priv *priv, u16 addr, u8 *buf, int len)
411 return w5100_readbulk_indirect(priv->ndev, addr, buf, len);
414 static int w5100_writebulk(struct w5100_priv *priv, u16 addr, const u8 *buf,
417 return w5100_writebulk_indirect(priv->ndev, addr, buf, len);
420 #else /* CONFIG_WIZNET_BUS_ANY */
422 static int w5100_read(struct w5100_priv *priv, u16 addr)
424 return priv->ops->read(priv->ndev, addr);
427 static int w5100_write(struct w5100_priv *priv, u16 addr, u8 data)
429 return priv->ops->write(priv->ndev, addr, data);
432 static int w5100_read16(struct w5100_priv *priv, u16 addr)
434 return priv->ops->read16(priv->ndev, addr);
437 static int w5100_write16(struct w5100_priv *priv, u16 addr, u16 data)
439 return priv->ops->write16(priv->ndev, addr, data);
442 static int w5100_readbulk(struct w5100_priv *priv, u16 addr, u8 *buf, int len)
444 return priv->ops->readbulk(priv->ndev, addr, buf, len);
447 static int w5100_writebulk(struct w5100_priv *priv, u16 addr, const u8 *buf,
450 return priv->ops->writebulk(priv->ndev, addr, buf, len);
/* Copy @len bytes out of the circular socket-0 RX buffer starting at
 * @offset.  The buffer is W5100_RX_MEM_SIZE bytes at W5100_RX_MEM_START;
 * when the span crosses the end, the copy is split into a tail segment
 * and a second read that wraps to the start of the window.
 */
455 static int w5100_readbuf(struct w5100_priv *priv, u16 offset, u8 *buf, int len)
461 offset %= W5100_RX_MEM_SIZE;
462 addr = W5100_RX_MEM_START + offset;
464 if (offset + len > W5100_RX_MEM_SIZE) {
465 remain = (offset + len) % W5100_RX_MEM_SIZE;
466 len = W5100_RX_MEM_SIZE - offset;
469 ret = w5100_readbulk(priv, addr, buf, len);
473 return w5100_readbulk(priv, W5100_RX_MEM_START, buf + len, remain);
/* Mirror of w5100_readbuf() for the circular socket-0 TX buffer
 * (W5100_TX_MEM_START / W5100_TX_MEM_SIZE), with the same wrap-around
 * split.
 */
476 static int w5100_writebuf(struct w5100_priv *priv, u16 offset, const u8 *buf,
483 offset %= W5100_TX_MEM_SIZE;
484 addr = W5100_TX_MEM_START + offset;
486 if (offset + len > W5100_TX_MEM_SIZE) {
487 remain = (offset + len) % W5100_TX_MEM_SIZE;
488 len = W5100_TX_MEM_SIZE - offset;
491 ret = w5100_writebulk(priv, addr, buf, len);
495 return w5100_writebulk(priv, W5100_TX_MEM_START, buf + len, remain);
/* Reset the chip: use the bus-specific hook when one exists (needed in
 * indirect mode to restore MR_IND, see w5100_reset_indirect()), else the
 * generic software reset followed by re-enabling ping block.
 */
498 static int w5100_reset(struct w5100_priv *priv)
500 if (priv->ops->reset)
501 return priv->ops->reset(priv->ndev);
503 w5100_write(priv, W5100_MR, MR_RST);
505 w5100_write(priv, W5100_MR, MR_PB);
/* Issue a socket-0 command and busy-poll the command register until the
 * chip clears it (completion), bounded by a 100 ms timeout.
 * NOTE(review): the timeout error path is elided in this excerpt —
 * presumably returns a negative errno; confirm against the full source.
 */
510 static int w5100_command(struct w5100_priv *priv, u16 cmd)
512 unsigned long timeout;
514 w5100_write(priv, W5100_S0_CR, cmd);
516 timeout = jiffies + msecs_to_jiffies(100);
518 while (w5100_read(priv, W5100_S0_CR) != 0) {
519 if (time_after(jiffies, timeout))
/* Push the netdev's current MAC address into the chip's source hardware
 * address register (SHAR).
 */
527 static void w5100_write_macaddr(struct w5100_priv *priv)
529 struct net_device *ndev = priv->ndev;
531 w5100_writebulk(priv, W5100_SHAR, ndev->dev_addr, ETH_ALEN);
/* Bring the chip to a known state: mask all interrupts, restore the MAC
 * address, and split the 16K internal packet memory as 8K RX / 8K TX
 * (RMSR/TMSR value 0x03 = all memory to socket 0).
 * NOTE(review): the w5100_reset() call at the top of this function is
 * elided from this excerpt — confirm against the full source.
 */
534 static void w5100_hw_reset(struct w5100_priv *priv)
538 w5100_write(priv, W5100_IMR, 0);
539 w5100_write_macaddr(priv);
541 /* Configure 16K of internal memory
542 * as 8K RX buffer and 8K TX buffer
544 w5100_write(priv, W5100_RMSR, 0x03);
545 w5100_write(priv, W5100_TMSR, 0x03);
/* Open socket 0 in MAC-raw mode — unfiltered when promiscuous, with the
 * chip's MAC filter otherwise — then unmask the socket-0 interrupt.
 */
548 static void w5100_hw_start(struct w5100_priv *priv)
550 w5100_write(priv, W5100_S0_MR, priv->promisc ?
551 S0_MR_MACRAW : S0_MR_MACRAW_MF);
552 w5100_command(priv, S0_CR_OPEN);
553 w5100_write(priv, W5100_IMR, IR_S0);
/* Mask interrupts and close socket 0. */
556 static void w5100_hw_close(struct w5100_priv *priv)
558 w5100_write(priv, W5100_IMR, 0);
559 w5100_command(priv, S0_CR_CLOSE);
562 /***********************************************************************
564 * Device driver functions / callbacks
566 ***********************************************************************/
/* ethtool: report driver name/version and the parent bus device. */
568 static void w5100_get_drvinfo(struct net_device *ndev,
569 struct ethtool_drvinfo *info)
571 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
572 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
573 strlcpy(info->bus_info, dev_name(ndev->dev.parent),
574 sizeof(info->bus_info));
/* ethtool: link state from the optional link-detect GPIO.
 * NOTE(review): the fallback return for boards without a link GPIO is
 * elided here — presumably reports link up unconditionally; confirm.
 */
577 static u32 w5100_get_link(struct net_device *ndev)
579 struct w5100_priv *priv = netdev_priv(ndev);
581 if (gpio_is_valid(priv->link_gpio))
582 return !!gpio_get_value(priv->link_gpio);
/* ethtool: get/set the netif_msg debug level stored in priv. */
587 static u32 w5100_get_msglevel(struct net_device *ndev)
589 struct w5100_priv *priv = netdev_priv(ndev);
591 return priv->msg_enable;
594 static void w5100_set_msglevel(struct net_device *ndev, u32 value)
596 struct w5100_priv *priv = netdev_priv(ndev);
598 priv->msg_enable = value;
/* ethtool: register dump covers the common window plus socket 0. */
601 static int w5100_get_regs_len(struct net_device *ndev)
603 return W5100_COMMON_REGS_LEN + W5100_S0_REGS_LEN;
/* ethtool: dump common registers followed by the socket-0 registers
 * into @buf (caller sized it via w5100_get_regs_len()).
 */
606 static void w5100_get_regs(struct net_device *ndev,
607 struct ethtool_regs *regs, void *buf)
609 struct w5100_priv *priv = netdev_priv(ndev);
612 w5100_readbulk(priv, W5100_COMMON_REGS, buf, W5100_COMMON_REGS_LEN);
613 buf += W5100_COMMON_REGS_LEN;
614 w5100_readbulk(priv, W5100_S0_REGS, buf, W5100_S0_REGS_LEN);
/* Full recovery after a TX hang: quiesce the queue, reset and restart
 * the hardware, account a tx error, refresh trans_start so the watchdog
 * does not immediately re-fire, and resume the queue.
 */
617 static void w5100_restart(struct net_device *ndev)
619 struct w5100_priv *priv = netdev_priv(ndev);
621 netif_stop_queue(ndev);
622 w5100_hw_reset(priv);
623 w5100_hw_start(priv);
624 ndev->stats.tx_errors++;
625 ndev->trans_start = jiffies;
626 netif_wake_queue(ndev);
/* Deferred-context wrapper so sleeping buses can restart from process
 * context (scheduled by w5100_tx_timeout()).
 */
629 static void w5100_restart_work(struct work_struct *work)
631 struct w5100_priv *priv = container_of(work, struct w5100_priv,
634 w5100_restart(priv->ndev);
/* netdev watchdog callback: on a bus that may sleep, defer the restart
 * to the work item above.
 * NOTE(review): the non-sleeping else branch is elided — presumably it
 * calls w5100_restart() directly; confirm against the full source.
 */
637 static void w5100_tx_timeout(struct net_device *ndev)
639 struct w5100_priv *priv = netdev_priv(ndev);
641 if (priv->ops->may_sleep)
642 schedule_work(&priv->restart_work);
/* Copy one skb into the chip's TX buffer at the current write pointer,
 * advance TX_WR, account the packet, and issue SEND.
 * NOTE(review): the skb free is elided from this excerpt — confirm
 * where the skb is released in the full source.
 */
647 static void w5100_tx_skb(struct net_device *ndev, struct sk_buff *skb)
649 struct w5100_priv *priv = netdev_priv(ndev);
652 offset = w5100_read16(priv, W5100_S0_TX_WR);
653 w5100_writebuf(priv, offset, skb->data, skb->len);
654 w5100_write16(priv, W5100_S0_TX_WR, offset + skb->len);
655 ndev->stats.tx_bytes += skb->len;
656 ndev->stats.tx_packets++;
659 w5100_command(priv, S0_CR_SEND);
/* Process-context transmit for sleeping buses: send the single skb that
 * w5100_start_tx() parked in priv->tx_skb.
 */
662 static void w5100_tx_work(struct work_struct *work)
664 struct w5100_priv *priv = container_of(work, struct w5100_priv,
666 struct sk_buff *skb = priv->tx_skb;
672 w5100_tx_skb(priv->ndev, skb);
/* ndo_start_xmit: the device has a single TX slot, so the queue is
 * stopped for every packet and re-woken from the SENDOK interrupt.
 * Sleeping buses hand the skb to the workqueue; others transmit inline.
 */
675 static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
677 struct w5100_priv *priv = netdev_priv(ndev);
679 netif_stop_queue(ndev);
681 if (priv->ops->may_sleep) {
682 WARN_ON(priv->tx_skb);
684 queue_work(priv->xfer_wq, &priv->tx_work);
686 w5100_tx_skb(ndev, skb);
/* Pull one received frame out of the chip's RX buffer.  The 2-byte
 * on-chip header holds the total record length (frame + header), hence
 * the "- 2" when computing the payload size.  On allocation failure the
 * read pointer is advanced past all pending data (RSR bytes) so the
 * buffer cannot wedge, and the frame is dropped.
 * NOTE(review): the early return when RX_RSR is zero and the final
 * "return skb" are elided from this excerpt; callers below treat NULL
 * as "no more frames" — confirm against the full source.
 */
692 static struct sk_buff *w5100_rx_skb(struct net_device *ndev)
694 struct w5100_priv *priv = netdev_priv(ndev);
699 u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR);
704 offset = w5100_read16(priv, W5100_S0_RX_RD);
705 w5100_readbuf(priv, offset, header, 2);
706 rx_len = get_unaligned_be16(header) - 2;
708 skb = netdev_alloc_skb_ip_align(ndev, rx_len);
709 if (unlikely(!skb)) {
710 w5100_write16(priv, W5100_S0_RX_RD, offset + rx_buf_len);
711 w5100_command(priv, S0_CR_RECV);
712 ndev->stats.rx_dropped++;
716 skb_put(skb, rx_len);
717 w5100_readbuf(priv, offset + 2, skb->data, rx_len);
718 w5100_write16(priv, W5100_S0_RX_RD, offset + 2 + rx_len);
719 w5100_command(priv, S0_CR_RECV);
720 skb->protocol = eth_type_trans(skb, ndev);
722 ndev->stats.rx_packets++;
723 ndev->stats.rx_bytes += rx_len;
/* Process-context RX for sleeping buses: drain every pending frame,
 * then re-enable the socket-0 interrupt that w5100_interrupt() masked.
 * NOTE(review): the per-skb delivery call in the loop body is elided —
 * presumably netif_rx()/netif_rx_ni(); confirm against the full source.
 */
728 static void w5100_rx_work(struct work_struct *work)
730 struct w5100_priv *priv = container_of(work, struct w5100_priv,
734 while ((skb = w5100_rx_skb(priv->ndev)))
737 w5100_write(priv, W5100_IMR, IR_S0);
/* NAPI poll: receive up to @budget frames; if the chip ran dry before
 * the budget was spent, complete NAPI and re-enable the RX interrupt.
 * NOTE(review): the napi_complete() call and the break on NULL skb are
 * elided from this excerpt — confirm against the full source.
 */
740 static int w5100_napi_poll(struct napi_struct *napi, int budget)
742 struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
745 for (rx_count = 0; rx_count < budget; rx_count++) {
746 struct sk_buff *skb = w5100_rx_skb(priv->ndev);
749 netif_receive_skb(skb);
754 if (rx_count < budget) {
756 w5100_write(priv, W5100_IMR, IR_S0);
/* Main interrupt handler (threaded when the bus may sleep, see
 * w5100_probe()).  Reads and acknowledges the socket-0 interrupt flags:
 * SENDOK re-opens the single-slot TX queue; RECV masks further chip
 * interrupts and hands RX to the workqueue (sleeping bus) or NAPI.
 */
762 static irqreturn_t w5100_interrupt(int irq, void *ndev_instance)
764 struct net_device *ndev = ndev_instance;
765 struct w5100_priv *priv = netdev_priv(ndev);
767 int ir = w5100_read(priv, W5100_S0_IR);
/* Write-back acknowledges exactly the bits we observed. */
770 w5100_write(priv, W5100_S0_IR, ir);
772 if (ir & S0_IR_SENDOK) {
773 netif_dbg(priv, tx_done, ndev, "tx done\n");
774 netif_wake_queue(ndev);
777 if (ir & S0_IR_RECV) {
/* Mask chip interrupts until the RX drain re-enables them. */
778 w5100_write(priv, W5100_IMR, 0);
780 if (priv->ops->may_sleep)
781 queue_work(priv->xfer_wq, &priv->rx_work);
782 else if (napi_schedule_prep(&priv->napi))
783 __napi_schedule(&priv->napi);
/* Edge-triggered handler on the optional link-detect GPIO: mirror the
 * pin level into the netdev carrier state while the interface is up.
 */
789 static irqreturn_t w5100_detect_link(int irq, void *ndev_instance)
791 struct net_device *ndev = ndev_instance;
792 struct w5100_priv *priv = netdev_priv(ndev);
794 if (netif_running(ndev)) {
795 if (gpio_get_value(priv->link_gpio) != 0) {
796 netif_info(priv, link, ndev, "link is up\n");
797 netif_carrier_on(ndev);
799 netif_info(priv, link, ndev, "link is down\n");
800 netif_carrier_off(ndev);
/* Deferred re-open of socket 0 so a promiscuity change can be applied
 * from process context on sleeping buses (see w5100_set_rx_mode()).
 */
807 static void w5100_setrx_work(struct work_struct *work)
809 struct w5100_priv *priv = container_of(work, struct w5100_priv,
812 w5100_hw_start(priv);
/* ndo_set_rx_mode: track IFF_PROMISC and re-open socket 0 with the new
 * MACRAW filter setting when it changes — deferred to setrx_work on
 * sleeping buses since this callback runs in atomic context.
 */
815 static void w5100_set_rx_mode(struct net_device *ndev)
817 struct w5100_priv *priv = netdev_priv(ndev);
818 bool set_promisc = (ndev->flags & IFF_PROMISC) != 0;
820 if (priv->promisc != set_promisc) {
821 priv->promisc = set_promisc;
823 if (priv->ops->may_sleep)
824 schedule_work(&priv->setrx_work);
826 w5100_hw_start(priv);
/* ndo_set_mac_address: validate, store in the netdev, and program the
 * chip's SHAR register.
 */
830 static int w5100_set_macaddr(struct net_device *ndev, void *addr)
832 struct w5100_priv *priv = netdev_priv(ndev);
833 struct sockaddr *sock_addr = addr;
835 if (!is_valid_ether_addr(sock_addr->sa_data))
836 return -EADDRNOTAVAIL;
837 memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
838 w5100_write_macaddr(priv);
/* ndo_open: start the hardware, enable NAPI and the TX queue, and
 * assert carrier unless a link GPIO exists and reads "down".
 */
842 static int w5100_open(struct net_device *ndev)
844 struct w5100_priv *priv = netdev_priv(ndev);
846 netif_info(priv, ifup, ndev, "enabling\n");
847 w5100_hw_start(priv);
848 napi_enable(&priv->napi);
849 netif_start_queue(ndev);
850 if (!gpio_is_valid(priv->link_gpio) ||
851 gpio_get_value(priv->link_gpio) != 0)
852 netif_carrier_on(ndev);
/* ndo_stop: tear down in reverse order of w5100_open(). */
856 static int w5100_stop(struct net_device *ndev)
858 struct w5100_priv *priv = netdev_priv(ndev);
860 netif_info(priv, ifdown, ndev, "shutting down\n");
861 w5100_hw_close(priv);
862 netif_carrier_off(ndev);
863 netif_stop_queue(ndev);
864 napi_disable(&priv->napi);
/* ethtool operations implemented by this driver (no link settings: the
 * W5100's PHY is fixed and not exposed).
 */
868 static const struct ethtool_ops w5100_ethtool_ops = {
869 .get_drvinfo = w5100_get_drvinfo,
870 .get_msglevel = w5100_get_msglevel,
871 .set_msglevel = w5100_set_msglevel,
872 .get_link = w5100_get_link,
873 .get_regs_len = w5100_get_regs_len,
874 .get_regs = w5100_get_regs,
/* netdev operations; generic eth helpers for address validation and
 * MTU changes.
 */
877 static const struct net_device_ops w5100_netdev_ops = {
878 .ndo_open = w5100_open,
879 .ndo_stop = w5100_stop,
880 .ndo_start_xmit = w5100_start_tx,
881 .ndo_tx_timeout = w5100_tx_timeout,
882 .ndo_set_rx_mode = w5100_set_rx_mode,
883 .ndo_set_mac_address = w5100_set_macaddr,
884 .ndo_validate_addr = eth_validate_addr,
885 .ndo_change_mtu = eth_change_mtu,
/* Platform-bus probe: pick the direct or indirect accessor table based
 * on the size of the MEM resource (a window smaller than the full
 * direct-mode map forces indirect addressing), then delegate to the
 * common w5100_probe().
 */
888 static int w5100_mmio_probe(struct platform_device *pdev)
890 struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev);
892 struct resource *mem;
893 const struct w5100_ops *ops;
896 if (data && is_valid_ether_addr(data->mac_addr))
897 mac_addr = data->mac_addr;
899 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
/* NOTE(review): mem is not NULL-checked before resource_size(); a board
 * file missing the MEM resource would oops here — verify upstream.
 */
900 if (resource_size(mem) < W5100_BUS_DIRECT_SIZE)
901 ops = &w5100_mmio_indirect_ops;
903 ops = &w5100_mmio_direct_ops;
905 irq = platform_get_irq(pdev, 0);
909 return w5100_probe(&pdev->dev, ops, sizeof(struct w5100_mmio_priv),
910 mac_addr, irq, data ? data->link_gpio : -EINVAL);
/* Platform-bus remove: delegate to the common teardown. */
913 static int w5100_mmio_remove(struct platform_device *pdev)
915 return w5100_remove(&pdev->dev);
/* Return the bus-private area that follows struct w5100_priv in the
 * netdev private data, aligned to NETDEV_ALIGN (matches the allocation
 * layout computed in w5100_probe()).  Exported for bus-specific drivers.
 */
918 void *w5100_ops_priv(const struct net_device *ndev)
920 return netdev_priv(ndev) +
921 ALIGN(sizeof(struct w5100_priv), NETDEV_ALIGN);
923 EXPORT_SYMBOL_GPL(w5100_ops_priv);
/* Common probe shared by all bus front-ends (MMIO here; others supply
 * their own w5100_ops).  @sizeof_ops_priv reserves a bus-private area
 * after w5100_priv (retrieved via w5100_ops_priv()); @mac_addr may come
 * from platform data; @link_gpio is optional (-EINVAL = none).
 * NOTE(review): several error-path labels and checks are elided from
 * this excerpt (alloc failure, register_netdev failure, chip-detect
 * failure); the unwind lines near the end belong to those paths.
 */
925 int w5100_probe(struct device *dev, const struct w5100_ops *ops,
926 int sizeof_ops_priv, u8 *mac_addr, int irq, int link_gpio)
928 struct w5100_priv *priv;
929 struct net_device *ndev;
/* Size the netdev private data: w5100_priv, then the aligned
 * bus-private area, padded so the alignment can always be honored.
 */
933 alloc_size = sizeof(*priv);
934 if (sizeof_ops_priv) {
935 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
936 alloc_size += sizeof_ops_priv;
938 alloc_size += NETDEV_ALIGN - 1;
940 ndev = alloc_etherdev(alloc_size);
943 SET_NETDEV_DEV(ndev, dev);
944 dev_set_drvdata(dev, ndev);
945 priv = netdev_priv(ndev);
949 priv->link_gpio = link_gpio;
951 ndev->netdev_ops = &w5100_netdev_ops;
952 ndev->ethtool_ops = &w5100_ethtool_ops;
953 ndev->watchdog_timeo = HZ;
954 netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16);
956 /* This chip doesn't support VLAN packets with normal MTU,
957 * so disable VLAN for this device.
959 ndev->features |= NETIF_F_VLAN_CHALLENGED;
961 err = register_netdev(ndev);
/* Dedicated workqueue for the deferred TX/RX paths used by sleeping
 * buses, plus the rx-mode and restart work items.
 */
965 priv->xfer_wq = create_workqueue(netdev_name(ndev));
966 if (!priv->xfer_wq) {
971 INIT_WORK(&priv->rx_work, w5100_rx_work);
972 INIT_WORK(&priv->tx_work, w5100_tx_work);
973 INIT_WORK(&priv->setrx_work, w5100_setrx_work);
974 INIT_WORK(&priv->restart_work, w5100_restart_work);
/* Use the provided MAC address or fall back to a random one.
 * NOTE(review): the "if (mac_addr)" guard between these two lines is
 * elided from this excerpt — confirm against the full source.
 */
977 memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
979 eth_hw_addr_random(ndev);
981 if (priv->ops->init) {
982 err = priv->ops->init(priv->ndev);
987 w5100_hw_reset(priv);
/* Chip-detect sanity check: after reset the retry-time register must
 * read its documented power-on default (RTR_DEFAULT).
 */
988 if (w5100_read16(priv, W5100_RTR) != RTR_DEFAULT) {
/* Sleeping buses need a threaded handler since w5100_interrupt() does
 * bus I/O; level-triggered because the chip holds INT low while flags
 * are pending.
 */
993 if (ops->may_sleep) {
994 err = request_threaded_irq(priv->irq, NULL, w5100_interrupt,
995 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
996 netdev_name(ndev), ndev);
998 err = request_irq(priv->irq, w5100_interrupt,
999 IRQF_TRIGGER_LOW, netdev_name(ndev), ndev);
/* Optional link-detect GPIO: both edges update carrier state.  On
 * failure the feature is simply disabled (link_gpio = -EINVAL) rather
 * than failing the probe.
 */
1004 if (gpio_is_valid(priv->link_gpio)) {
1005 char *link_name = devm_kzalloc(dev, 16, GFP_KERNEL);
1011 snprintf(link_name, 16, "%s-link", netdev_name(ndev));
1012 priv->link_irq = gpio_to_irq(priv->link_gpio);
1013 if (request_any_context_irq(priv->link_irq, w5100_detect_link,
1014 IRQF_TRIGGER_RISING |
1015 IRQF_TRIGGER_FALLING,
1016 link_name, priv->ndev) < 0)
1017 priv->link_gpio = -EINVAL;
/* Error unwind (labels elided in this excerpt): release in reverse
 * order of acquisition.
 */
1023 free_irq(priv->irq, ndev);
1025 destroy_workqueue(priv->xfer_wq);
1027 unregister_netdev(ndev);
1032 EXPORT_SYMBOL_GPL(w5100_probe);
/* Common teardown shared by all bus front-ends: quiesce the chip, free
 * both IRQs, flush and destroy all deferred work, then unregister the
 * netdev (free_netdev presumably follows; it is elided in this excerpt).
 */
1034 int w5100_remove(struct device *dev)
1036 struct net_device *ndev = dev_get_drvdata(dev);
1037 struct w5100_priv *priv = netdev_priv(ndev);
1039 w5100_hw_reset(priv);
1040 free_irq(priv->irq, ndev);
1041 if (gpio_is_valid(priv->link_gpio))
1042 free_irq(priv->link_irq, ndev);
1044 flush_work(&priv->setrx_work);
1045 flush_work(&priv->restart_work);
1046 flush_workqueue(priv->xfer_wq);
1047 destroy_workqueue(priv->xfer_wq);
1049 unregister_netdev(ndev);
1053 EXPORT_SYMBOL_GPL(w5100_remove);
1055 #ifdef CONFIG_PM_SLEEP
/* System suspend: if the interface is up, drop carrier, detach it from
 * the stack, and close the hardware.
 */
1056 static int w5100_suspend(struct device *dev)
1058 struct net_device *ndev = dev_get_drvdata(dev);
1059 struct w5100_priv *priv = netdev_priv(ndev);
1061 if (netif_running(ndev)) {
1062 netif_carrier_off(ndev);
1063 netif_device_detach(ndev);
1065 w5100_hw_close(priv);
/* System resume: mirror of suspend — reset and restart the chip, then
 * reattach and restore carrier (respecting the link GPIO if present).
 */
1070 static int w5100_resume(struct device *dev)
1072 struct net_device *ndev = dev_get_drvdata(dev);
1073 struct w5100_priv *priv = netdev_priv(ndev);
1075 if (netif_running(ndev)) {
1076 w5100_hw_reset(priv);
1077 w5100_hw_start(priv);
1079 netif_device_attach(ndev);
1080 if (!gpio_is_valid(priv->link_gpio) ||
1081 gpio_get_value(priv->link_gpio) != 0)
1082 netif_carrier_on(ndev);
1086 #endif /* CONFIG_PM_SLEEP */
/* PM ops are exported so bus-specific front-end drivers can reuse them. */
1088 SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
1089 EXPORT_SYMBOL_GPL(w5100_pm_ops);
/* Platform (MMIO) front-end registration. */
1091 static struct platform_driver w5100_mmio_driver = {
1094 .pm = &w5100_pm_ops,
1096 .probe = w5100_mmio_probe,
1097 .remove = w5100_mmio_remove,
1099 module_platform_driver(w5100_mmio_driver);