2 * SuperH Ethernet device driver
4 * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
5 * Copyright (C) 2008-2009 Renesas Solutions Corp.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
23 #include <linux/init.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/etherdevice.h>
26 #include <linux/delay.h>
27 #include <linux/platform_device.h>
28 #include <linux/mdio-bitbang.h>
29 #include <linux/netdevice.h>
30 #include <linux/phy.h>
31 #include <linux/cache.h>
33 #include <linux/pm_runtime.h>
34 #include <linux/slab.h>
35 #include <linux/ethtool.h>
36 #include <asm/cacheflush.h>
40 #define SH_ETH_DEF_MSG_ENABLE \
46 /* CPU-dependent code follows */
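/*
 * Each SoC subtype below supplies its own sh_eth_cpu_data (register reset
 * values, interrupt masks, feature flags) and, where the hardware supports
 * it, optional set_duplex()/set_rate()/chip_reset() hooks.  Fields left at
 * zero are filled in later by sh_eth_set_default_cpu_data().
 */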
47 #if defined(CONFIG_CPU_SUBTYPE_SH7724)
48 #define SH_ETH_RESET_DEFAULT 1
49 static void sh_eth_set_duplex(struct net_device *ndev)
51 struct sh_eth_private *mdp = netdev_priv(ndev);
53 if (mdp->duplex) /* Full */
54 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
56 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
59 static void sh_eth_set_rate(struct net_device *ndev)
61 struct sh_eth_private *mdp = netdev_priv(ndev);
65 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
67 case 100:/* 100BASE */
68 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
76 static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
77 .set_duplex = sh_eth_set_duplex,
78 .set_rate = sh_eth_set_rate,
80 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
81 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
82 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,
84 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
85 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
86 EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
87 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
94 .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
96 #elif defined(CONFIG_CPU_SUBTYPE_SH7757)
97 #define SH_ETH_RESET_DEFAULT 1
98 static void sh_eth_set_duplex(struct net_device *ndev)
100 struct sh_eth_private *mdp = netdev_priv(ndev);
102 if (mdp->duplex) /* Full */
103 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
105 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
108 static void sh_eth_set_rate(struct net_device *ndev)
110 struct sh_eth_private *mdp = netdev_priv(ndev);
112 switch (mdp->speed) {
113 case 10: /* 10BASE */
114 sh_eth_write(ndev, 0, RTRATE);
116 case 100:/* 100BASE */
117 sh_eth_write(ndev, 1, RTRATE);
125 static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
126 .set_duplex = sh_eth_set_duplex,
127 .set_rate = sh_eth_set_rate,
129 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
130 .rmcr_value = 0x00000001,
132 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
133 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
134 EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
135 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
144 #elif defined(CONFIG_CPU_SUBTYPE_SH7763)
145 #define SH_ETH_HAS_TSU 1
146 static void sh_eth_chip_reset(struct net_device *ndev)
148 struct sh_eth_private *mdp = netdev_priv(ndev);
151 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
155 static void sh_eth_reset(struct net_device *ndev)
159 sh_eth_write(ndev, EDSR_ENALL, EDSR);
160 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST, EDMR);
162 if (!(sh_eth_read(ndev, EDMR) & 0x3))
168 printk(KERN_ERR "Device reset failed\n");
171 sh_eth_write(ndev, 0x0, TDLAR);
172 sh_eth_write(ndev, 0x0, TDFAR);
173 sh_eth_write(ndev, 0x0, TDFXR);
174 sh_eth_write(ndev, 0x0, TDFFR);
175 sh_eth_write(ndev, 0x0, RDLAR);
176 sh_eth_write(ndev, 0x0, RDFAR);
177 sh_eth_write(ndev, 0x0, RDFXR);
178 sh_eth_write(ndev, 0x0, RDFFR);
181 static void sh_eth_set_duplex(struct net_device *ndev)
183 struct sh_eth_private *mdp = netdev_priv(ndev);
185 if (mdp->duplex) /* Full */
186 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
188 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
191 static void sh_eth_set_rate(struct net_device *ndev)
193 struct sh_eth_private *mdp = netdev_priv(ndev);
195 switch (mdp->speed) {
196 case 10: /* 10BASE */
197 sh_eth_write(ndev, GECMR_10, GECMR);
199 case 100:/* 100BASE */
200 sh_eth_write(ndev, GECMR_100, GECMR);
202 case 1000: /* 1000BASE */
203 sh_eth_write(ndev, GECMR_1000, GECMR);
211 static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
212 .chip_reset = sh_eth_chip_reset,
213 .set_duplex = sh_eth_set_duplex,
214 .set_rate = sh_eth_set_rate,
216 .ecsr_value = ECSR_ICD | ECSR_MPD,
217 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
218 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
220 .tx_check = EESR_TC1 | EESR_FTC,
221 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
222 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
224 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
237 #elif defined(CONFIG_CPU_SUBTYPE_SH7619)
238 #define SH_ETH_RESET_DEFAULT 1
239 static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
240 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
247 #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
248 #define SH_ETH_RESET_DEFAULT 1
249 #define SH_ETH_HAS_TSU 1
250 static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
251 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
256 static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
259 cd->ecsr_value = DEFAULT_ECSR_INIT;
261 if (!cd->ecsipr_value)
262 cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
264 if (!cd->fcftr_value)
265 cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
266 DEFAULT_FIFO_F_D_RFD;
269 cd->fdr_value = DEFAULT_FDR_INIT;
272 cd->rmcr_value = DEFAULT_RMCR_VALUE;
275 cd->tx_check = DEFAULT_TX_CHECK;
277 if (!cd->eesr_err_check)
278 cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
280 if (!cd->tx_error_check)
281 cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
284 #if defined(SH_ETH_RESET_DEFAULT)
286 static void sh_eth_reset(struct net_device *ndev)
288 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST, EDMR);
290 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST, EDMR);
294 #if defined(CONFIG_CPU_SH4)
295 static void sh_eth_set_receive_align(struct sk_buff *skb)
299 reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
301 skb_reserve(skb, reserve);
304 static void sh_eth_set_receive_align(struct sk_buff *skb)
306 skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
311 /* CPU <-> EDMAC endian convert */
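/*
 * Descriptors live in memory shared with the EDMAC, whose byte order is a
 * per-board property (mdp->edmac_endian) independent of the CPU, so every
 * descriptor word goes through the two helpers below.
 */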
312 static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
314 switch (mdp->edmac_endian) {
315 case EDMAC_LITTLE_ENDIAN:
316 return cpu_to_le32(x);
317 case EDMAC_BIG_ENDIAN:
318 return cpu_to_be32(x);
323 static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
325 switch (mdp->edmac_endian) {
326 case EDMAC_LITTLE_ENDIAN:
327 return le32_to_cpu(x);
328 case EDMAC_BIG_ENDIAN:
329 return be32_to_cpu(x);
335 * Program the hardware MAC address from dev->dev_addr.
337 static void update_mac_address(struct net_device *ndev)
340 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
341 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
343 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
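/* MAHR takes the first four bytes of the station address, MALR the last two */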
347 * Get MAC address from SuperH MAC address register
349 * SuperH's Ethernet device has no ROM for the MAC address.
350 * This driver picks up the MAC address that was set by the bootloader
351 * (U-Boot or sh-ipl+g), so the bootloader must program it before use.
354 static void read_mac_address(struct net_device *ndev, unsigned char *mac)
356 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
357 memcpy(ndev->dev_addr, mac, 6);
359 ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
360 ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
361 ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
362 ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
363 ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
364 ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
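/*
 * MDIO is bit-banged through the PIR register: each mask in bb_info selects
 * one PIR bit (clock, data out, data in and the output-enable bit).  The
 * actual bit assignments are made in sh_mdio_init().
 */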
369 struct mdiobb_ctrl ctrl;
371 u32 mmd_msk;/* MMD */
378 static void bb_set(u32 addr, u32 msk)
380 writel(readl(addr) | msk, addr);
384 static void bb_clr(u32 addr, u32 msk)
386 writel((readl(addr) & ~msk), addr);
390 static int bb_read(u32 addr, u32 msk)
392 return (readl(addr) & msk) != 0;
395 /* Data I/O pin control */
396 static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
398 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
400 bb_set(bitbang->addr, bitbang->mmd_msk);
402 bb_clr(bitbang->addr, bitbang->mmd_msk);
406 static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
408 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
411 bb_set(bitbang->addr, bitbang->mdo_msk);
413 bb_clr(bitbang->addr, bitbang->mdo_msk);
417 static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
419 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
420 return bb_read(bitbang->addr, bitbang->mdi_msk);
423 /* MDC pin control */
424 static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
426 struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
429 bb_set(bitbang->addr, bitbang->mdc_msk);
431 bb_clr(bitbang->addr, bitbang->mdc_msk);
434 /* mdio bus control struct */
435 static struct mdiobb_ops bb_ops = {
436 .owner = THIS_MODULE,
437 .set_mdc = sh_mdc_ctrl,
438 .set_mdio_dir = sh_mmd_ctrl,
439 .set_mdio_data = sh_set_mdio,
440 .get_mdio_data = sh_get_mdio,
443 /* free skb and descriptor buffer */
444 static void sh_eth_ring_free(struct net_device *ndev)
446 struct sh_eth_private *mdp = netdev_priv(ndev);
449 /* Free Rx skb ringbuffer */
450 if (mdp->rx_skbuff) {
451 for (i = 0; i < RX_RING_SIZE; i++) {
452 if (mdp->rx_skbuff[i])
453 dev_kfree_skb(mdp->rx_skbuff[i]);
456 kfree(mdp->rx_skbuff);
458 /* Free Tx skb ringbuffer */
459 if (mdp->tx_skbuff) {
460 for (i = 0; i < TX_RING_SIZE; i++) {
461 if (mdp->tx_skbuff[i])
462 dev_kfree_skb(mdp->tx_skbuff[i]);
465 kfree(mdp->tx_skbuff);
468 /* format skb and descriptor buffer */
469 static void sh_eth_ring_format(struct net_device *ndev)
471 struct sh_eth_private *mdp = netdev_priv(ndev);
474 struct sh_eth_rxdesc *rxdesc = NULL;
475 struct sh_eth_txdesc *txdesc = NULL;
476 int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
477 int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;
479 mdp->cur_rx = mdp->cur_tx = 0;
480 mdp->dirty_rx = mdp->dirty_tx = 0;
482 memset(mdp->rx_ring, 0, rx_ringsize);
484 /* build Rx ring buffer */
485 for (i = 0; i < RX_RING_SIZE; i++) {
487 mdp->rx_skbuff[i] = NULL;
488 skb = dev_alloc_skb(mdp->rx_buf_sz);
489 mdp->rx_skbuff[i] = skb;
492 dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
494 skb->dev = ndev; /* Mark as being used by this device. */
495 sh_eth_set_receive_align(skb);
498 rxdesc = &mdp->rx_ring[i];
499 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
500 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
502 /* The buffer length must be a multiple of 16 bytes. */
503 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
504 /* Rx descriptor address set */
506 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
507 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
508 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
513 mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
515 /* Mark the last entry as wrapping the ring. */
516 rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
518 memset(mdp->tx_ring, 0, tx_ringsize);
520 /* build Tx ring buffer */
521 for (i = 0; i < TX_RING_SIZE; i++) {
522 mdp->tx_skbuff[i] = NULL;
523 txdesc = &mdp->tx_ring[i];
524 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
525 txdesc->buffer_length = 0;
527 /* Tx descriptor address set */
528 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
529 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
530 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
535 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
538 /* Get skb and descriptor buffer */
539 static int sh_eth_ring_init(struct net_device *ndev)
541 struct sh_eth_private *mdp = netdev_priv(ndev);
542 int rx_ringsize, tx_ringsize, ret = 0;
545 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
546 * card needs room to do 8 byte alignment, +2 so we can reserve
547 * the first 2 bytes, and +16 gets room for the status word from the end of the packet.
550 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
551 (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
553 mdp->rx_buf_sz += NET_IP_ALIGN;
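/*
 * With the default MTU of 1500 this works out to
 * ((1500 + 26 + 7) & ~7) + 2 + 16 = 1546 bytes, plus NET_IP_ALIGN of
 * extra headroom.
 */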
555 /* Allocate RX and TX skb rings */
556 mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
558 if (!mdp->rx_skbuff) {
559 dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
564 mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
566 if (!mdp->tx_skbuff) {
567 dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
572 /* Allocate all Rx descriptors. */
573 rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
574 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
578 dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
586 /* Allocate all Tx descriptors. */
587 tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
588 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
591 dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
599 /* free DMA buffer */
600 dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
603 /* Free Rx and Tx skb ring buffer */
604 sh_eth_ring_free(ndev);
609 static int sh_eth_dev_init(struct net_device *ndev)
612 struct sh_eth_private *mdp = netdev_priv(ndev);
613 u_int32_t rx_int_var, tx_int_var;
619 /* Descriptor format */
620 sh_eth_ring_format(ndev);
622 sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
624 /* Mask all sh_eth interrupts */
625 sh_eth_write(ndev, 0, EESIPR);
627 #if defined(__LITTLE_ENDIAN__)
628 if (mdp->cd->hw_swap)
629 sh_eth_write(ndev, EDMR_EL, EDMR);
632 sh_eth_write(ndev, 0, EDMR);
635 sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
636 sh_eth_write(ndev, 0, TFTR);
638 /* Frame recv control */
639 sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
641 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
642 tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
643 sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);
646 sh_eth_write(ndev, 0x800, BCULR); /* Burst cycle set */
648 sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
650 if (!mdp->cd->no_trimd)
651 sh_eth_write(ndev, 0, TRIMD);
653 /* Receive frame length limit register */
654 sh_eth_write(ndev, RFLR_VALUE, RFLR);
656 sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
657 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
659 /* PAUSE Prohibition */
660 val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
661 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
663 sh_eth_write(ndev, val, ECMR);
665 if (mdp->cd->set_rate)
666 mdp->cd->set_rate(ndev);
668 /* E-MAC Status Register clear */
669 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
671 /* E-MAC Interrupt Enable register */
672 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
674 /* Set MAC address */
675 update_mac_address(ndev);
679 sh_eth_write(ndev, APR_AP, APR);
681 sh_eth_write(ndev, MPR_MP, MPR);
682 if (mdp->cd->tpauser)
683 sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
685 /* Setting the Rx mode will start the Rx process. */
686 sh_eth_write(ndev, EDRRR_R, EDRRR);
688 netif_start_queue(ndev);
693 /* free Tx skb function */
694 static int sh_eth_txfree(struct net_device *ndev)
696 struct sh_eth_private *mdp = netdev_priv(ndev);
697 struct sh_eth_txdesc *txdesc;
701 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
702 entry = mdp->dirty_tx % TX_RING_SIZE;
703 txdesc = &mdp->tx_ring[entry];
704 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
706 /* Free the original skb. */
707 if (mdp->tx_skbuff[entry]) {
708 dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
709 mdp->tx_skbuff[entry] = NULL;
712 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
713 if (entry >= TX_RING_SIZE - 1)
714 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
716 mdp->stats.tx_packets++;
717 mdp->stats.tx_bytes += txdesc->buffer_length;
722 /* Packet receive function */
723 static int sh_eth_rx(struct net_device *ndev)
725 struct sh_eth_private *mdp = netdev_priv(ndev);
726 struct sh_eth_rxdesc *rxdesc;
728 int entry = mdp->cur_rx % RX_RING_SIZE;
729 int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
734 rxdesc = &mdp->rx_ring[entry];
735 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
736 desc_status = edmac_to_cpu(mdp, rxdesc->status);
737 pkt_len = rxdesc->frame_length;
742 if (!(desc_status & RDFEND))
743 mdp->stats.rx_length_errors++;
745 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
746 RD_RFS5 | RD_RFS6 | RD_RFS10)) {
747 mdp->stats.rx_errors++;
748 if (desc_status & RD_RFS1)
749 mdp->stats.rx_crc_errors++;
750 if (desc_status & RD_RFS2)
751 mdp->stats.rx_frame_errors++;
752 if (desc_status & RD_RFS3)
753 mdp->stats.rx_length_errors++;
754 if (desc_status & RD_RFS4)
755 mdp->stats.rx_length_errors++;
756 if (desc_status & RD_RFS6)
757 mdp->stats.rx_missed_errors++;
758 if (desc_status & RD_RFS10)
759 mdp->stats.rx_over_errors++;
761 if (!mdp->cd->hw_swap)
763 phys_to_virt(ALIGN(rxdesc->addr, 4)),
765 skb = mdp->rx_skbuff[entry];
766 mdp->rx_skbuff[entry] = NULL;
768 skb_reserve(skb, NET_IP_ALIGN);
769 skb_put(skb, pkt_len);
770 skb->protocol = eth_type_trans(skb, ndev);
772 mdp->stats.rx_packets++;
773 mdp->stats.rx_bytes += pkt_len;
775 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
776 entry = (++mdp->cur_rx) % RX_RING_SIZE;
777 rxdesc = &mdp->rx_ring[entry];
780 /* Refill the Rx ring buffers. */
781 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
782 entry = mdp->dirty_rx % RX_RING_SIZE;
783 rxdesc = &mdp->rx_ring[entry];
784 /* The buffer length must be a multiple of 16 bytes. */
785 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
787 if (mdp->rx_skbuff[entry] == NULL) {
788 skb = dev_alloc_skb(mdp->rx_buf_sz);
789 mdp->rx_skbuff[entry] = skb;
791 break; /* Better luck next round. */
792 dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
795 sh_eth_set_receive_align(skb);
797 skb_checksum_none_assert(skb);
798 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
800 if (entry >= RX_RING_SIZE - 1)
802 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
805 cpu_to_edmac(mdp, RD_RACT | RD_RFP);
808 /* Restart Rx engine if stopped. */
809 /* If we don't need to check status, don't. -KDU */
810 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
811 sh_eth_write(ndev, EDRRR_R, EDRRR);
816 static void sh_eth_rcv_snd_disable(struct net_device *ndev)
818 /* disable tx and rx */
819 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
820 ~(ECMR_RE | ECMR_TE), ECMR);
823 static void sh_eth_rcv_snd_enable(struct net_device *ndev)
825 /* enable tx and rx */
826 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
827 (ECMR_RE | ECMR_TE), ECMR);
830 /* error control function */
831 static void sh_eth_error(struct net_device *ndev, int intr_status)
833 struct sh_eth_private *mdp = netdev_priv(ndev);
838 if (intr_status & EESR_ECI) {
839 felic_stat = sh_eth_read(ndev, ECSR);
840 sh_eth_write(ndev, felic_stat, ECSR); /* clear int */
841 if (felic_stat & ECSR_ICD)
842 mdp->stats.tx_carrier_errors++;
843 if (felic_stat & ECSR_LCHNG) {
845 if (mdp->cd->no_psr || mdp->no_ether_link) {
846 if (mdp->link == PHY_DOWN)
849 link_stat = PHY_ST_LINK;
851 link_stat = (sh_eth_read(ndev, PSR));
852 if (mdp->ether_link_active_low)
853 link_stat = ~link_stat;
855 if (!(link_stat & PHY_ST_LINK))
856 sh_eth_rcv_snd_disable(ndev);
859 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
860 ~DMAC_M_ECI, EESIPR);
862 sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
864 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
866 /* enable tx and rx */
867 sh_eth_rcv_snd_enable(ndev);
872 if (intr_status & EESR_TWB) {
873 /* Write-back end: unused write-back interrupt */
874 if (intr_status & EESR_TABT) /* Transmit Abort int */
875 mdp->stats.tx_aborted_errors++;
876 if (netif_msg_tx_err(mdp))
877 dev_err(&ndev->dev, "Transmit Abort\n");
880 if (intr_status & EESR_RABT) {
881 /* Receive Abort int */
882 if (intr_status & EESR_RFRMER) {
883 /* Receive Frame Overflow int */
884 mdp->stats.rx_frame_errors++;
885 if (netif_msg_rx_err(mdp))
886 dev_err(&ndev->dev, "Receive Abort\n");
890 if (intr_status & EESR_TDE) {
891 /* Transmit Descriptor Empty int */
892 mdp->stats.tx_fifo_errors++;
893 if (netif_msg_tx_err(mdp))
894 dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
897 if (intr_status & EESR_TFE) {
898 /* Transmit FIFO underflow */
899 mdp->stats.tx_fifo_errors++;
900 if (netif_msg_tx_err(mdp))
901 dev_err(&ndev->dev, "Transmit FIFO underflow\n");
904 if (intr_status & EESR_RDE) {
905 /* Receive Descriptor Empty int */
906 mdp->stats.rx_over_errors++;
908 if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
909 sh_eth_write(ndev, EDRRR_R, EDRRR);
910 if (netif_msg_rx_err(mdp))
911 dev_err(&ndev->dev, "Receive Descriptor Empty\n");
914 if (intr_status & EESR_RFE) {
915 /* Receive FIFO Overflow int */
916 mdp->stats.rx_fifo_errors++;
917 if (netif_msg_rx_err(mdp))
918 dev_err(&ndev->dev, "Receive FIFO Overflow\n");
921 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
923 mdp->stats.tx_fifo_errors++;
924 if (netif_msg_tx_err(mdp))
925 dev_err(&ndev->dev, "Address Error\n");
928 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
931 if (intr_status & mask) {
933 u32 edtrr = sh_eth_read(ndev, EDTRR);
935 dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
936 intr_status, mdp->cur_tx);
937 dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
938 mdp->dirty_tx, (u32) ndev->state, edtrr);
939 /* dirty buffer free */
943 if (edtrr ^ EDTRR_TRNS) {
945 sh_eth_write(ndev, EDTRR_TRNS, EDTRR);
948 netif_wake_queue(ndev);
952 static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
954 struct net_device *ndev = netdev;
955 struct sh_eth_private *mdp = netdev_priv(ndev);
956 struct sh_eth_cpu_data *cd = mdp->cd;
957 irqreturn_t ret = IRQ_NONE;
960 spin_lock(&mdp->lock);
962 /* Get interrupt status */
963 intr_status = sh_eth_read(ndev, EESR);
964 /* Clear interrupt */
965 if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
966 EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
967 cd->tx_check | cd->eesr_err_check)) {
968 sh_eth_write(ndev, intr_status, EESR);
973 if (intr_status & (EESR_FRC | /* Frame recv*/
974 EESR_RMAF | /* Multicast address recv */
975 EESR_RRF | /* Bit frame recv */
976 EESR_RTLF | /* Long frame recv*/
977 EESR_RTSF | /* short frame recv */
978 EESR_PRE | /* PHY-LSI recv error */
979 EESR_CERF)){ /* recv frame CRC error */
984 if (intr_status & cd->tx_check) {
986 netif_wake_queue(ndev);
989 if (intr_status & cd->eesr_err_check)
990 sh_eth_error(ndev, intr_status);
993 spin_unlock(&mdp->lock);
998 static void sh_eth_timer(unsigned long data)
1000 struct net_device *ndev = (struct net_device *)data;
1001 struct sh_eth_private *mdp = netdev_priv(ndev);
1003 mod_timer(&mdp->timer, jiffies + (10 * HZ));
1006 /* PHY state control function */
1007 static void sh_eth_adjust_link(struct net_device *ndev)
1009 struct sh_eth_private *mdp = netdev_priv(ndev);
1010 struct phy_device *phydev = mdp->phydev;
1013 if (phydev->link != PHY_DOWN) {
1014 if (phydev->duplex != mdp->duplex) {
1016 mdp->duplex = phydev->duplex;
1017 if (mdp->cd->set_duplex)
1018 mdp->cd->set_duplex(ndev);
1021 if (phydev->speed != mdp->speed) {
1023 mdp->speed = phydev->speed;
1024 if (mdp->cd->set_rate)
1025 mdp->cd->set_rate(ndev);
1027 if (mdp->link == PHY_DOWN) {
1028 sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_TXF)
1031 mdp->link = phydev->link;
1033 } else if (mdp->link) {
1035 mdp->link = PHY_DOWN;
1040 if (new_state && netif_msg_link(mdp))
1041 phy_print_status(phydev);
1044 /* PHY init function */
1045 static int sh_eth_phy_init(struct net_device *ndev)
1047 struct sh_eth_private *mdp = netdev_priv(ndev);
1048 char phy_id[MII_BUS_ID_SIZE + 3];
1049 struct phy_device *phydev = NULL;
1051 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1052 mdp->mii_bus->id, mdp->phy_id);
1054 mdp->link = PHY_DOWN;
1058 /* Try connect to PHY */
1059 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1060 0, PHY_INTERFACE_MODE_MII);
1061 if (IS_ERR(phydev)) {
1062 dev_err(&ndev->dev, "phy_connect failed\n");
1063 return PTR_ERR(phydev);
1066 dev_info(&ndev->dev, "attached phy %i to driver %s\n",
1067 phydev->addr, phydev->drv->name);
1069 mdp->phydev = phydev;
1074 /* PHY control start function */
1075 static int sh_eth_phy_start(struct net_device *ndev)
1077 struct sh_eth_private *mdp = netdev_priv(ndev);
1080 ret = sh_eth_phy_init(ndev);
1084 /* reset phy - this also wakes it from PDOWN */
1085 phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
1086 phy_start(mdp->phydev);
1091 static int sh_eth_get_settings(struct net_device *ndev,
1092 struct ethtool_cmd *ecmd)
1094 struct sh_eth_private *mdp = netdev_priv(ndev);
1095 unsigned long flags;
1098 spin_lock_irqsave(&mdp->lock, flags);
1099 ret = phy_ethtool_gset(mdp->phydev, ecmd);
1100 spin_unlock_irqrestore(&mdp->lock, flags);
1105 static int sh_eth_set_settings(struct net_device *ndev,
1106 struct ethtool_cmd *ecmd)
1108 struct sh_eth_private *mdp = netdev_priv(ndev);
1109 unsigned long flags;
1112 spin_lock_irqsave(&mdp->lock, flags);
1114 /* disable tx and rx */
1115 sh_eth_rcv_snd_disable(ndev);
1117 ret = phy_ethtool_sset(mdp->phydev, ecmd);
1121 if (ecmd->duplex == DUPLEX_FULL)
1126 if (mdp->cd->set_duplex)
1127 mdp->cd->set_duplex(ndev);
1132 /* enable tx and rx */
1133 sh_eth_rcv_snd_enable(ndev);
1135 spin_unlock_irqrestore(&mdp->lock, flags);
1140 static int sh_eth_nway_reset(struct net_device *ndev)
1142 struct sh_eth_private *mdp = netdev_priv(ndev);
1143 unsigned long flags;
1146 spin_lock_irqsave(&mdp->lock, flags);
1147 ret = phy_start_aneg(mdp->phydev);
1148 spin_unlock_irqrestore(&mdp->lock, flags);
1153 static u32 sh_eth_get_msglevel(struct net_device *ndev)
1155 struct sh_eth_private *mdp = netdev_priv(ndev);
1156 return mdp->msg_enable;
1159 static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
1161 struct sh_eth_private *mdp = netdev_priv(ndev);
1162 mdp->msg_enable = value;
1165 static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
1166 "rx_current", "tx_current",
1167 "rx_dirty", "tx_dirty",
1169 #define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats)
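/*
 * These are driver-internal ring positions (current/dirty cursors), not MAC
 * counters; they are reported by "ethtool -S <interface>".
 */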
1171 static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
1175 return SH_ETH_STATS_LEN;
1181 static void sh_eth_get_ethtool_stats(struct net_device *ndev,
1182 struct ethtool_stats *stats, u64 *data)
1184 struct sh_eth_private *mdp = netdev_priv(ndev);
1187 /* device-specific stats */
1188 data[i++] = mdp->cur_rx;
1189 data[i++] = mdp->cur_tx;
1190 data[i++] = mdp->dirty_rx;
1191 data[i++] = mdp->dirty_tx;
1194 static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1196 switch (stringset) {
1198 memcpy(data, *sh_eth_gstrings_stats,
1199 sizeof(sh_eth_gstrings_stats));
1204 static struct ethtool_ops sh_eth_ethtool_ops = {
1205 .get_settings = sh_eth_get_settings,
1206 .set_settings = sh_eth_set_settings,
1207 .nway_reset = sh_eth_nway_reset,
1208 .get_msglevel = sh_eth_get_msglevel,
1209 .set_msglevel = sh_eth_set_msglevel,
1210 .get_link = ethtool_op_get_link,
1211 .get_strings = sh_eth_get_strings,
1212 .get_ethtool_stats = sh_eth_get_ethtool_stats,
1213 .get_sset_count = sh_eth_get_sset_count,
1216 /* network device open function */
1217 static int sh_eth_open(struct net_device *ndev)
1220 struct sh_eth_private *mdp = netdev_priv(ndev);
1222 pm_runtime_get_sync(&mdp->pdev->dev);
1224 ret = request_irq(ndev->irq, sh_eth_interrupt,
1225 #if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
1226 defined(CONFIG_CPU_SUBTYPE_SH7764) || \
1227 defined(CONFIG_CPU_SUBTYPE_SH7757)
1234 dev_err(&ndev->dev, "Cannot assign IRQ number\n");
1238 /* Descriptor set */
1239 ret = sh_eth_ring_init(ndev);
1244 ret = sh_eth_dev_init(ndev);
1248 /* PHY control start*/
1249 ret = sh_eth_phy_start(ndev);
1253 /* Set the timer to check for link beat. */
1254 init_timer(&mdp->timer);
1255 mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */
1256 setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
1261 free_irq(ndev->irq, ndev);
1262 pm_runtime_put_sync(&mdp->pdev->dev);
1266 /* Timeout function */
1267 static void sh_eth_tx_timeout(struct net_device *ndev)
1269 struct sh_eth_private *mdp = netdev_priv(ndev);
1270 struct sh_eth_rxdesc *rxdesc;
1273 netif_stop_queue(ndev);
1275 if (netif_msg_timer(mdp))
1276 dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
1277 " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR));
1279 /* tx_errors count up */
1280 mdp->stats.tx_errors++;
1283 del_timer_sync(&mdp->timer);
1285 /* Free all the skbuffs in the Rx queue. */
1286 for (i = 0; i < RX_RING_SIZE; i++) {
1287 rxdesc = &mdp->rx_ring[i];
1289 rxdesc->addr = 0xBADF00D0;
1290 if (mdp->rx_skbuff[i])
1291 dev_kfree_skb(mdp->rx_skbuff[i]);
1292 mdp->rx_skbuff[i] = NULL;
1294 for (i = 0; i < TX_RING_SIZE; i++) {
1295 if (mdp->tx_skbuff[i])
1296 dev_kfree_skb(mdp->tx_skbuff[i]);
1297 mdp->tx_skbuff[i] = NULL;
1301 sh_eth_dev_init(ndev);
1304 mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */
1305 add_timer(&mdp->timer);
1308 /* Packet transmit function */
1309 static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1311 struct sh_eth_private *mdp = netdev_priv(ndev);
1312 struct sh_eth_txdesc *txdesc;
1314 unsigned long flags;
1316 spin_lock_irqsave(&mdp->lock, flags);
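/*
 * Keep a small reserve of Tx descriptors: if only a few (four or fewer)
 * slots remain free, try to reclaim completed descriptors before giving
 * up and stopping the queue.
 */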
1317 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
1318 if (!sh_eth_txfree(ndev)) {
1319 if (netif_msg_tx_queued(mdp))
1320 dev_warn(&ndev->dev, "TxFD exhausted.\n");
1321 netif_stop_queue(ndev);
1322 spin_unlock_irqrestore(&mdp->lock, flags);
1323 return NETDEV_TX_BUSY;
1326 spin_unlock_irqrestore(&mdp->lock, flags);
1328 entry = mdp->cur_tx % TX_RING_SIZE;
1329 mdp->tx_skbuff[entry] = skb;
1330 txdesc = &mdp->tx_ring[entry];
1331 txdesc->addr = virt_to_phys(skb->data);
1333 if (!mdp->cd->hw_swap)
1334 sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
1337 __flush_purge_region(skb->data, skb->len);
1338 if (skb->len < ETHERSMALL)
1339 txdesc->buffer_length = ETHERSMALL;
1341 txdesc->buffer_length = skb->len;
1343 if (entry >= TX_RING_SIZE - 1)
1344 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
1346 txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
1350 if (!(sh_eth_read(ndev, EDTRR) & EDTRR_TRNS))
1351 sh_eth_write(ndev, EDTRR_TRNS, EDTRR);
1353 return NETDEV_TX_OK;
1356 /* device close function */
1357 static int sh_eth_close(struct net_device *ndev)
1359 struct sh_eth_private *mdp = netdev_priv(ndev);
1362 netif_stop_queue(ndev);
1364 /* Disable interrupts by clearing the interrupt mask. */
1365 sh_eth_write(ndev, 0x0000, EESIPR);
1367 /* Stop the chip's Tx and Rx processes. */
1368 sh_eth_write(ndev, 0, EDTRR);
1369 sh_eth_write(ndev, 0, EDRRR);
1371 /* PHY Disconnect */
1373 phy_stop(mdp->phydev);
1374 phy_disconnect(mdp->phydev);
1377 free_irq(ndev->irq, ndev);
1379 del_timer_sync(&mdp->timer);
1381 /* Free all the skbuffs in the Rx queue. */
1382 sh_eth_ring_free(ndev);
1384 /* free DMA buffer */
1385 ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
1386 dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);
1388 /* free DMA buffer */
1389 ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
1390 dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);
1392 pm_runtime_put_sync(&mdp->pdev->dev);
1397 static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
1399 struct sh_eth_private *mdp = netdev_priv(ndev);
1401 pm_runtime_get_sync(&mdp->pdev->dev);
1403 mdp->stats.tx_dropped += sh_eth_read(ndev, TROCR);
1404 sh_eth_write(ndev, 0, TROCR); /* (write clear) */
1405 mdp->stats.collisions += sh_eth_read(ndev, CDCR);
1406 sh_eth_write(ndev, 0, CDCR); /* (write clear) */
1407 mdp->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
1408 sh_eth_write(ndev, 0, LCCR); /* (write clear) */
1409 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
1410 mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);/* CERCR */
1411 sh_eth_write(ndev, 0, CERCR); /* (write clear) */
1412 mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);/* CEECR */
1413 sh_eth_write(ndev, 0, CEECR); /* (write clear) */
1415 mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
1416 sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
1418 pm_runtime_put_sync(&mdp->pdev->dev);
1423 /* ioctl to device function */
1424 static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
1427 struct sh_eth_private *mdp = netdev_priv(ndev);
1428 struct phy_device *phydev = mdp->phydev;
1430 if (!netif_running(ndev))
1436 return phy_mii_ioctl(phydev, rq, cmd);
1439 #if defined(SH_ETH_HAS_TSU)
1440 /* Set multicast reception mode */
1441 static void sh_eth_set_multicast_list(struct net_device *ndev)
1443 if (ndev->flags & IFF_PROMISC) {
1444 /* Set promiscuous. */
1445 sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_MCT) |
1448 /* Normal, unicast/broadcast-only mode. */
1449 sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) |
1453 #endif /* SH_ETH_HAS_TSU */
1455 /* SuperH's TSU register init function */
1456 static void sh_eth_tsu_init(struct sh_eth_private *mdp)
1458 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */
1459 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */
1460 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */
1461 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
1462 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
1463 sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
1464 sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
1465 sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
1466 sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
1467 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
1468 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
1469 sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */
1470 sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */
1472 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */
1473 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */
1475 sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */
1476 sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */
1477 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
1478 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */
1479 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */
1480 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */
1481 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */
1484 /* MDIO bus release function */
1485 static int sh_mdio_release(struct net_device *ndev)
1487 struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
1489 /* unregister mdio bus */
1490 mdiobus_unregister(bus);
1492 /* remove mdio bus info from net_device */
1493 dev_set_drvdata(&ndev->dev, NULL);
1495 /* free interrupts memory */
1498 /* free bitbang info */
1499 free_mdio_bitbang(bus);
1504 /* MDIO bus init function */
1505 static int sh_mdio_init(struct net_device *ndev, int id)
1508 struct bb_info *bitbang;
1509 struct sh_eth_private *mdp = netdev_priv(ndev);
1511 /* create bit control struct for PHY */
1512 bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
1519 bitbang->addr = ndev->base_addr + mdp->reg_offset[PIR];
1520 bitbang->mdi_msk = 0x08;
1521 bitbang->mdo_msk = 0x04;
1522 bitbang->mmd_msk = 0x02;/* MMD */
1523 bitbang->mdc_msk = 0x01;
1524 bitbang->ctrl.ops = &bb_ops;
1526 /* MII controller setting */
1527 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
1528 if (!mdp->mii_bus) {
1530 goto out_free_bitbang;
1533 /* Hook up MII support for ethtool */
1534 mdp->mii_bus->name = "sh_mii";
1535 mdp->mii_bus->parent = &ndev->dev;
1536 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);
1539 mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1540 if (!mdp->mii_bus->irq) {
1545 for (i = 0; i < PHY_MAX_ADDR; i++)
1546 mdp->mii_bus->irq[i] = PHY_POLL;
1548 /* register mdio bus */
1549 ret = mdiobus_register(mdp->mii_bus);
1553 dev_set_drvdata(&ndev->dev, mdp->mii_bus);
1558 kfree(mdp->mii_bus->irq);
1561 free_mdio_bitbang(mdp->mii_bus);
1570 static const u16 *sh_eth_get_register_offset(int register_type)
1572 const u16 *reg_offset = NULL;
1574 switch (register_type) {
1575 case SH_ETH_REG_GIGABIT:
1576 reg_offset = sh_eth_offset_gigabit;
1578 case SH_ETH_REG_FAST_SH4:
1579 reg_offset = sh_eth_offset_fast_sh4;
1581 case SH_ETH_REG_FAST_SH3_SH2:
1582 reg_offset = sh_eth_offset_fast_sh3_sh2;
1585 printk(KERN_ERR "Unknown register type (%d)\n", register_type);
1592 static const struct net_device_ops sh_eth_netdev_ops = {
1593 .ndo_open = sh_eth_open,
1594 .ndo_stop = sh_eth_close,
1595 .ndo_start_xmit = sh_eth_start_xmit,
1596 .ndo_get_stats = sh_eth_get_stats,
1597 #if defined(SH_ETH_HAS_TSU)
1598 .ndo_set_multicast_list = sh_eth_set_multicast_list,
1600 .ndo_tx_timeout = sh_eth_tx_timeout,
1601 .ndo_do_ioctl = sh_eth_do_ioctl,
1602 .ndo_validate_addr = eth_validate_addr,
1603 .ndo_set_mac_address = eth_mac_addr,
1604 .ndo_change_mtu = eth_change_mtu,
1607 static int sh_eth_drv_probe(struct platform_device *pdev)
1610 struct resource *res;
1611 struct net_device *ndev = NULL;
1612 struct sh_eth_private *mdp;
1613 struct sh_eth_plat_data *pd;
1616 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1617 if (unlikely(res == NULL)) {
1618 dev_err(&pdev->dev, "invalid resource\n");
1623 ndev = alloc_etherdev(sizeof(struct sh_eth_private));
1625 dev_err(&pdev->dev, "Could not allocate device.\n");
1630 /* The sh Ether-specific entries in the device structure. */
1631 ndev->base_addr = res->start;
1637 ret = platform_get_irq(pdev, 0);
1644 SET_NETDEV_DEV(ndev, &pdev->dev);
1646 /* Fill in the fields of the device structure with ethernet values. */
1649 mdp = netdev_priv(ndev);
1650 spin_lock_init(&mdp->lock);
1652 pm_runtime_enable(&pdev->dev);
1653 pm_runtime_resume(&pdev->dev);
1655 pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
1657 mdp->phy_id = pd->phy;
1659 mdp->edmac_endian = pd->edmac_endian;
1660 mdp->no_ether_link = pd->no_ether_link;
1661 mdp->ether_link_active_low = pd->ether_link_active_low;
1662 mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
1665 mdp->cd = &sh_eth_my_cpu_data;
1666 sh_eth_set_default_cpu_data(mdp->cd);
1669 ndev->netdev_ops = &sh_eth_netdev_ops;
1670 SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
1671 ndev->watchdog_timeo = TX_TIMEOUT;
1673 /* debug message level */
1674 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
1675 mdp->post_rx = POST_RX >> (devno << 1);
1676 mdp->post_fw = POST_FW >> (devno << 1);
1678 /* read and set MAC address */
1679 read_mac_address(ndev, pd->mac_addr);
1681 /* First device only init */
1684 struct resource *rtsu;
1685 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1687 dev_err(&pdev->dev, "TSU resource not found\n");
1690 mdp->tsu_addr = ioremap(rtsu->start,
1691 resource_size(rtsu));
1693 if (mdp->cd->chip_reset)
1694 mdp->cd->chip_reset(ndev);
1697 /* TSU init (first device only) */
1698 sh_eth_tsu_init(mdp);
1702 /* network device register */
1703 ret = register_netdev(ndev);
1708 ret = sh_mdio_init(ndev, pdev->id);
1710 goto out_unregister;
1712 /* print device information */
1713 pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
1714 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
1716 platform_set_drvdata(pdev, ndev);
1721 unregister_netdev(ndev);
1726 iounmap(mdp->tsu_addr);
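/*
 * Board code is expected to hand in a struct sh_eth_plat_data as the
 * platform device's platform_data.  A minimal sketch (illustrative only;
 * the PHY address and register type depend on the board and SoC):
 *
 *	static struct sh_eth_plat_data sh_eth_pdata = {
 *		.phy		= 0x1f,		// PHY address on the MDIO bus (hypothetical)
 *		.edmac_endian	= EDMAC_LITTLE_ENDIAN,
 *		.register_type	= SH_ETH_REG_FAST_SH4,
 *	};
 *
 * Optional fields (mac_addr, no_ether_link, ether_link_active_low) are
 * picked up by sh_eth_drv_probe() above.
 */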
1734 static int sh_eth_drv_remove(struct platform_device *pdev)
1736 struct net_device *ndev = platform_get_drvdata(pdev);
1737 struct sh_eth_private *mdp = netdev_priv(ndev);
1739 iounmap(mdp->tsu_addr);
1740 sh_mdio_release(ndev);
1741 unregister_netdev(ndev);
1742 pm_runtime_disable(&pdev->dev);
1744 platform_set_drvdata(pdev, NULL);
1749 static int sh_eth_runtime_nop(struct device *dev)
1752 * Runtime PM callback shared between ->runtime_suspend()
1753 * and ->runtime_resume(). Simply returns success.
1755 * This driver re-initializes all registers after
1756 * pm_runtime_get_sync() anyway so there is no need
1757 * to save and restore registers here.
1762 static struct dev_pm_ops sh_eth_dev_pm_ops = {
1763 .runtime_suspend = sh_eth_runtime_nop,
1764 .runtime_resume = sh_eth_runtime_nop,
1767 static struct platform_driver sh_eth_driver = {
1768 .probe = sh_eth_drv_probe,
1769 .remove = sh_eth_drv_remove,
1772 .pm = &sh_eth_dev_pm_ops,
1776 static int __init sh_eth_init(void)
1778 return platform_driver_register(&sh_eth_driver);
1781 static void __exit sh_eth_cleanup(void)
1783 platform_driver_unregister(&sh_eth_driver);
1786 module_init(sh_eth_init);
1787 module_exit(sh_eth_cleanup);
1789 MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
1790 MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
1791 MODULE_LICENSE("GPL v2");