2 * drivers/net/ethernet/ibm/emac/core.c
4 * Driver for PowerPC 4xx on-chip ethernet controller.
6 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
7 * <benh@kernel.crashing.org>
9 * Based on the arch/ppc version of the driver:
11 * Copyright (c) 2004, 2005 Zultys Technologies.
12 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
14 * Based on original work by
15 * Matt Porter <mporter@kernel.crashing.org>
16 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
17 * Armin Kuster <akuster@mvista.com>
18 * Johnnie Peters <jpeters@mvista.com>
20 * This program is free software; you can redistribute it and/or modify it
21 * under the terms of the GNU General Public License as published by the
22 * Free Software Foundation; either version 2 of the License, or (at your
23 * option) any later version.
27 #include <linux/module.h>
28 #include <linux/sched.h>
29 #include <linux/string.h>
30 #include <linux/errno.h>
31 #include <linux/delay.h>
32 #include <linux/types.h>
33 #include <linux/pci.h>
34 #include <linux/etherdevice.h>
35 #include <linux/skbuff.h>
36 #include <linux/crc32.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/bitops.h>
40 #include <linux/workqueue.h>
42 #include <linux/of_address.h>
43 #include <linux/of_irq.h>
44 #include <linux/of_net.h>
45 #include <linux/of_mdio.h>
46 #include <linux/slab.h>
48 #include <asm/processor.h>
51 #include <linux/uaccess.h>
53 #include <asm/dcr-regs.h>
58 * Lack of dma_unmap_???? calls is intentional.
60 * API-correct usage requires additional support state information to be
61 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
62 * EMAC design (e.g. TX buffer passed from network stack can be split into
63 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
64 * maintaining such information will add additional overhead.
65 * Current DMA API implementation for 4xx processors only ensures cache coherency
66 * and dma_unmap_???? routines are empty and are likely to stay this way.
67 * I decided to omit dma_unmap_??? calls because I don't want to add additional
68 * complexity just for the sake of following some abstract API, when it doesn't
69 * add any real benefit to the driver. I understand that this decision may be
70 * controversial, but I really tried to make code API-correct and efficient
71 * at the same time and didn't come up with code I liked :(. --ebs
74 #define DRV_NAME "emac"
75 #define DRV_VERSION "3.54"
76 #define DRV_DESC "PPC 4xx OCP EMAC driver"
78 MODULE_DESCRIPTION(DRV_DESC);
80 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
81 MODULE_LICENSE("GPL");
83 /* minimum number of free TX descriptors required to wake up TX process */
84 #define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
86 /* If packet size is less than this number, we allocate small skb and copy packet
87 * contents into it instead of just sending original big skb up
89 #define EMAC_RX_COPY_THRESH CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
91 /* Since multiple EMACs share MDIO lines in various ways, we need
92 * to avoid re-using the same PHY ID in cases where the arch didn't
93 * setup precise phy_map entries
95 * XXX This is something that needs to be reworked as we can have multiple
96 * EMAC "sets" (multiple ASICs containing several EMACs) though we can
97 * probably require in that case to have explicit PHY IDs in the device-tree
99 static u32 busy_phy_map;
100 static DEFINE_MUTEX(emac_phy_map_lock);
102 /* This is the wait queue used to wait on any event related to probe, that
103 * is discovery of MALs, other EMACs, ZMII/RGMIIs, etc...
105 static DECLARE_WAIT_QUEUE_HEAD(emac_probe_wait);
107 /* Having stable interface names is a doomed idea. However, it would be nice
108 * if we didn't have completely random interface names at boot too :-) It's
109 * just a matter of making everybody's life easier. Since we are doing
110 * threaded probing, it's a bit harder though. The base idea here is that
111 * we make up a list of all emacs in the device-tree before we register the
112 * driver. Every emac will then wait for the previous one in the list to
113 * initialize before itself. We should also keep that list ordered by
115 * That list is only 4 entries long, meaning that additional EMACs don't
116 * get ordering guarantees unless EMAC_BOOT_LIST_SIZE is increased.
119 #define EMAC_BOOT_LIST_SIZE 4
120 static struct device_node *emac_boot_list[EMAC_BOOT_LIST_SIZE];
122 /* How long should I wait for dependent devices ? */
123 #define EMAC_PROBE_DEP_TIMEOUT (HZ * 5)
125 /* I don't want to litter system log with timeout errors
126 * when we have brain-damaged PHY.
/* Report a MAC/PHY timeout.  On chips with known PHY-clock errata the
 * timeout is expected, so it is only emitted at debug level; otherwise
 * it is printed rate-limited to avoid flooding the system log. */
128 static inline void emac_report_timeout_error(struct emac_instance *dev,
131 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX |
132 EMAC_FTR_460EX_PHY_CLK_FIX |
133 EMAC_FTR_440EP_PHY_CLK_FIX))
134 DBG(dev, "%s" NL, error);
135 else if (net_ratelimit())
136 printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name,
140 /* EMAC PHY clock workaround:
141 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX,
142 * which allows controlling each EMAC clock
/* 440EP/440GR PHY clock workaround: switch this EMAC's RX clock source
 * (SDR0_MFR_ECS bit for this cell index) to the TX clock.  No-op unless
 * the 440EP fix feature is set and native DCR access is available. */
144 static inline void emac_rx_clk_tx(struct emac_instance *dev)
146 #ifdef CONFIG_PPC_DCR_NATIVE
147 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
148 dcri_clrset(SDR0, SDR0_MFR,
149 0, SDR0_MFR_ECS >> dev->cell_index);
/* Counterpart of emac_rx_clk_tx(): clear the SDR0_MFR_ECS bit for this
 * cell index, restoring the default RX clock source. */
153 static inline void emac_rx_clk_default(struct emac_instance *dev)
155 #ifdef CONFIG_PPC_DCR_NATIVE
156 if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
157 dcri_clrset(SDR0, SDR0_MFR,
158 SDR0_MFR_ECS >> dev->cell_index, 0);
162 /* PHY polling intervals */
163 #define PHY_POLL_LINK_ON HZ
164 #define PHY_POLL_LINK_OFF (HZ / 5)
166 /* Graceful stop timeouts in us.
167 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
169 #define STOP_TIMEOUT_10 1230
170 #define STOP_TIMEOUT_100 124
171 #define STOP_TIMEOUT_1000 13
172 #define STOP_TIMEOUT_1000_JUMBO 73
174 static unsigned char default_mcast_addr[] = {
175 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
178 /* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
179 static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
180 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
181 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
182 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
183 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
184 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
185 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
186 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
187 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
188 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
189 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
190 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
191 "tx_bd_excessive_collisions", "tx_bd_late_collision",
192 "tx_bd_multple_collisions", "tx_bd_single_collision",
193 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
197 static irqreturn_t emac_irq(int irq, void *dev_instance);
198 static void emac_clean_tx_ring(struct emac_instance *dev);
199 static void __emac_set_multicast_list(struct emac_instance *dev);
/* Return non-zero if the given PHY interface mode can carry gigabit
 * speeds (GMII/RGMII/SGMII/TBI/RTBI). */
201 static inline int emac_phy_supports_gige(int phy_mode)
203 return phy_mode == PHY_MODE_GMII ||
204 phy_mode == PHY_MODE_RGMII ||
205 phy_mode == PHY_MODE_SGMII ||
206 phy_mode == PHY_MODE_TBI ||
207 phy_mode == PHY_MODE_RTBI;
/* Return non-zero if the given PHY interface mode uses the internal
 * GPCS (SGMII/TBI/RTBI). */
210 static inline int emac_phy_gpcs(int phy_mode)
212 return phy_mode == PHY_MODE_SGMII ||
213 phy_mode == PHY_MODE_TBI ||
214 phy_mode == PHY_MODE_RTBI;
/* Enable the TX path by setting EMAC_MR0_TXE in MR0, read-modify-write,
 * skipping the MMIO write if the bit is already set. */
217 static inline void emac_tx_enable(struct emac_instance *dev)
219 struct emac_regs __iomem *p = dev->emacp;
222 DBG(dev, "tx_enable" NL);
224 r = in_be32(&p->mr0);
225 if (!(r & EMAC_MR0_TXE))
226 out_be32(&p->mr0, r | EMAC_MR0_TXE);
/* Disable the TX path: clear EMAC_MR0_TXE, then poll MR0 for the TXI
 * (TX idle) bit for up to dev->stop_timeout iterations before reporting
 * a timeout.  stop_timeout is scaled by link speed (STOP_TIMEOUT_*). */
229 static void emac_tx_disable(struct emac_instance *dev)
231 struct emac_regs __iomem *p = dev->emacp;
234 DBG(dev, "tx_disable" NL);
236 r = in_be32(&p->mr0);
237 if (r & EMAC_MR0_TXE) {
238 int n = dev->stop_timeout;
239 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
240 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
245 emac_report_timeout_error(dev, "TX disable timeout");
/* Enable the RX path (EMAC_MR0_RXE).  Bails out if the MAL has flagged
 * RX as stopped (MAL_COMMAC_RX_STOPPED).  If a previous asynchronous RX
 * disable is still in flight (RXI not yet set), wait for it to finish
 * first, bounded by dev->stop_timeout. */
249 static void emac_rx_enable(struct emac_instance *dev)
251 struct emac_regs __iomem *p = dev->emacp;
254 if (unlikely(test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags)))
257 DBG(dev, "rx_enable" NL);
259 r = in_be32(&p->mr0);
260 if (!(r & EMAC_MR0_RXE)) {
261 if (unlikely(!(r & EMAC_MR0_RXI))) {
262 /* Wait if previous async disable is still in progress */
263 int n = dev->stop_timeout;
264 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
269 emac_report_timeout_error(dev,
270 "RX disable timeout");
272 out_be32(&p->mr0, r | EMAC_MR0_RXE);
/* Synchronously disable the RX path: clear EMAC_MR0_RXE and poll MR0
 * for the RXI (RX idle) bit, bounded by dev->stop_timeout. */
278 static void emac_rx_disable(struct emac_instance *dev)
280 struct emac_regs __iomem *p = dev->emacp;
283 DBG(dev, "rx_disable" NL);
285 r = in_be32(&p->mr0);
286 if (r & EMAC_MR0_RXE) {
287 int n = dev->stop_timeout;
288 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
289 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
294 emac_report_timeout_error(dev, "RX disable timeout");
/* Quiesce the netdev side: take tx/addr locks to fence concurrent
 * transmit/multicast updates, refresh the trans_start timestamp so the
 * stack doesn't declare a TX timeout while we hold the queue down, then
 * disable NAPI polling and the TX queue. */
298 static inline void emac_netif_stop(struct emac_instance *dev)
300 netif_tx_lock_bh(dev->ndev);
301 netif_addr_lock(dev->ndev);
303 netif_addr_unlock(dev->ndev);
304 netif_tx_unlock_bh(dev->ndev);
305 netif_trans_update(dev->ndev); /* prevent tx timeout */
306 mal_poll_disable(dev->mal, &dev->commac);
307 netif_tx_disable(dev->ndev);
/* Restart the netdev side after emac_netif_stop(): flush any multicast
 * list update that was deferred while stopped, wake the TX queue and
 * re-enable NAPI polling. */
310 static inline void emac_netif_start(struct emac_instance *dev)
312 netif_tx_lock_bh(dev->ndev);
313 netif_addr_lock(dev->ndev);
315 if (dev->mcast_pending && netif_running(dev->ndev))
316 __emac_set_multicast_list(dev);
317 netif_addr_unlock(dev->ndev);
318 netif_tx_unlock_bh(dev->ndev);
320 netif_wake_queue(dev->ndev);
322 /* NOTE: unconditional netif_wake_queue is only appropriate
323 * so long as all callers are assured to have free tx slots
324 * (taken from tg3... though the case where that is wrong is
325 * not terribly harmful)
327 mal_poll_enable(dev->mal, &dev->commac);
/* Asynchronous RX disable: clear EMAC_MR0_RXE without waiting for the
 * RXI idle bit.  emac_rx_enable() handles the case where this disable
 * is still in progress. */
330 static inline void emac_rx_disable_async(struct emac_instance *dev)
332 struct emac_regs __iomem *p = dev->emacp;
335 DBG(dev, "rx_disable_async" NL);
337 r = in_be32(&p->mr0);
338 if (r & EMAC_MR0_RXE)
339 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
/* Soft-reset the EMAC core via EMAC_MR0_SRST and poll for completion.
 *
 * On 460EX/GT the PHY must supply a TX clock for the soft reset to
 * complete; if the first attempt times out (e.g. AR8035-A with no
 * cable attached) the code retries once using the internal clock
 * (SDR0_ETH_CFG_ECS), then restores the external clock afterwards.
 *
 * Returns 0 on success; sets dev->reset_failed and reports a timeout
 * error otherwise.  (Exact return path not fully visible here.) */
342 static int emac_reset(struct emac_instance *dev)
344 struct emac_regs __iomem *p = dev->emacp;
346 bool __maybe_unused try_internal_clock = false;
348 DBG(dev, "reset" NL);
350 if (!dev->reset_failed) {
351 /* 40x erratum suggests stopping RX channel before reset,
354 emac_rx_disable(dev);
355 emac_tx_disable(dev);
358 #ifdef CONFIG_PPC_DCR_NATIVE
361 * PPC460EX/GT Embedded Processor Advanced User's Manual
362 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
363 * Note: The PHY must provide a TX Clk in order to perform a soft reset
364 * of the EMAC. If none is present, select the internal clock
365 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
366 * After a soft reset, select the external clock.
368 * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the
369 * ethernet cable is not attached. This causes the reset to timeout
370 * and the PHY detection code in emac_init_phy() is unable to
371 * communicate and detect the AR8035-A PHY. As a result, the emac
372 * driver bails out early and the user has no ethernet.
373 * In order to stay compatible with existing configurations, the
374 * driver will temporarily switch to the internal clock, after
375 * the first reset fails.
377 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
378 if (try_internal_clock || (dev->phy_address == 0xffffffff &&
379 dev->phy_map == 0xffffffff)) {
380 /* No PHY: select internal loop clock before reset */
381 dcri_clrset(SDR0, SDR0_ETH_CFG,
382 0, SDR0_ETH_CFG_ECS << dev->cell_index);
384 /* PHY present: select external clock before reset */
385 dcri_clrset(SDR0, SDR0_ETH_CFG,
386 SDR0_ETH_CFG_ECS << dev->cell_index, 0);
/* Kick the soft reset and spin until SRST self-clears or n expires */
391 out_be32(&p->mr0, EMAC_MR0_SRST);
392 while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
395 #ifdef CONFIG_PPC_DCR_NATIVE
396 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
397 if (!n && !try_internal_clock) {
398 /* first attempt has timed out. */
400 try_internal_clock = true;
404 if (try_internal_clock || (dev->phy_address == 0xffffffff &&
405 dev->phy_map == 0xffffffff)) {
406 /* No PHY: restore external clock source after reset */
407 dcri_clrset(SDR0, SDR0_ETH_CFG,
408 SDR0_ETH_CFG_ECS << dev->cell_index, 0);
414 dev->reset_failed = 0;
417 emac_report_timeout_error(dev, "reset timeout");
418 dev->reset_failed = 1;
/* Program the group-address hash table (GAHT) registers from the
 * device's multicast list: CRC each address, map CRC -> slot -> (reg,
 * bit mask), accumulate in a temporary array, then write all GAHT
 * registers in one pass. */
423 static void emac_hash_mc(struct emac_instance *dev)
425 const int regs = EMAC_XAHT_REGS(dev);
426 u32 *gaht_base = emac_gaht_base(dev);
428 struct netdev_hw_addr *ha;
431 DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
433 memset(gaht_temp, 0, sizeof (gaht_temp));
435 netdev_for_each_mc_addr(ha, dev->ndev) {
437 DBG2(dev, "mc %pM" NL, ha->addr);
439 slot = EMAC_XAHT_CRC_TO_SLOT(dev,
440 ether_crc(ETH_ALEN, ha->addr));
441 reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
442 mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
444 gaht_temp[reg] |= mask;
447 for (i = 0; i < regs; i++)
448 out_be32(gaht_base + i, gaht_temp[i]);
/* Translate netdev IFF_* flags into an RX Mode Register (RMR) value:
 * base bits, then promiscuous / all-multi (or hash overflow) / hashed
 * multicast, plus the APM821xx maximum-jumbo-size field when needed. */
451 static inline u32 emac_iff2rmr(struct net_device *ndev)
453 struct emac_instance *dev = netdev_priv(ndev);
456 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
458 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
463 if (ndev->flags & IFF_PROMISC)
465 else if (ndev->flags & IFF_ALLMULTI ||
466 (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
468 else if (!netdev_mc_empty(ndev))
471 if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
472 r &= ~EMAC4_RMR_MJS_MASK;
473 r |= EMAC4_RMR_MJS(ndev->mtu);
/* Build the base MR1 value for classic (non-EMAC4) cores from the
 * TX/RX FIFO sizes; warns on unrecognized sizes. */
479 static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
481 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC_MR1_TR0_MULT;
483 DBG2(dev, "__emac_calc_base_mr1" NL);
487 ret |= EMAC_MR1_TFS_2K;
490 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
491 dev->ndev->name, tx_size);
496 ret |= EMAC_MR1_RFS_16K;
499 ret |= EMAC_MR1_RFS_4K;
502 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
503 dev->ndev->name, rx_size);
/* Build the base MR1 value for EMAC4 cores: includes the OPB bus clock
 * (in MHz) in the OBCI field, plus FIFO size bits; warns on
 * unrecognized sizes. */
509 static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
511 u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
512 EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
514 DBG2(dev, "__emac4_calc_base_mr1" NL);
518 ret |= EMAC4_MR1_TFS_16K;
521 ret |= EMAC4_MR1_TFS_4K;
524 ret |= EMAC4_MR1_TFS_2K;
527 printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
528 dev->ndev->name, tx_size);
533 ret |= EMAC4_MR1_RFS_16K;
536 ret |= EMAC4_MR1_RFS_4K;
539 ret |= EMAC4_MR1_RFS_2K;
542 printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
543 dev->ndev->name, rx_size);
/* Dispatch to the EMAC4 or classic MR1 base-value helper depending on
 * the core's feature bits. */
549 static u32 emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
551 return emac_has_feature(dev, EMAC_FTR_EMAC4) ?
552 __emac4_calc_base_mr1(dev, tx_size, rx_size) :
553 __emac_calc_base_mr1(dev, tx_size, rx_size);
/* Compute the TX Request Threshold Register value: threshold is in
 * 64-byte units minus one, shifted per core variant. */
556 static inline u32 emac_calc_trtr(struct emac_instance *dev, unsigned int size)
558 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
559 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT_EMAC4;
561 return ((size >> 6) - 1) << EMAC_TRTR_SHIFT;
/* Compute the RX Watermark Register value; EMAC4 uses wider fields
 * (10-bit high mark at bit 6) than the classic core (9-bit at bit 7). */
564 static inline u32 emac_calc_rwmr(struct emac_instance *dev,
565 unsigned int low, unsigned int high)
567 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
568 return (low << 22) | ( (high & 0x3ff) << 6);
570 return (low << 23) | ( (high & 0x1ff) << 7);
/* Full hardware (re)configuration of the EMAC after reset: program MR1
 * (duplex, speed, FIFO sizes, flow control, jumbo), MAC address, VLAN
 * TPID, RX mode, TX/RX FIFO thresholds and watermarks, PAUSE timer and
 * interrupt enables; finally take a GPCS PHY out of isolate mode.
 * Called with the link state already determined. */
573 static int emac_configure(struct emac_instance *dev)
575 struct emac_regs __iomem *p = dev->emacp;
576 struct net_device *ndev = dev->ndev;
577 int tx_size, rx_size, link = netif_carrier_ok(dev->ndev);
580 DBG(dev, "configure" NL);
583 out_be32(&p->mr1, in_be32(&p->mr1)
584 | EMAC_MR1_FDE | EMAC_MR1_ILE);
586 } else if (emac_reset(dev) < 0)
589 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
590 tah_reset(dev->tah_dev);
592 DBG(dev, " link = %d duplex = %d, pause = %d, asym_pause = %d\n",
593 link, dev->phy.duplex, dev->phy.pause, dev->phy.asym_pause);
595 /* Default fifo sizes */
596 tx_size = dev->tx_fifo_size;
597 rx_size = dev->rx_fifo_size;
599 /* No link, force loopback */
601 mr1 = EMAC_MR1_FDE | EMAC_MR1_ILE;
603 /* Check for full duplex */
604 else if (dev->phy.duplex == DUPLEX_FULL)
605 mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
607 /* Adjust fifo sizes, mr1 and timeouts based on link speed */
608 dev->stop_timeout = STOP_TIMEOUT_10;
609 switch (dev->phy.speed) {
611 if (emac_phy_gpcs(dev->phy.mode)) {
612 mr1 |= EMAC_MR1_MF_1000GPCS | EMAC_MR1_MF_IPPA(
613 (dev->phy.gpcs_address != 0xffffffff) ?
614 dev->phy.gpcs_address : dev->phy.address);
616 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
617 * identify this GPCS PHY later.
619 out_be32(&p->u1.emac4.ipcr, 0xdeadbeef);
621 mr1 |= EMAC_MR1_MF_1000;
623 /* Extended fifo sizes */
624 tx_size = dev->tx_fifo_size_gige;
625 rx_size = dev->rx_fifo_size_gige;
627 if (dev->ndev->mtu > ETH_DATA_LEN) {
628 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
629 mr1 |= EMAC4_MR1_JPSM;
631 mr1 |= EMAC_MR1_JPSM;
632 dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
634 dev->stop_timeout = STOP_TIMEOUT_1000;
637 mr1 |= EMAC_MR1_MF_100;
638 dev->stop_timeout = STOP_TIMEOUT_100;
640 default: /* make gcc happy */
644 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
645 rgmii_set_speed(dev->rgmii_dev, dev->rgmii_port,
647 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
648 zmii_set_speed(dev->zmii_dev, dev->zmii_port, dev->phy.speed);
650 /* on 40x erratum forces us to NOT use integrated flow control,
651 * let's hope it works on 44x ;)
653 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x) &&
654 dev->phy.duplex == DUPLEX_FULL) {
656 mr1 |= EMAC_MR1_EIFC | EMAC_MR1_APP;
657 else if (dev->phy.asym_pause)
661 /* Add base settings & fifo sizes & program MR1 */
662 mr1 |= emac_calc_base_mr1(dev, tx_size, rx_size);
663 out_be32(&p->mr1, mr1);
665 /* Set individual MAC address */
666 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
667 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
668 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
671 /* VLAN Tag Protocol ID */
672 out_be32(&p->vtpid, 0x8100);
674 /* Receive mode register */
675 r = emac_iff2rmr(ndev);
676 if (r & EMAC_RMR_MAE)
678 out_be32(&p->rmr, r);
680 /* FIFOs thresholds */
681 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
682 r = EMAC4_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
683 tx_size / 2 / dev->fifo_entry_size);
685 r = EMAC_TMR1((dev->mal_burst_size / dev->fifo_entry_size) + 1,
686 tx_size / 2 / dev->fifo_entry_size);
687 out_be32(&p->tmr1, r);
688 out_be32(&p->trtr, emac_calc_trtr(dev, tx_size / 2));
690 /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
691 there should be still enough space in FIFO to allow our link
692 partner time to process this frame and also time to send PAUSE
695 Here is the worst case scenario for the RX FIFO "headroom"
696 (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
698 1) One maximum-length frame on TX 1522 bytes
699 2) One PAUSE frame time 64 bytes
700 3) PAUSE frame decode time allowance 64 bytes
701 4) One maximum-length frame on RX 1522 bytes
702 5) Round-trip propagation delay of the link (100Mb) 15 bytes
706 I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
707 low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
709 r = emac_calc_rwmr(dev, rx_size / 8 / dev->fifo_entry_size,
710 rx_size / 4 / dev->fifo_entry_size);
711 out_be32(&p->rwmr, r);
713 /* Set PAUSE timer to the maximum */
714 out_be32(&p->ptr, 0xffff);
717 r = EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
718 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
719 EMAC_ISR_IRE | EMAC_ISR_TE;
720 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
721 r |= EMAC4_ISR_TXPE | EMAC4_ISR_RXPE /* | EMAC4_ISR_TXUE |
723 out_be32(&p->iser, r);
725 /* We need to take GPCS PHY out of isolate mode after EMAC reset */
726 if (emac_phy_gpcs(dev->phy.mode)) {
727 if (dev->phy.gpcs_address != 0xffffffff)
728 emac_mii_reset_gpcs(&dev->phy);
730 emac_mii_reset_phy(&dev->phy);
/* Stop the netdev side, re-run the full hardware configuration and
 * restart, used when settings change while the interface is up. */
736 static void emac_reinitialize(struct emac_instance *dev)
738 DBG(dev, "reinitialize" NL);
740 emac_netif_stop(dev);
741 if (!emac_configure(dev)) {
745 emac_netif_start(dev);
/* Full TX path reset: disable EMAC TX and the MAL TX channel, free all
 * pending TX skbs, zero the ring indices, then re-enable the channel. */
748 static void emac_full_tx_reset(struct emac_instance *dev)
750 DBG(dev, "full_tx_reset" NL);
752 emac_tx_disable(dev);
753 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
754 emac_clean_tx_ring(dev);
755 dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
759 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
/* Deferred TX-timeout recovery, scheduled by emac_tx_timeout().  Runs
 * in process context under link_lock so it can sleep while quiescing
 * the interface and performing the full TX reset. */
764 static void emac_reset_work(struct work_struct *work)
766 struct emac_instance *dev = container_of(work, struct emac_instance, reset_work);
768 DBG(dev, "reset_work" NL);
770 mutex_lock(&dev->link_lock);
772 emac_netif_stop(dev);
773 emac_full_tx_reset(dev);
774 emac_netif_start(dev);
776 mutex_unlock(&dev->link_lock);
/* ndo_tx_timeout hook: defer the actual recovery to the reset_work
 * workqueue item, since this runs in a context that cannot sleep. */
779 static void emac_tx_timeout(struct net_device *ndev)
781 struct emac_instance *dev = netdev_priv(ndev);
783 DBG(dev, "tx_timeout" NL);
785 schedule_work(&dev->reset_work);
/* Return non-zero when an MDIO operation has completed, honouring the
 * inverted sense of the STACR OC bit on cores with that quirk. */
789 static inline int emac_phy_done(struct emac_instance *dev, u32 stacr)
791 int done = !!(stacr & EMAC_STACR_OC);
793 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
/* Read one PHY register over MDIO through this EMAC's STA controller.
 * Serialized by mdio_lock; claims the shared MDIO lines from the
 * ZMII/RGMII bridge, waits for the interface to go idle, issues the
 * read command in STACR, then polls for completion.
 * Returns the 16-bit register value, or a negative errno on timeout
 * or PHY error (EMAC_STACR_PHYE). */
799 static int __emac_mdio_read(struct emac_instance *dev, u8 id, u8 reg)
801 struct emac_regs __iomem *p = dev->emacp;
803 int n, err = -ETIMEDOUT;
805 mutex_lock(&dev->mdio_lock);
807 DBG2(dev, "mdio_read(%02x,%02x)" NL, id, reg);
809 /* Enable proper MDIO port */
810 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
811 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
812 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
813 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
815 /* Wait for management interface to become idle */
817 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
820 DBG2(dev, " -> timeout wait idle\n");
825 /* Issue read command */
826 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
827 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
829 r = EMAC_STACR_BASE(dev->opb_bus_freq);
830 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
832 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
833 r |= EMACX_STACR_STAC_READ;
835 r |= EMAC_STACR_STAC_READ;
836 r |= (reg & EMAC_STACR_PRA_MASK)
837 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT);
838 out_be32(&p->stacr, r);
840 /* Wait for read to complete */
842 while (!emac_phy_done(dev, (r = in_be32(&p->stacr)))) {
845 DBG2(dev, " -> timeout wait complete\n");
850 if (unlikely(r & EMAC_STACR_PHYE)) {
851 DBG(dev, "mdio_read(%02x, %02x) failed" NL, id, reg);
/* Extract the 16-bit data field from the completed STACR value */
856 r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
858 DBG2(dev, "mdio_read -> %04x" NL, r);
/* Release the shared MDIO lines in reverse acquisition order */
861 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
862 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
863 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
864 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
865 mutex_unlock(&dev->mdio_lock);
867 return err == 0 ? r : err;
/* Write one PHY register over MDIO; mirrors __emac_mdio_read() but
 * issues a write command carrying val in the STACR data field.  Errors
 * (timeouts) are only logged at debug level — the function is void. */
870 static void __emac_mdio_write(struct emac_instance *dev, u8 id, u8 reg,
873 struct emac_regs __iomem *p = dev->emacp;
875 int n, err = -ETIMEDOUT;
877 mutex_lock(&dev->mdio_lock);
879 DBG2(dev, "mdio_write(%02x,%02x,%04x)" NL, id, reg, val);
881 /* Enable proper MDIO port */
882 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
883 zmii_get_mdio(dev->zmii_dev, dev->zmii_port);
884 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
885 rgmii_get_mdio(dev->rgmii_dev, dev->rgmii_port);
887 /* Wait for management interface to be idle */
889 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
892 DBG2(dev, " -> timeout wait idle\n");
897 /* Issue write command */
898 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
899 r = EMAC4_STACR_BASE(dev->opb_bus_freq);
901 r = EMAC_STACR_BASE(dev->opb_bus_freq);
902 if (emac_has_feature(dev, EMAC_FTR_STACR_OC_INVERT))
904 if (emac_has_feature(dev, EMAC_FTR_HAS_NEW_STACR))
905 r |= EMACX_STACR_STAC_WRITE;
907 r |= EMAC_STACR_STAC_WRITE;
908 r |= (reg & EMAC_STACR_PRA_MASK) |
909 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
910 (val << EMAC_STACR_PHYD_SHIFT);
911 out_be32(&p->stacr, r);
913 /* Wait for write to complete */
915 while (!emac_phy_done(dev, in_be32(&p->stacr))) {
918 DBG2(dev, " -> timeout wait complete\n");
/* Release the shared MDIO lines in reverse acquisition order */
924 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
925 rgmii_put_mdio(dev->rgmii_dev, dev->rgmii_port);
926 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
927 zmii_put_mdio(dev->zmii_dev, dev->zmii_port);
928 mutex_unlock(&dev->mdio_lock);
/* mii read wrapper: route the access through the designated MDIO-owning
 * EMAC instance (mdio_instance), except for the GPCS address which must
 * be accessed through this EMAC itself. */
931 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
933 struct emac_instance *dev = netdev_priv(ndev);
936 res = __emac_mdio_read((dev->mdio_instance &&
937 dev->phy.gpcs_address != id) ?
938 dev->mdio_instance : dev,
/* mii write wrapper; same instance-routing rule as emac_mdio_read(). */
943 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
945 struct emac_instance *dev = netdev_priv(ndev);
947 __emac_mdio_write((dev->mdio_instance &&
948 dev->phy.gpcs_address != id) ?
949 dev->mdio_instance : dev,
950 (u8) id, (u8) reg, (u16) val);
/* Apply the current multicast/RX-mode settings to the hardware: stop
 * RX, program the hash table if multicast acceptance is enabled, then
 * write the new RMR.  Deliberately avoids a full EMAC reset (see the
 * MWSW_001 discussion below). */
954 static void __emac_set_multicast_list(struct emac_instance *dev)
956 struct emac_regs __iomem *p = dev->emacp;
957 u32 rmr = emac_iff2rmr(dev->ndev);
959 DBG(dev, "__multicast %08x" NL, rmr);
961 /* I decided to relax register access rules here to avoid
964 * There is a real problem with EMAC4 core if we use MWSW_001 bit
965 * in MR1 register and do a full EMAC reset.
966 * One TX BD status update is delayed and, after EMAC reset, it
967 * never happens, resulting in TX hung (it'll be recovered by TX
968 * timeout handler eventually, but this is just gross).
969 * So we either have to do full TX reset or try to cheat here :)
971 * The only required change is to RX mode register, so I *think* all
972 * we need is just to stop RX channel. This seems to work on all
975 * If we need the full reset, we might just trigger the workqueue
976 * and do it async... a bit nasty but should work --BenH
978 dev->mcast_pending = 0;
979 emac_rx_disable(dev);
980 if (rmr & EMAC_RMR_MAE)
982 out_be32(&p->rmr, rmr);
/* ndo_set_rx_mode hook.  If the update cannot be applied immediately
 * it is marked pending (mcast_pending) and flushed later by
 * emac_netif_start(); otherwise apply it now under link_lock. */
987 static void emac_set_multicast_list(struct net_device *ndev)
989 struct emac_instance *dev = netdev_priv(ndev);
991 DBG(dev, "multicast" NL);
993 BUG_ON(!netif_running(dev->ndev));
996 dev->mcast_pending = 1;
1000 mutex_lock(&dev->link_lock);
1001 __emac_set_multicast_list(dev);
1002 mutex_unlock(&dev->link_lock);
/* ndo_set_mac_address hook: validate the new address, then briefly
 * quiesce RX/TX while rewriting the individual-address registers
 * (IAHR holds bytes 0-1, IALR bytes 2-5), all under link_lock.
 * Returns -EADDRNOTAVAIL for an invalid ethernet address. */
1005 static int emac_set_mac_address(struct net_device *ndev, void *sa)
1007 struct emac_instance *dev = netdev_priv(ndev);
1008 struct sockaddr *addr = sa;
1009 struct emac_regs __iomem *p = dev->emacp;
1011 if (!is_valid_ether_addr(addr->sa_data))
1012 return -EADDRNOTAVAIL;
1014 mutex_lock(&dev->link_lock);
1016 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
1018 emac_rx_disable(dev);
1019 emac_tx_disable(dev);
1020 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
1021 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
1022 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
1024 emac_tx_enable(dev);
1025 emac_rx_enable(dev);
1027 mutex_unlock(&dev->link_lock);
/* Resize the RX ring for a new MTU while the interface is running.
 * Quiesces the interface and the MAL RX channel, drops any partial
 * scatter-gather skb, resets all BDs (first pass), and — only when the
 * new skbs must be larger — reallocates every RX skb (second pass).
 * Also toggles the MR1 jumbo bit via a full TX reset when crossing the
 * ETH_DATA_LEN boundary (APM821xx always forces the reset path).
 * Note the intentional lack of dma_unmap calls (see top-of-file
 * comment).  Runs under link_lock. */
1032 static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
1034 int rx_sync_size = emac_rx_sync_size(new_mtu);
1035 int rx_skb_size = emac_rx_skb_size(new_mtu);
1037 int mr1_jumbo_bit_change = 0;
1039 mutex_lock(&dev->link_lock);
1040 emac_netif_stop(dev);
1041 emac_rx_disable(dev);
1042 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1044 if (dev->rx_sg_skb) {
1045 ++dev->estats.rx_dropped_resize;
1046 dev_kfree_skb(dev->rx_sg_skb);
1047 dev->rx_sg_skb = NULL;
1050 /* Make a first pass over RX ring and mark BDs ready, dropping
1051 * non-processed packets on the way. We need this as a separate pass
1052 * to simplify error recovery in the case of allocation failure later.
1054 for (i = 0; i < NUM_RX_BUFF; ++i) {
1055 if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
1056 ++dev->estats.rx_dropped_resize;
1058 dev->rx_desc[i].data_len = 0;
1059 dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
1060 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1063 /* Reallocate RX ring only if bigger skb buffers are required */
1064 if (rx_skb_size <= dev->rx_skb_size)
1067 /* Second pass, allocate new skbs */
1068 for (i = 0; i < NUM_RX_BUFF; ++i) {
1069 struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
1075 BUG_ON(!dev->rx_skb[i]);
1076 dev_kfree_skb(dev->rx_skb[i]);
/* +2 keeps the IP header 4-byte aligned after the 14-byte MAC header */
1078 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1079 dev->rx_desc[i].data_ptr =
1080 dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
1081 DMA_FROM_DEVICE) + 2;
1082 dev->rx_skb[i] = skb;
1085 /* Check if we need to change "Jumbo" bit in MR1 */
1086 if (emac_has_feature(dev, EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE)) {
1087 mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ||
1088 (dev->ndev->mtu > ETH_DATA_LEN);
1090 mr1_jumbo_bit_change = (new_mtu > ETH_DATA_LEN) ^
1091 (dev->ndev->mtu > ETH_DATA_LEN);
1094 if (mr1_jumbo_bit_change) {
1095 /* This is to prevent starting RX channel in emac_rx_enable() */
1096 set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1098 dev->ndev->mtu = new_mtu;
1099 emac_full_tx_reset(dev);
1102 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(new_mtu));
1105 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1107 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1108 emac_rx_enable(dev);
1109 emac_netif_start(dev);
1110 mutex_unlock(&dev->link_lock);
1115 /* Process ctx, rtnl_lock semaphore */
/* ndo_change_mtu hook: only resize the RX ring when the new MTU
 * actually changes the required skb size; then record the new MTU and
 * derived RX sizes in the instance. */
1116 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
1118 struct emac_instance *dev = netdev_priv(ndev);
1121 DBG(dev, "change_mtu(%d)" NL, new_mtu);
1123 if (netif_running(ndev)) {
1124 /* Check if we really need to reinitialize RX ring */
1125 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
1126 ret = emac_resize_rx_ring(dev, new_mtu);
1130 ndev->mtu = new_mtu;
1131 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
1132 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
/* Free every pending TX skb and clear the TX descriptors.  Descriptors
 * still marked READY count as dropped packets in the error stats. */
1138 static void emac_clean_tx_ring(struct emac_instance *dev)
1142 for (i = 0; i < NUM_TX_BUFF; ++i) {
1143 if (dev->tx_skb[i]) {
1144 dev_kfree_skb(dev->tx_skb[i]);
1145 dev->tx_skb[i] = NULL;
1146 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
1147 ++dev->estats.tx_dropped;
1149 dev->tx_desc[i].ctrl = 0;
1150 dev->tx_desc[i].data_ptr = 0;
/* Free every RX skb, clear the RX descriptors, and drop any partially
 * assembled scatter-gather skb (rx_sg_skb). */
1154 static void emac_clean_rx_ring(struct emac_instance *dev)
1158 for (i = 0; i < NUM_RX_BUFF; ++i)
1159 if (dev->rx_skb[i]) {
1160 dev->rx_desc[i].ctrl = 0;
1161 dev_kfree_skb(dev->rx_skb[i]);
1162 dev->rx_skb[i] = NULL;
1163 dev->rx_desc[i].data_ptr = 0;
1166 if (dev->rx_sg_skb) {
1167 dev_kfree_skb(dev->rx_sg_skb);
1168 dev->rx_sg_skb = NULL;
/* Allocate and DMA-map one RX skb for ring position 'slot' and mark its
 * descriptor EMPTY (with WRAP on the last slot).  The +2 offset keeps
 * the IP header 4-byte aligned after the 14-byte ethernet header. */
1172 static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
1175 struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
1179 dev->rx_skb[slot] = skb;
1180 dev->rx_desc[slot].data_len = 0;
1182 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
1183 dev->rx_desc[slot].data_ptr =
1184 dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
1185 DMA_FROM_DEVICE) + 2;
1187 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1188 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
/* Log the current link state: speed, duplex and pause configuration
 * when the carrier is up, or a "link is down" message otherwise. */
1193 static void emac_print_link_status(struct emac_instance *dev)
1195 if (netif_carrier_ok(dev->ndev))
1196 printk(KERN_INFO "%s: link is up, %d %s%s\n",
1197 dev->ndev->name, dev->phy.speed,
1198 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
1199 dev->phy.pause ? ", pause enabled" :
1200 dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
1202 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
1205 /* Process ctx, rtnl_lock semaphore */
/* ndo_open hook: request the error IRQ, populate the RX ring, start
 * PHY link polling (carrier state + poll interval depend on the
 * initial link check), add the PAUSE multicast address, configure the
 * hardware and enable MAL channels, TX/RX and the netdev queue.
 * On RX allocation failure, unwinds the ring and the IRQ. */
1206 static int emac_open(struct net_device *ndev)
1208 struct emac_instance *dev = netdev_priv(ndev);
1211 DBG(dev, "open" NL);
1213 /* Setup error IRQ handler */
1214 err = request_irq(dev->emac_irq, emac_irq, 0, "EMAC", dev);
1216 printk(KERN_ERR "%s: failed to request IRQ %d\n",
1217 ndev->name, dev->emac_irq);
1221 /* Allocate RX ring */
1222 for (i = 0; i < NUM_RX_BUFF; ++i)
1223 if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
1224 printk(KERN_ERR "%s: failed to allocate RX ring\n",
1229 dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
1230 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1231 dev->rx_sg_skb = NULL;
1233 mutex_lock(&dev->link_lock);
1236 /* Start PHY polling now.
1238 if (dev->phy.address >= 0) {
1239 int link_poll_interval;
1240 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1241 dev->phy.def->ops->read_link(&dev->phy);
1242 emac_rx_clk_default(dev);
1243 netif_carrier_on(dev->ndev);
1244 link_poll_interval = PHY_POLL_LINK_ON;
1246 emac_rx_clk_tx(dev);
1247 netif_carrier_off(dev->ndev);
1248 link_poll_interval = PHY_POLL_LINK_OFF;
1250 dev->link_polling = 1;
1252 schedule_delayed_work(&dev->link_work, link_poll_interval);
1253 emac_print_link_status(dev);
1255 netif_carrier_on(dev->ndev);
1257 /* Required for Pause packet support in EMAC */
1258 dev_mc_add_global(ndev, default_mcast_addr);
1260 emac_configure(dev);
1261 mal_poll_add(dev->mal, &dev->commac);
1262 mal_enable_tx_channel(dev->mal, dev->mal_tx_chan);
1263 mal_set_rcbs(dev->mal, dev->mal_rx_chan, emac_rx_size(ndev->mtu));
1264 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1265 emac_tx_enable(dev);
1266 emac_rx_enable(dev);
1267 emac_netif_start(dev);
1269 mutex_unlock(&dev->link_lock);
/* Error unwind: free whatever part of the RX ring was allocated */
1273 emac_clean_rx_ring(dev);
1274 free_irq(dev->emac_irq, dev);
/* Compare the link parameters currently programmed in the EMAC MR1
 * register against the cached PHY state; returns non-zero when they
 * disagree (i.e. the MAC needs reconfiguring).
 * NOTE(review): the speed assignments and remaining pause cases are
 * elided in this excerpt.
 */
1281 static int emac_link_differs(struct emac_instance *dev)
1283 u32 r = in_be32(&dev->emacp->mr1);
1285 int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
1286 int speed, pause, asym_pause;
1288 if (r & EMAC_MR1_MF_1000)
1290 else if (r & EMAC_MR1_MF_100)
/* Decode flow-control mode from the EIFC/APP bit pair. */
1295 switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
1296 case (EMAC_MR1_EIFC | EMAC_MR1_APP):
1305 pause = asym_pause = 0;
1307 return speed != dev->phy.speed || duplex != dev->phy.duplex ||
1308 pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
/* Periodic PHY link poll (delayed work).
 * On a link-up transition: re-read link parameters, do a full TX reset
 * to apply them, and report carrier on. On link-down: reinitialize the
 * MAC and report carrier off. Reschedules itself with a shorter
 * interval while the link is down.
 * NOTE(review): some lines (e.g. early-exit when polling was stopped)
 * are elided in this excerpt.
 */
1312 static void emac_link_timer(struct work_struct *work)
1314 struct emac_instance *dev =
1315 container_of(to_delayed_work(work),
1316 struct emac_instance, link_work);
1317 int link_poll_interval;
1319 mutex_lock(&dev->link_lock);
1320 DBG2(dev, "link timer" NL);
1325 if (dev->phy.def->ops->poll_link(&dev->phy)) {
1326 if (!netif_carrier_ok(dev->ndev)) {
1327 emac_rx_clk_default(dev);
1328 /* Get new link parameters */
1329 dev->phy.def->ops->read_link(&dev->phy);
/* Carrier on before the reset so the stack sees the link as usable. */
1331 netif_carrier_on(dev->ndev);
1332 emac_netif_stop(dev);
1333 emac_full_tx_reset(dev);
1334 emac_netif_start(dev);
1335 emac_print_link_status(dev);
1337 link_poll_interval = PHY_POLL_LINK_ON;
1339 if (netif_carrier_ok(dev->ndev)) {
1340 emac_rx_clk_tx(dev);
1341 netif_carrier_off(dev->ndev);
1342 netif_tx_disable(dev->ndev);
1343 emac_reinitialize(dev);
1344 emac_print_link_status(dev);
1346 link_poll_interval = PHY_POLL_LINK_OFF;
1348 schedule_delayed_work(&dev->link_work, link_poll_interval);
1350 mutex_unlock(&dev->link_lock);
/* Force a link re-check soon: drop carrier and, if polling is active,
 * cancel the pending link work and reschedule it with the (shorter)
 * link-down interval. The second link_polling test guards against a
 * concurrent emac_close() clearing the flag while we waited in
 * cancel_delayed_work_sync().
 */
1353 static void emac_force_link_update(struct emac_instance *dev)
1355 netif_carrier_off(dev->ndev);
1357 if (dev->link_polling) {
1358 cancel_delayed_work_sync(&dev->link_work);
1359 if (dev->link_polling)
1360 schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
/* ndo_stop: tear down in reverse order of emac_open() — stop link
 * polling, quiesce the netif, disable EMAC and MAL channels, drain both
 * rings, release the IRQ, and drop carrier.
 */
1364 /* Process ctx, rtnl_lock semaphore */
1365 static int emac_close(struct net_device *ndev)
1367 struct emac_instance *dev = netdev_priv(ndev);
1369 DBG(dev, "close" NL);
/* Clear link_polling before cancelling so a racing
 * emac_force_link_update() will not reschedule the work. */
1371 if (dev->phy.address >= 0) {
1372 dev->link_polling = 0;
1373 cancel_delayed_work_sync(&dev->link_work);
1375 mutex_lock(&dev->link_lock);
1376 emac_netif_stop(dev);
1378 mutex_unlock(&dev->link_lock);
1380 emac_rx_disable(dev);
1381 emac_tx_disable(dev);
1382 mal_disable_rx_channel(dev->mal, dev->mal_rx_chan);
1383 mal_disable_tx_channel(dev->mal, dev->mal_tx_chan);
1384 mal_poll_del(dev->mal, &dev->commac);
1386 emac_clean_tx_ring(dev);
1387 emac_clean_rx_ring(dev);
1389 free_irq(dev->emac_irq, dev);
1391 netif_carrier_off(ndev);
/* Return the TX descriptor control bits for hardware checksum offload:
 * EMAC_TX_CTRL_TAH_CSUM when a TAH is present and the stack requested
 * CHECKSUM_PARTIAL; otherwise (elided return) no extra bits.
 */
1396 static inline u16 emac_tx_csum(struct emac_instance *dev,
1397 struct sk_buff *skb)
1399 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
1400 (skb->ip_summed == CHECKSUM_PARTIAL)) {
1401 ++dev->stats.tx_packets_csum;
1402 return EMAC_TX_CTRL_TAH_CSUM;
/* Common tail of the xmit paths: kick the EMAC transmitter via TMR0
 * (EMAC4 uses a different bit encoding), stop the queue when the ring
 * is now full, update trans_start and TX byte/packet counters.
 * @len: frame length for the byte counter.
 * Always returns NETDEV_TX_OK.
 */
1407 static inline int emac_xmit_finish(struct emac_instance *dev, int len)
1409 struct emac_regs __iomem *p = dev->emacp;
1410 struct net_device *ndev = dev->ndev;
1412 /* Send the packet out. If the if makes a significant perf
1413 * difference, then we can store the TMR0 value in "dev"
1416 if (emac_has_feature(dev, EMAC_FTR_EMAC4))
1417 out_be32(&p->tmr0, EMAC4_TMR0_XMIT);
1419 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
/* Ring full: pause the stack until emac_poll_tx() reaps completions. */
1421 if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1422 netif_stop_queue(ndev);
1423 DBG2(dev, "stopped TX queue" NL);
1426 netif_trans_update(ndev);
1427 ++dev->stats.tx_packets;
1428 dev->stats.tx_bytes += len;
1430 return NETDEV_TX_OK;
/* ndo_start_xmit for non-TAH EMACs: single-descriptor transmit.
 * Builds the control word (generate FCS, pad, READY, LAST, optional
 * TAH checksum), maps the linear skb data, and kicks the hardware via
 * emac_xmit_finish().
 * NOTE(review): the ring-full check and parts of the slot/wrap handling
 * are elided in this excerpt.
 */
1434 static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1436 struct emac_instance *dev = netdev_priv(ndev);
1437 unsigned int len = skb->len;
1440 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1441 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1443 slot = dev->tx_slot++;
1444 if (dev->tx_slot == NUM_TX_BUFF) {
1446 ctrl |= MAL_TX_CTRL_WRAP;
1449 DBG2(dev, "xmit(%u) %d" NL, len, slot);
1451 dev->tx_skb[slot] = skb;
1452 dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
1455 dev->tx_desc[slot].data_len = (u16) len;
/* ctrl (with READY) is written last so the MAL only sees a complete
 * descriptor. */
1457 dev->tx_desc[slot].ctrl = ctrl;
1459 return emac_xmit_finish(dev, len);
/* Helper for the SG xmit path: emit additional descriptors for a DMA
 * region (@pd, @len), splitting it into MAL_MAX_TX_SIZE chunks.
 * @last: non-zero when this region ends the packet (sets LAST on the
 * final chunk). Returns the last slot used (loop structure partially
 * elided in this excerpt).
 */
1462 static inline int emac_xmit_split(struct emac_instance *dev, int slot,
1463 u32 pd, int len, int last, u16 base_ctrl)
1466 u16 ctrl = base_ctrl;
1467 int chunk = min(len, MAL_MAX_TX_SIZE);
1470 slot = (slot + 1) % NUM_TX_BUFF;
1473 ctrl |= MAL_TX_CTRL_LAST;
1474 if (slot == NUM_TX_BUFF - 1)
1475 ctrl |= MAL_TX_CTRL_WRAP;
/* Intermediate slots carry no skb; only the packet's final slot owns
 * the skb (attached by the caller). */
1477 dev->tx_skb[slot] = NULL;
1478 dev->tx_desc[slot].data_ptr = pd;
1479 dev->tx_desc[slot].data_len = (u16) chunk;
1480 dev->tx_desc[slot].ctrl = ctrl;
/* ndo_start_xmit for TAH-equipped EMACs: scatter/gather transmit.
 * Linear, unfragmented frames that fit one descriptor take the fast
 * path through emac_start_xmit(). Otherwise the head and each fragment
 * are mapped and split into MAL_MAX_TX_SIZE chunks; the slot estimate
 * is optimistic, so an undo path rolls back descriptors and returns
 * NETDEV_TX_BUSY when the ring runs out mid-packet.
 * NOTE(review): several lines (labels, READY-bit ordering for the first
 * descriptor) are elided in this excerpt.
 */
1491 /* Tx lock BH disabled (SG version for TAH equipped EMACs) */
1492 static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
1494 struct emac_instance *dev = netdev_priv(ndev);
1495 int nr_frags = skb_shinfo(skb)->nr_frags;
1496 int len = skb->len, chunk;
1501 /* This is common "fast" path */
1502 if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
1503 return emac_start_xmit(skb, ndev);
/* len becomes the linear-head length only. */
1505 len -= skb->data_len;
1507 /* Note, this is only an *estimation*, we can still run out of empty
1508 * slots because of the additional fragmentation into
1509 * MAL_MAX_TX_SIZE-sized chunks
1511 if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
/* No MAL_TX_CTRL_LAST here — it is set on the final chunk by
 * emac_xmit_split(). */
1514 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1515 emac_tx_csum(dev, skb);
1516 slot = dev->tx_slot;
1519 dev->tx_skb[slot] = NULL;
1520 chunk = min(len, MAL_MAX_TX_SIZE);
1521 dev->tx_desc[slot].data_ptr = pd =
1522 dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
1523 dev->tx_desc[slot].data_len = (u16) chunk;
1526 slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
1529 for (i = 0; i < nr_frags; ++i) {
1530 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1531 len = skb_frag_size(frag);
/* Re-check per fragment: the initial estimate can be exceeded. */
1533 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1536 pd = skb_frag_dma_map(&dev->ofdev->dev, frag, 0, len,
1539 slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1543 DBG2(dev, "xmit_sg(%u) %d - %d" NL, skb->len, dev->tx_slot, slot);
1545 /* Attach skb to the last slot so we don't release it too early */
1546 dev->tx_skb[slot] = skb;
1548 /* Send the packet out */
1549 if (dev->tx_slot == NUM_TX_BUFF - 1)
1550 ctrl |= MAL_TX_CTRL_WRAP;
1552 dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1553 dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1555 return emac_xmit_finish(dev, skb->len);
1558 /* Well, too bad. Our previous estimation was overly optimistic.
/* Undo path: walk back over the descriptors written so far, clearing
 * their control words, then stop the queue and report TX_BUSY. */
1561 while (slot != dev->tx_slot) {
1562 dev->tx_desc[slot].ctrl = 0;
1565 slot = NUM_TX_BUFF - 1;
1567 ++dev->estats.tx_undo;
1570 netif_stop_queue(ndev);
1571 DBG2(dev, "stopped TX queue" NL);
1572 return NETDEV_TX_BUSY;
/* Decode a bad TX buffer-descriptor status word (@ctrl) into the
 * driver's per-cause error counters.
 */
1576 static void emac_parse_tx_error(struct emac_instance *dev, u16 ctrl)
1578 struct emac_error_stats *st = &dev->estats;
1580 DBG(dev, "BD TX error %04x" NL, ctrl);
1583 if (ctrl & EMAC_TX_ST_BFCS)
1584 ++st->tx_bd_bad_fcs;
1585 if (ctrl & EMAC_TX_ST_LCS)
1586 ++st->tx_bd_carrier_loss;
1587 if (ctrl & EMAC_TX_ST_ED)
1588 ++st->tx_bd_excessive_deferral;
1589 if (ctrl & EMAC_TX_ST_EC)
1590 ++st->tx_bd_excessive_collisions;
1591 if (ctrl & EMAC_TX_ST_LC)
1592 ++st->tx_bd_late_collision;
1593 if (ctrl & EMAC_TX_ST_MC)
1594 ++st->tx_bd_multple_collisions;
1595 if (ctrl & EMAC_TX_ST_SC)
1596 ++st->tx_bd_single_collision;
1597 if (ctrl & EMAC_TX_ST_UR)
1598 ++st->tx_bd_underrun;
1599 if (ctrl & EMAC_TX_ST_SQE)
/* MAL poll callback: reap completed TX descriptors.
 * Walks the ring from ack_slot while descriptors are no longer READY,
 * frees the attached skbs, records BD errors, and wakes the queue once
 * occupancy drops below EMAC_TX_WAKEUP_THRESH.
 * Runs under netif_tx_lock_bh to serialize with the xmit paths.
 * NOTE(review): the loop structure and skb free are partially elided in
 * this excerpt.
 */
1603 static void emac_poll_tx(void *param)
1605 struct emac_instance *dev = param;
1608 DBG2(dev, "poll_tx, %d %d" NL, dev->tx_cnt, dev->ack_slot);
/* TAH-equipped parts report a different set of fatal BD bits. */
1610 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
1611 bad_mask = EMAC_IS_BAD_TX_TAH;
1613 bad_mask = EMAC_IS_BAD_TX;
1615 netif_tx_lock_bh(dev->ndev);
1618 int slot = dev->ack_slot, n = 0;
1620 ctrl = dev->tx_desc[slot].ctrl;
1621 if (!(ctrl & MAL_TX_CTRL_READY)) {
1622 struct sk_buff *skb = dev->tx_skb[slot];
1627 dev->tx_skb[slot] = NULL;
1629 slot = (slot + 1) % NUM_TX_BUFF;
1631 if (unlikely(ctrl & bad_mask))
1632 emac_parse_tx_error(dev, ctrl);
1638 dev->ack_slot = slot;
1639 if (netif_queue_stopped(dev->ndev) &&
1640 dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1641 netif_wake_queue(dev->ndev);
1643 DBG2(dev, "tx %d pkts" NL, n);
1646 netif_tx_unlock_bh(dev->ndev);
/* Return the existing skb in @slot to the hardware without reallocating:
 * re-map only the bytes the hardware touched (@len plus the 2-byte
 * alignment offset) and mark the descriptor EMPTY again.
 */
1649 static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
1652 struct sk_buff *skb = dev->rx_skb[slot];
1654 DBG2(dev, "recycle %d %d" NL, slot, len);
1657 dma_map_single(&dev->ofdev->dev, skb->data - 2,
1658 EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1660 dev->rx_desc[slot].data_len = 0;
1662 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1663 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
/* Decode a bad RX buffer-descriptor status word (@ctrl) into the
 * driver's per-cause error counters.
 */
1666 static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
1668 struct emac_error_stats *st = &dev->estats;
1670 DBG(dev, "BD RX error %04x" NL, ctrl);
1673 if (ctrl & EMAC_RX_ST_OE)
1674 ++st->rx_bd_overrun;
1675 if (ctrl & EMAC_RX_ST_BP)
1676 ++st->rx_bd_bad_packet;
1677 if (ctrl & EMAC_RX_ST_RP)
1678 ++st->rx_bd_runt_packet;
1679 if (ctrl & EMAC_RX_ST_SE)
1680 ++st->rx_bd_short_event;
1681 if (ctrl & EMAC_RX_ST_AE)
1682 ++st->rx_bd_alignment_error;
1683 if (ctrl & EMAC_RX_ST_BFCS)
1684 ++st->rx_bd_bad_fcs;
1685 if (ctrl & EMAC_RX_ST_PTL)
1686 ++st->rx_bd_packet_too_long;
1687 if (ctrl & EMAC_RX_ST_ORE)
1688 ++st->rx_bd_out_of_range;
1689 if (ctrl & EMAC_RX_ST_IRE)
1690 ++st->rx_bd_in_range;
1693 static inline void emac_rx_csum(struct emac_instance *dev,
1694 struct sk_buff *skb, u16 ctrl)
1696 #ifdef CONFIG_IBM_EMAC_TAH
1697 if (!ctrl && dev->tah_dev) {
1698 skb->ip_summed = CHECKSUM_UNNECESSARY;
1699 ++dev->stats.rx_packets_csum;
/* Append the data from ring slot @slot to the in-progress
 * scatter/gather packet (dev->rx_sg_skb). Drops the whole SG packet if
 * the total would exceed rx_skb_size (counted as rx_dropped_mtu). In
 * every case the ring slot is recycled.
 * NOTE(review): return statements are elided in this excerpt; the
 * trailing recycle at 1722 handles the no-SG-in-progress case.
 */
1704 static inline int emac_rx_sg_append(struct emac_instance *dev, int slot)
1706 if (likely(dev->rx_sg_skb != NULL)) {
1707 int len = dev->rx_desc[slot].data_len;
1708 int tot_len = dev->rx_sg_skb->len + len;
1710 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1711 ++dev->estats.rx_dropped_mtu;
1712 dev_kfree_skb(dev->rx_sg_skb);
1713 dev->rx_sg_skb = NULL;
/* Room available: copy this chunk onto the tail of the SG skb. */
1715 memcpy(skb_tail_pointer(dev->rx_sg_skb),
1716 dev->rx_skb[slot]->data, len);
1717 skb_put(dev->rx_sg_skb, len);
1718 emac_recycle_rx_skb(dev, slot, len);
1722 emac_recycle_rx_skb(dev, slot, 0);
/* NAPI RX poll: process up to @budget received frames.
 * Single-descriptor frames are either copied into a fresh small skb
 * (below EMAC_RX_COPY_THRESH, ring skb recycled) or handed up directly
 * with a replacement skb allocated for the slot. Multi-descriptor
 * frames are assembled into rx_sg_skb via emac_rx_sg_append(). If the
 * channel was stopped (RX-descriptor error) and the ring has room
 * again, the channel is restarted at the end.
 * NOTE(review): several lines (loop exits, goto labels, len adjustment
 * for the trailing FCS) are elided in this excerpt.
 */
1726 /* NAPI poll context */
1727 static int emac_poll_rx(void *param, int budget)
1729 struct emac_instance *dev = param;
1730 int slot = dev->rx_slot, received = 0;
1732 DBG2(dev, "poll_rx(%d)" NL, budget);
1735 while (budget > 0) {
1737 struct sk_buff *skb;
1738 u16 ctrl = dev->rx_desc[slot].ctrl;
/* EMPTY means the hardware still owns this descriptor: stop. */
1740 if (ctrl & MAL_RX_CTRL_EMPTY)
1743 skb = dev->rx_skb[slot];
1745 len = dev->rx_desc[slot].data_len;
1747 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1750 ctrl &= EMAC_BAD_RX_MASK;
1751 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1752 emac_parse_rx_error(dev, ctrl);
1753 ++dev->estats.rx_dropped_error;
1754 emac_recycle_rx_skb(dev, slot, 0);
/* Runt frames are dropped without touching the stack. */
1759 if (len < ETH_HLEN) {
1760 ++dev->estats.rx_dropped_stack;
1761 emac_recycle_rx_skb(dev, slot, len);
/* Small frame: copy out and recycle the (large) ring skb. */
1765 if (len && len < EMAC_RX_COPY_THRESH) {
1766 struct sk_buff *copy_skb =
1767 alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1768 if (unlikely(!copy_skb))
1771 skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1772 memcpy(copy_skb->data - 2, skb->data - 2, len + 2);
1773 emac_recycle_rx_skb(dev, slot, len);
1775 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1780 skb->protocol = eth_type_trans(skb, dev->ndev);
1781 emac_rx_csum(dev, skb, ctrl);
1783 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1784 ++dev->estats.rx_dropped_stack;
1786 ++dev->stats.rx_packets;
1788 dev->stats.rx_bytes += len;
1789 slot = (slot + 1) % NUM_RX_BUFF;
/* Multi-descriptor (scatter/gather) handling. */
1794 if (ctrl & MAL_RX_CTRL_FIRST) {
1795 BUG_ON(dev->rx_sg_skb);
1796 if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1797 DBG(dev, "rx OOM %d" NL, slot);
1798 ++dev->estats.rx_dropped_oom;
1799 emac_recycle_rx_skb(dev, slot, 0);
/* First chunk becomes the head of the SG packet. */
1801 dev->rx_sg_skb = skb;
1804 } else if (!emac_rx_sg_append(dev, slot) &&
1805 (ctrl & MAL_RX_CTRL_LAST)) {
1807 skb = dev->rx_sg_skb;
1808 dev->rx_sg_skb = NULL;
1810 ctrl &= EMAC_BAD_RX_MASK;
1811 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1812 emac_parse_rx_error(dev, ctrl);
1813 ++dev->estats.rx_dropped_error;
/* Shared OOM path (label elided): drop and recycle. */
1821 DBG(dev, "rx OOM %d" NL, slot);
1822 /* Drop the packet and recycle skb */
1823 ++dev->estats.rx_dropped_oom;
1824 emac_recycle_rx_skb(dev, slot, 0);
1829 DBG2(dev, "rx %d BDs" NL, received);
1830 dev->rx_slot = slot;
/* Channel was stopped by an RXDE; restart it if budget remains and the
 * ring has an empty descriptor again. */
1833 if (unlikely(budget && test_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags))) {
1835 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1836 DBG2(dev, "rx restart" NL);
/* A partial SG packet cannot survive the restart: discard it. */
1841 if (dev->rx_sg_skb) {
1842 DBG2(dev, "dropping partial rx packet" NL);
1843 ++dev->estats.rx_dropped_error;
1844 dev_kfree_skb(dev->rx_sg_skb);
1845 dev->rx_sg_skb = NULL;
1848 clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
1849 mal_enable_rx_channel(dev->mal, dev->mal_rx_chan);
1850 emac_rx_enable(dev);
/* NAPI peek helper: non-zero when the next RX descriptor holds data. */
1856 /* NAPI poll context */
1857 static int emac_peek_rx(void *param)
1859 struct emac_instance *dev = param;
1861 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
/* SG variant: only report work when a COMPLETE packet is available,
 * i.e. scan forward until an EMPTY descriptor (no) or a LAST bit (yes).
 * The wrap-around check prevents an infinite scan on a full ring.
 */
1864 /* NAPI poll context */
1865 static int emac_peek_rx_sg(void *param)
1867 struct emac_instance *dev = param;
1869 int slot = dev->rx_slot;
1871 u16 ctrl = dev->rx_desc[slot].ctrl;
1872 if (ctrl & MAL_RX_CTRL_EMPTY)
1874 else if (ctrl & MAL_RX_CTRL_LAST)
1877 slot = (slot + 1) % NUM_RX_BUFF;
1879 /* I'm just being paranoid here :) */
1880 if (unlikely(slot == dev->rx_slot))
/* RX descriptor error callback: count it and disable RX asynchronously;
 * emac_poll_rx() restarts the channel later. */
1886 static void emac_rxde(void *param)
1888 struct emac_instance *dev = param;
1890 ++dev->estats.rx_stopped;
1891 emac_rx_disable_async(dev);
/* EMAC error interrupt handler: read and acknowledge ISR, then bump the
 * matching error counter for each asserted condition. Hardware counts
 * only; actual recovery happens elsewhere.
 */
1895 static irqreturn_t emac_irq(int irq, void *dev_instance)
1897 struct emac_instance *dev = dev_instance;
1898 struct emac_regs __iomem *p = dev->emacp;
1899 struct emac_error_stats *st = &dev->estats;
1902 spin_lock(&dev->lock);
/* Write-back clears the asserted bits (write-1-to-clear). */
1904 isr = in_be32(&p->isr);
1905 out_be32(&p->isr, isr);
1907 DBG(dev, "isr = %08x" NL, isr);
1909 if (isr & EMAC4_ISR_TXPE)
1911 if (isr & EMAC4_ISR_RXPE)
1913 if (isr & EMAC4_ISR_TXUE)
1915 if (isr & EMAC4_ISR_RXOE)
1916 ++st->rx_fifo_overrun;
1917 if (isr & EMAC_ISR_OVR)
1919 if (isr & EMAC_ISR_BP)
1920 ++st->rx_bad_packet;
1921 if (isr & EMAC_ISR_RP)
1922 ++st->rx_runt_packet;
1923 if (isr & EMAC_ISR_SE)
1924 ++st->rx_short_event;
1925 if (isr & EMAC_ISR_ALE)
1926 ++st->rx_alignment_error;
1927 if (isr & EMAC_ISR_BFCS)
1929 if (isr & EMAC_ISR_PTLE)
1930 ++st->rx_packet_too_long;
1931 if (isr & EMAC_ISR_ORE)
1932 ++st->rx_out_of_range;
1933 if (isr & EMAC_ISR_IRE)
1935 if (isr & EMAC_ISR_SQE)
1937 if (isr & EMAC_ISR_TE)
1940 spin_unlock(&dev->lock);
/* ndo_get_stats: fold the driver's extended 64-bit counters into the
 * legacy net_device_stats fields under dev->lock so the snapshot is
 * consistent with the IRQ handler's updates.
 */
1945 static struct net_device_stats *emac_stats(struct net_device *ndev)
1947 struct emac_instance *dev = netdev_priv(ndev);
1948 struct emac_stats *st = &dev->stats;
1949 struct emac_error_stats *est = &dev->estats;
1950 struct net_device_stats *nst = &ndev->stats;
1951 unsigned long flags;
1953 DBG2(dev, "stats" NL);
1955 /* Compute "legacy" statistics */
1956 spin_lock_irqsave(&dev->lock, flags);
1957 nst->rx_packets = (unsigned long)st->rx_packets;
1958 nst->rx_bytes = (unsigned long)st->rx_bytes;
1959 nst->tx_packets = (unsigned long)st->tx_packets;
1960 nst->tx_bytes = (unsigned long)st->tx_bytes;
1961 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1962 est->rx_dropped_error +
1963 est->rx_dropped_resize +
1964 est->rx_dropped_mtu);
1965 nst->tx_dropped = (unsigned long)est->tx_dropped;
1967 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1968 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1969 est->rx_fifo_overrun +
1971 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1972 est->rx_alignment_error);
1973 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1975 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1976 est->rx_bd_short_event +
1977 est->rx_bd_packet_too_long +
1978 est->rx_bd_out_of_range +
1979 est->rx_bd_in_range +
1980 est->rx_runt_packet +
1981 est->rx_short_event +
1982 est->rx_packet_too_long +
1983 est->rx_out_of_range +
1986 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1987 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1989 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1990 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1991 est->tx_bd_excessive_collisions +
1992 est->tx_bd_late_collision +
1993 est->tx_bd_multple_collisions);
1994 spin_unlock_irqrestore(&dev->lock, flags);
/* MAL companion-MAC callback tables: the _sg variant differs only in
 * using the scatter/gather-aware RX peek.
 */
1998 static struct mal_commac_ops emac_commac_ops = {
1999 .poll_tx = &emac_poll_tx,
2000 .poll_rx = &emac_poll_rx,
2001 .peek_rx = &emac_peek_rx,
2005 static struct mal_commac_ops emac_commac_sg_ops = {
2006 .poll_tx = &emac_poll_tx,
2007 .poll_rx = &emac_poll_rx,
2008 .peek_rx = &emac_peek_rx_sg,
2012 /* Ethtool support */
/* ethtool get_link_ksettings: report the cached PHY state, reading the
 * mutable fields under link_lock, and convert the legacy feature
 * bitmasks to link-mode bitmaps.
 */
2012 /* Ethtool support */
2013 static int emac_ethtool_get_link_ksettings(struct net_device *ndev,
2014 struct ethtool_link_ksettings *cmd)
2016 struct emac_instance *dev = netdev_priv(ndev);
2017 u32 supported, advertising;
2019 supported = dev->phy.features;
2020 cmd->base.port = PORT_MII;
2021 cmd->base.phy_address = dev->phy.address;
2023 mutex_lock(&dev->link_lock);
2024 advertising = dev->phy.advertising;
2025 cmd->base.autoneg = dev->phy.autoneg;
2026 cmd->base.speed = dev->phy.speed;
2027 cmd->base.duplex = dev->phy.duplex;
2028 mutex_unlock(&dev->link_lock);
2030 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2032 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
/* ethtool set_link_ksettings: validate the request against the PHY's
 * supported features, then either force speed/duplex or restart
 * autonegotiation, and trigger a link update.
 * NOTE(review): several case labels and error returns are elided in
 * this excerpt.
 */
2039 emac_ethtool_set_link_ksettings(struct net_device *ndev,
2040 const struct ethtool_link_ksettings *cmd)
2042 struct emac_instance *dev = netdev_priv(ndev);
2043 u32 f = dev->phy.features;
2046 ethtool_convert_link_mode_to_legacy_u32(&advertising,
2047 cmd->link_modes.advertising);
2049 DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
2050 cmd->base.autoneg, cmd->base.speed, cmd->base.duplex, advertising);
2052 /* Basic sanity checks */
2053 if (dev->phy.address < 0)
2055 if (cmd->base.autoneg != AUTONEG_ENABLE &&
2056 cmd->base.autoneg != AUTONEG_DISABLE)
2058 if (cmd->base.autoneg == AUTONEG_ENABLE && advertising == 0)
2060 if (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL)
/* Forced mode: the requested speed/duplex must be a supported mode. */
2063 if (cmd->base.autoneg == AUTONEG_DISABLE) {
2064 switch (cmd->base.speed) {
2066 if (cmd->base.duplex == DUPLEX_HALF &&
2067 !(f & SUPPORTED_10baseT_Half))
2069 if (cmd->base.duplex == DUPLEX_FULL &&
2070 !(f & SUPPORTED_10baseT_Full))
2074 if (cmd->base.duplex == DUPLEX_HALF &&
2075 !(f & SUPPORTED_100baseT_Half))
2077 if (cmd->base.duplex == DUPLEX_FULL &&
2078 !(f & SUPPORTED_100baseT_Full))
2082 if (cmd->base.duplex == DUPLEX_HALF &&
2083 !(f & SUPPORTED_1000baseT_Half))
2085 if (cmd->base.duplex == DUPLEX_FULL &&
2086 !(f & SUPPORTED_1000baseT_Full))
2093 mutex_lock(&dev->link_lock);
2094 dev->phy.def->ops->setup_forced(&dev->phy, cmd->base.speed,
2096 mutex_unlock(&dev->link_lock);
/* Autoneg mode: only valid if the PHY supports autonegotiation. */
2099 if (!(f & SUPPORTED_Autoneg))
2102 mutex_lock(&dev->link_lock);
2103 dev->phy.def->ops->setup_aneg(&dev->phy,
2105 (dev->phy.advertising &
2107 ADVERTISED_Asym_Pause)));
2108 mutex_unlock(&dev->link_lock);
2110 emac_force_link_update(dev);
/* ethtool get_ringparam: ring sizes are fixed at compile time, so
 * current and maximum are identical.
 */
2115 static void emac_ethtool_get_ringparam(struct net_device *ndev,
2116 struct ethtool_ringparam *rp)
2118 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
2119 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
/* ethtool get_pauseparam: report autoneg pause advertisement and the
 * currently resolved RX/TX pause state (full duplex only). */
2122 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
2123 struct ethtool_pauseparam *pp)
2125 struct emac_instance *dev = netdev_priv(ndev);
2127 mutex_lock(&dev->link_lock);
2128 if ((dev->phy.features & SUPPORTED_Autoneg) &&
2129 (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
2132 if (dev->phy.duplex == DUPLEX_FULL) {
2134 pp->rx_pause = pp->tx_pause = 1;
2135 else if (dev->phy.asym_pause)
2138 mutex_unlock(&dev->link_lock);
/* Size of this EMAC's contribution to an ethtool register dump:
 * a sub-header plus the raw register block. */
2141 static int emac_get_regs_len(struct emac_instance *dev)
2143 return sizeof(struct emac_ethtool_regs_subhdr) +
2144 sizeof(struct emac_regs);
/* Total register-dump size: header + EMAC + MAL, plus each optional
 * companion block (ZMII/RGMII/TAH) that this instance has. */
2147 static int emac_ethtool_get_regs_len(struct net_device *ndev)
2149 struct emac_instance *dev = netdev_priv(ndev);
2152 size = sizeof(struct emac_ethtool_regs_hdr) +
2153 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal);
2154 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
2155 size += zmii_get_regs_len(dev->zmii_dev);
2156 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
2157 size += rgmii_get_regs_len(dev->rgmii_dev);
2158 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2159 size += tah_get_regs_len(dev->tah_dev);
/* Write this EMAC's sub-header and raw registers into @buf; returns the
 * position just past what was written. The version field encodes which
 * register layout (EMAC4SYNC / EMAC4 / classic) the dump uses. */
2164 static void *emac_dump_regs(struct emac_instance *dev, void *buf)
2166 struct emac_ethtool_regs_subhdr *hdr = buf;
2168 hdr->index = dev->cell_index;
2169 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2170 hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER;
2171 } else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
2172 hdr->version = EMAC4_ETHTOOL_REGS_VER;
2174 hdr->version = EMAC_ETHTOOL_REGS_VER;
2176 memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs));
2177 return (void *)(hdr + 1) + sizeof(struct emac_regs);
/* ethtool get_regs: fill @buf with the MAL, EMAC, and any present
 * companion (ZMII/RGMII/TAH) register dumps, recording in the header
 * which components are included.
 */
2180 static void emac_ethtool_get_regs(struct net_device *ndev,
2181 struct ethtool_regs *regs, void *buf)
2183 struct emac_instance *dev = netdev_priv(ndev);
2184 struct emac_ethtool_regs_hdr *hdr = buf;
2186 hdr->components = 0;
2189 buf = mal_dump_regs(dev->mal, buf);
2190 buf = emac_dump_regs(dev, buf);
2191 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII)) {
2192 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
2193 buf = zmii_dump_regs(dev->zmii_dev, buf);
2195 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2196 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
2197 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
2199 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) {
2200 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
2201 buf = tah_dump_regs(dev->tah_dev, buf);
/* ethtool nway_reset: restart autonegotiation (fails if no PHY or if
 * autoneg is disabled — early-return lines elided here). */
2205 static int emac_ethtool_nway_reset(struct net_device *ndev)
2207 struct emac_instance *dev = netdev_priv(ndev);
2210 DBG(dev, "nway_reset" NL);
2212 if (dev->phy.address < 0)
2215 mutex_lock(&dev->link_lock);
2216 if (!dev->phy.autoneg) {
2221 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
2223 mutex_unlock(&dev->link_lock);
2224 emac_force_link_update(dev);
/* ethtool get_sset_count: only the stats string set is supported. */
2228 static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2230 if (stringset == ETH_SS_STATS)
2231 return EMAC_ETHTOOL_STATS_COUNT;
/* ethtool get_strings: copy the static stat-name table. */
2236 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
2239 if (stringset == ETH_SS_STATS)
2240 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
/* ethtool get_ethtool_stats: dump the normal and error counter structs
 * back to back; their layout must match emac_stats_keys. */
2243 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
2244 struct ethtool_stats *estats,
2247 struct emac_instance *dev = netdev_priv(ndev);
2249 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
2250 tmp_stats += sizeof(dev->stats) / sizeof(u64);
2251 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
/* ethtool get_drvinfo: driver name/version plus a bus string built from
 * the cell index and the device-tree node path.
 */
2254 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2255 struct ethtool_drvinfo *info)
2257 struct emac_instance *dev = netdev_priv(ndev);
2259 strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
2260 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2261 snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
2262 dev->cell_index, dev->ofdev->dev.of_node->full_name);
/* ethtool operations table wired to the handlers above. */
2265 static const struct ethtool_ops emac_ethtool_ops = {
2266 .get_drvinfo = emac_ethtool_get_drvinfo,
2268 .get_regs_len = emac_ethtool_get_regs_len,
2269 .get_regs = emac_ethtool_get_regs,
2271 .nway_reset = emac_ethtool_nway_reset,
2273 .get_ringparam = emac_ethtool_get_ringparam,
2274 .get_pauseparam = emac_ethtool_get_pauseparam,
2276 .get_strings = emac_ethtool_get_strings,
2277 .get_sset_count = emac_ethtool_get_sset_count,
2278 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2280 .get_link = ethtool_op_get_link,
2281 .get_link_ksettings = emac_ethtool_get_link_ksettings,
2282 .set_link_ksettings = emac_ethtool_set_link_ksettings,
/* ndo_do_ioctl: legacy MII ioctls (get PHY id, read/write MII regs) via
 * the driver's own mdio accessors.
 * NOTE(review): the switch statement and case labels are elided in this
 * excerpt.
 */
2285 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2287 struct emac_instance *dev = netdev_priv(ndev);
2288 struct mii_ioctl_data *data = if_mii(rq);
2290 DBG(dev, "ioctl %08x" NL, cmd);
2292 if (dev->phy.address < 0)
2297 data->phy_id = dev->phy.address;
2300 data->val_out = emac_mdio_read(ndev, dev->phy.address,
2305 emac_mdio_write(ndev, dev->phy.address, data->reg_num,
/* One probe-time dependency of an EMAC instance: resolved from a
 * device-tree phandle to a node, its platform device, and that device's
 * driver data. The indices below name the fixed dependency slots. */
2313 struct emac_depentry {
2315 struct device_node *node;
2316 struct platform_device *ofdev;
2320 #define EMAC_DEP_MAL_IDX 0
2321 #define EMAC_DEP_ZMII_IDX 1
2322 #define EMAC_DEP_RGMII_IDX 2
2323 #define EMAC_DEP_TAH_IDX 3
2324 #define EMAC_DEP_MDIO_IDX 4
2325 #define EMAC_DEP_PREV_IDX 5
2326 #define EMAC_DEP_COUNT 6
/* Try to resolve every dependency slot one step further (phandle ->
 * node -> platform device -> drvdata). Returns true only when ALL
 * dependencies have probed (their drvdata is set). Used as the wakeup
 * condition in emac_wait_deps().
 * NOTE(review): the "there" counter increments and continue statements
 * are elided in this excerpt.
 */
2328 static int emac_check_deps(struct emac_instance *dev,
2329 struct emac_depentry *deps)
2332 struct device_node *np;
2334 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2335 /* no dependency on that item, all right */
2336 if (deps[i].phandle == 0) {
2340 /* special case for blist as the dependency might go away */
2341 if (i == EMAC_DEP_PREV_IDX) {
2342 np = *(dev->blist - 1);
2344 deps[i].phandle = 0;
2348 if (deps[i].node == NULL)
2349 deps[i].node = of_node_get(np);
2351 if (deps[i].node == NULL)
2352 deps[i].node = of_find_node_by_phandle(deps[i].phandle);
2353 if (deps[i].node == NULL)
2355 if (deps[i].ofdev == NULL)
2356 deps[i].ofdev = of_find_device_by_node(deps[i].node);
2357 if (deps[i].ofdev == NULL)
2359 if (deps[i].drvdata == NULL)
2360 deps[i].drvdata = platform_get_drvdata(deps[i].ofdev);
2361 if (deps[i].drvdata != NULL)
2364 return there == EMAC_DEP_COUNT;
/* Drop the references on all companion platform devices taken during
 * dependency resolution (of_dev_put tolerates NULL). */
2367 static void emac_put_deps(struct emac_instance *dev)
2369 of_dev_put(dev->mal_dev);
2370 of_dev_put(dev->zmii_dev);
2371 of_dev_put(dev->rgmii_dev);
2372 of_dev_put(dev->mdio_dev);
2373 of_dev_put(dev->tah_dev);
/* Platform-bus notifier: wake emac_wait_deps() whenever any device
 * binds to a driver, so it can re-run emac_check_deps(). */
2376 static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
2379 /* We are only interested in driver binding */
2380 if (action == BUS_NOTIFY_BOUND_DRIVER)
2381 wake_up_all(&emac_probe_wait);
2385 static struct notifier_block emac_of_bus_notifier = {
2386 .notifier_call = emac_of_bus_notify
/* Block (with EMAC_PROBE_DEP_TIMEOUT) until all companion devices
 * (MAL, ZMII, RGMII, TAH, MDIO, previous EMAC in the boot list) have
 * probed, using a bus notifier to re-check on every driver bind.
 * On success the resolved platform devices are stored in @dev; node
 * references are dropped, and device references are dropped on failure.
 * NOTE(review): feature-conditional phandle setup lines are elided in
 * this excerpt.
 */
2389 static int emac_wait_deps(struct emac_instance *dev)
2391 struct emac_depentry deps[EMAC_DEP_COUNT];
2394 memset(&deps, 0, sizeof(deps));
2396 deps[EMAC_DEP_MAL_IDX].phandle = dev->mal_ph;
2397 deps[EMAC_DEP_ZMII_IDX].phandle = dev->zmii_ph;
2398 deps[EMAC_DEP_RGMII_IDX].phandle = dev->rgmii_ph;
2400 deps[EMAC_DEP_TAH_IDX].phandle = dev->tah_ph;
2402 deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph;
/* A non-zero sentinel marks the "previous EMAC" pseudo-dependency. */
2403 if (dev->blist && dev->blist > emac_boot_list)
2404 deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu;
2405 bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier);
2406 wait_event_timeout(emac_probe_wait,
2407 emac_check_deps(dev, deps),
2408 EMAC_PROBE_DEP_TIMEOUT);
2409 bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier);
2410 err = emac_check_deps(dev, deps) ? 0 : -ENODEV;
2411 for (i = 0; i < EMAC_DEP_COUNT; i++) {
2412 of_node_put(deps[i].node);
2414 of_dev_put(deps[i].ofdev);
2417 dev->mal_dev = deps[EMAC_DEP_MAL_IDX].ofdev;
2418 dev->zmii_dev = deps[EMAC_DEP_ZMII_IDX].ofdev;
2419 dev->rgmii_dev = deps[EMAC_DEP_RGMII_IDX].ofdev;
2420 dev->tah_dev = deps[EMAC_DEP_TAH_IDX].ofdev;
2421 dev->mdio_dev = deps[EMAC_DEP_MDIO_IDX].ofdev;
2423 of_dev_put(deps[EMAC_DEP_PREV_IDX].ofdev);
/* Read a u32 device-tree property @name from @np into @val.
 * @fatal: log an error when the property is missing.
 * NOTE(review): the value assignment and return statements are elided
 * in this excerpt.
 */
2427 static int emac_read_uint_prop(struct device_node *np, const char *name,
2428 u32 *val, int fatal)
2431 const u32 *prop = of_get_property(np, name, &len);
2432 if (prop == NULL || len < sizeof(u32)) {
2434 printk(KERN_ERR "%s: missing %s property\n",
2435 np->full_name, name);
/* phylib adjust_link callback: mirror the phy_device state into the
 * driver's own mii_phy structure so the legacy code paths see it.
 */
2442 static void emac_adjust_link(struct net_device *ndev)
2444 struct emac_instance *dev = netdev_priv(ndev);
2445 struct phy_device *phy = dev->phy_dev;
2447 dev->phy.autoneg = phy->autoneg;
2448 dev->phy.speed = phy->speed;
2449 dev->phy.duplex = phy->duplex;
2450 dev->phy.pause = phy->pause;
2451 dev->phy.asym_pause = phy->asym_pause;
2452 dev->phy.advertising = phy->advertising;
/* mii_bus read accessor: delegate to emac_mdio_read, mapping errors to
 * 0xffff (reads as "no PHY") — see the in-body workaround comment. */
2455 static int emac_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
2457 int ret = emac_mdio_read(bus->priv, addr, regnum);
2458 /* This is a workaround for powered down ports/phys.
2459 * In the wild, this was seen on the Cisco Meraki MX60(W).
2460 * This hardware disables ports as part of the handoff
2461 * procedure. Accessing the ports will lead to errors
2462 * (-ETIMEDOUT, -EREMOTEIO) that do more harm than good.
2464 return ret < 0 ? 0xffff : ret;
/* mii_bus write accessor: delegate to emac_mdio_write (return elided). */
2467 static int emac_mii_bus_write(struct mii_bus *bus, int addr,
2468 int regnum, u16 val)
2470 emac_mdio_write(bus->priv, addr, regnum, val);
/* mii_bus reset: reset the whole EMAC (bus->priv is the net_device). */
2474 static int emac_mii_bus_reset(struct mii_bus *bus)
2476 struct emac_instance *dev = netdev_priv(bus->priv);
2478 return emac_reset(dev);
/* Push the mii_phy settings into the phylib phy_device and (re)start
 * autonegotiation / forced-mode programming via phy_start_aneg(). */
2481 static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
2482 struct phy_device *phy_dev)
2484 phy_dev->autoneg = phy->autoneg;
2485 phy_dev->speed = phy->speed;
2486 phy_dev->duplex = phy->duplex;
2487 phy_dev->advertising = phy->advertising;
2488 return phy_start_aneg(phy_dev);
/* mii_phy_ops.setup_aneg for DT/phylib PHYs: enable autoneg with the
 * given advertisement mask. */
2491 static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise)
2493 struct net_device *ndev = phy->dev;
2494 struct emac_instance *dev = netdev_priv(ndev);
2496 phy->autoneg = AUTONEG_ENABLE;
2497 phy->advertising = advertise;
2498 return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
/* mii_phy_ops.setup_forced: disable autoneg and force speed/duplex
 * (the speed/duplex assignments are elided in this excerpt). */
2501 static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd)
2503 struct net_device *ndev = phy->dev;
2504 struct emac_instance *dev = netdev_priv(ndev);
2506 phy->autoneg = AUTONEG_DISABLE;
2509 return emac_mdio_phy_start_aneg(phy, dev->phy_dev);
/* mii_phy_ops.poll_link: refresh phylib status and report link state;
 * on a read failure, fall back to the last value ethtool knows. */
2512 static int emac_mdio_poll_link(struct mii_phy *phy)
2514 struct net_device *ndev = phy->dev;
2515 struct emac_instance *dev = netdev_priv(ndev);
2518 res = phy_read_status(dev->phy_dev);
2520 dev_err(&dev->ofdev->dev, "link update failed (%d).", res);
2521 return ethtool_op_get_link(ndev);
2524 return dev->phy_dev->link;
/* mii_phy_ops.read_link: copy resolved speed/duplex/pause from the
 * phylib phy_device into the mii_phy (error return elided). */
2527 static int emac_mdio_read_link(struct mii_phy *phy)
2529 struct net_device *ndev = phy->dev;
2530 struct emac_instance *dev = netdev_priv(ndev);
2531 struct phy_device *phy_dev = dev->phy_dev;
2534 res = phy_read_status(phy_dev);
2538 phy->speed = phy_dev->speed;
2539 phy->duplex = phy_dev->duplex;
2540 phy->pause = phy_dev->pause;
2541 phy->asym_pause = phy_dev->asym_pause;
/* mii_phy_ops.init for phylib-backed PHYs: start the PHY state machine
 * and (re)program its hardware defaults. */
2545 static int emac_mdio_init_phy(struct mii_phy *phy)
2547 struct net_device *ndev = phy->dev;
2548 struct emac_instance *dev = netdev_priv(ndev);
2550 phy_start(dev->phy_dev);
2551 return phy_init_hw(dev->phy_dev);
/* mii_phy operations used when the PHY comes from the device tree and
 * is driven through phylib. */
2554 static const struct mii_phy_ops emac_dt_mdio_phy_ops = {
2555 .init = emac_mdio_init_phy,
2556 .setup_aneg = emac_mdio_setup_aneg,
2557 .setup_forced = emac_mdio_setup_forced,
2558 .poll_link = emac_mdio_poll_link,
2559 .read_link = emac_mdio_read_link,
/* Find the "mdio" child node of this EMAC, allocate a devm-managed mii_bus,
 * wire up the read/write/reset callbacks (bus->priv = net_device), and
 * register it with the OF MDIO core. The mdio node reference is dropped on
 * all paths via of_node_put().
 * NOTE(review): the error-return statements for the missing-node,
 * unavailable-node and allocation-failure branches appear elided in this
 * extraction.
 */
2562 static int emac_dt_mdio_probe(struct emac_instance *dev)
2564 struct device_node *mii_np;
2567 mii_np = of_get_child_by_name(dev->ofdev->dev.of_node, "mdio");
2569 dev_err(&dev->ofdev->dev, "no mdio definition found.");
2573 if (!of_device_is_available(mii_np)) {
2578 dev->mii_bus = devm_mdiobus_alloc(&dev->ofdev->dev);
2579 if (!dev->mii_bus) {
2584 dev->mii_bus->priv = dev->ndev;
2585 dev->mii_bus->parent = dev->ndev->dev.parent;
2586 dev->mii_bus->name = "emac_mdio";
2587 dev->mii_bus->read = &emac_mii_bus_read;
2588 dev->mii_bus->write = &emac_mii_bus_write;
2589 dev->mii_bus->reset = &emac_mii_bus_reset;
2590 snprintf(dev->mii_bus->id, MII_BUS_ID_SIZE, "%s", dev->ofdev->name);
2591 res = of_mdiobus_register(dev->mii_bus, mii_np);
2593 dev_err(&dev->ofdev->dev, "cannot register MDIO bus %s (%d)",
2594 dev->mii_bus->name, res);
2598 of_node_put(mii_np);
/* Connect to the PHY referenced by the given device-tree phandle using
 * phylib (of_phy_connect with emac_adjust_link as the link-change handler),
 * then synthesize a mii_phy_def from the bound phy driver's id/name and
 * point its ops at the phylib-delegating emac_dt_mdio_phy_ops.
 * NOTE(review): the devm_kzalloc failure check and the interface-mode
 * argument to of_phy_connect appear elided in this extraction.
 */
2602 static int emac_dt_phy_connect(struct emac_instance *dev,
2603 struct device_node *phy_handle)
2605 dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def),
2610 dev->phy_dev = of_phy_connect(dev->ndev, phy_handle, &emac_adjust_link,
2612 if (!dev->phy_dev) {
2613 dev_err(&dev->ofdev->dev, "failed to connect to PHY.\n");
2617 dev->phy.def->phy_id = dev->phy_dev->drv->phy_id;
2618 dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask;
2619 dev->phy.def->name = dev->phy_dev->drv->name;
2620 dev->phy.def->ops = &emac_dt_mdio_phy_ops;
2621 dev->phy.features = dev->phy_dev->supported;
2622 dev->phy.address = dev->phy_dev->mdio.addr;
2623 dev->phy.mode = dev->phy_dev->interface;
/* Device-tree PHY probe: if a "phy-handle" property exists, register our
 * MDIO bus and connect to that PHY; on connect failure the MDIO bus is
 * unregistered again. The phandle node reference is dropped on exit.
 * NOTE(review): the no-phy-handle fallback path and final return value are
 * elided in this extraction.
 */
2627 static int emac_dt_phy_probe(struct emac_instance *dev)
2629 struct device_node *np = dev->ofdev->dev.of_node;
2630 struct device_node *phy_handle;
2633 phy_handle = of_parse_phandle(np, "phy-handle", 0);
2636 res = emac_dt_mdio_probe(dev);
2638 res = emac_dt_phy_connect(dev, phy_handle);
2640 mdiobus_unregister(dev->mii_bus);
2644 of_node_put(phy_handle);
/* Locate and initialize the PHY for this EMAC. In order of precedence:
 *  - PHY-less / fixed-link setups (no phy-address and no phy-map, or an OF
 *    fixed-link node): synthesize feature bits and, for fixed-link, register
 *    it through the OF fixed-link helpers;
 *  - device-tree phy-handle (tried under the RGMII feature gate below);
 *  - legacy scan of the MDIO address space (0..0x1f) honoring phy_map /
 *    busy_phy_map, probing each candidate with emac_mii_phy_probe().
 * Finally runs the PHY's init op, masks out platform-excluded features and
 * either restarts autoneg or forces the best supported speed/duplex.
 * NOTE(review): this extraction drops many lines (braces, returns, some
 * assignments), so the exact control flow cannot be fully verified here.
 */
2648 static int emac_init_phy(struct emac_instance *dev)
2650 struct device_node *np = dev->ofdev->dev.of_node;
2651 struct net_device *ndev = dev->ndev;
2655 dev->phy.dev = ndev;
2656 dev->phy.mode = dev->phy_mode;
2658 /* PHY-less configuration. */
2659 if ((dev->phy_address == 0xffffffff && dev->phy_map == 0xffffffff) ||
2660 of_phy_is_fixed_link(np)) {
2663 /* PHY-less configuration. */
2664 dev->phy.address = -1;
2665 dev->phy.features = SUPPORTED_MII;
2666 if (emac_phy_supports_gige(dev->phy_mode))
2667 dev->phy.features |= SUPPORTED_1000baseT_Full;
2669 dev->phy.features |= SUPPORTED_100baseT_Full;
2672 if (of_phy_is_fixed_link(np)) {
2673 int res = emac_dt_mdio_probe(dev);
2676 res = of_phy_register_fixed_link(np);
/* On fixed-link registration failure, undo the MDIO bus registration. */
2678 mdiobus_unregister(dev->mii_bus);
/* Legacy probe path: busy_phy_map is a module-global shared across EMACs,
 * hence the lock around the whole scan. */
2685 mutex_lock(&emac_phy_map_lock);
2686 phy_map = dev->phy_map | busy_phy_map;
2688 DBG(dev, "PHY maps %08x %08x" NL, dev->phy_map, busy_phy_map);
2690 dev->phy.mdio_read = emac_mdio_read;
2691 dev->phy.mdio_write = emac_mdio_write;
2693 /* Enable internal clock source */
2694 #ifdef CONFIG_PPC_DCR_NATIVE
2695 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2696 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2698 /* PHY clock workaround */
2699 emac_rx_clk_tx(dev);
2701 /* Enable internal clock source on 440GX*/
2702 #ifdef CONFIG_PPC_DCR_NATIVE
2703 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2704 dcri_clrset(SDR0, SDR0_MFR, 0, SDR0_MFR_ECS);
2706 /* Configure EMAC with defaults so we can at least use MDIO
2707 * This is needed mostly for 440GX
2709 if (emac_phy_gpcs(dev->phy.mode)) {
2711 * Make GPCS PHY address equal to EMAC index.
2712 * We probably should take into account busy_phy_map
2713 * and/or phy_map here.
2715 * Note that the busy_phy_map is currently global
2716 * while it should probably be per-ASIC...
2718 dev->phy.gpcs_address = dev->gpcs_address;
2719 if (dev->phy.gpcs_address == 0xffffffff)
2720 dev->phy.address = dev->cell_index;
2723 emac_configure(dev);
/* Try the device-tree phy-handle path first (gated on RGMII feature). */
2725 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
2726 int res = emac_dt_phy_probe(dev);
2730 /* No phy-handle property configured.
2731 * Continue with the existing phy probe
2737 mutex_unlock(&emac_phy_map_lock);
2741 mutex_unlock(&emac_phy_map_lock);
2742 dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
/* If a specific phy-address was given, restrict the scan to that address
 * only by masking all other bits. */
2748 if (dev->phy_address != 0xffffffff)
2749 phy_map = ~(1 << dev->phy_address);
2751 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2752 if (!(phy_map & 1)) {
2754 busy_phy_map |= 1 << i;
2756 /* Quick check if there is a PHY at the address */
2757 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2758 if (r == 0xffff || r < 0)
2760 if (!emac_mii_phy_probe(&dev->phy, i))
2764 /* Enable external clock source */
2765 #ifdef CONFIG_PPC_DCR_NATIVE
2766 if (emac_has_feature(dev, EMAC_FTR_440GX_PHY_CLK_FIX))
2767 dcri_clrset(SDR0, SDR0_MFR, SDR0_MFR_ECS, 0);
2769 mutex_unlock(&emac_phy_map_lock);
2771 printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name);
/* Init PHY and disable platform-excluded features before link setup. */
2777 if (dev->phy.def->ops->init)
2778 dev->phy.def->ops->init(&dev->phy);
2780 /* Disable any PHY features not supported by the platform */
2781 dev->phy.def->features &= ~dev->phy_feat_exc;
2782 dev->phy.features &= ~dev->phy_feat_exc;
2784 /* Setup initial link parameters */
2785 if (dev->phy.features & SUPPORTED_Autoneg) {
2786 adv = dev->phy.features;
2787 if (!emac_has_feature(dev, EMAC_FTR_NO_FLOW_CONTROL_40x))
2788 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2789 /* Restart autonegotiation */
2790 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2792 u32 f = dev->phy.def->features;
2793 int speed = SPEED_10, fd = DUPLEX_HALF;
2795 /* Select highest supported speed/duplex */
2796 if (f & SUPPORTED_1000baseT_Full) {
2799 } else if (f & SUPPORTED_1000baseT_Half)
2801 else if (f & SUPPORTED_100baseT_Full) {
2804 } else if (f & SUPPORTED_100baseT_Half)
2806 else if (f & SUPPORTED_10baseT_Full)
2809 /* Force link parameters */
2810 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
/* Populate the emac_instance from device-tree properties: MAL bindings,
 * FIFO sizes, PHY addressing, TAH/ZMII/RGMII attachments, PHY mode, EMAC
 * version/feature bits, the MAC address and hash-table geometry. Properties
 * read with a "required" flag of 1 are mandatory; optional ones fall back
 * to the defaults assigned below (0xffffffff generally means "not present").
 * NOTE(review): several error-return lines and closing braces are elided in
 * this extraction.
 */
2815 static int emac_init_config(struct emac_instance *dev)
2817 struct device_node *np = dev->ofdev->dev.of_node;
2820 /* Read config from device-tree */
2821 if (emac_read_uint_prop(np, "mal-device", &dev->mal_ph, 1))
2823 if (emac_read_uint_prop(np, "mal-tx-channel", &dev->mal_tx_chan, 1))
2825 if (emac_read_uint_prop(np, "mal-rx-channel", &dev->mal_rx_chan, 1))
2827 if (emac_read_uint_prop(np, "cell-index", &dev->cell_index, 1))
2829 if (emac_read_uint_prop(np, "max-frame-size", &dev->max_mtu, 0))
2830 dev->max_mtu = ETH_DATA_LEN;
2831 if (emac_read_uint_prop(np, "rx-fifo-size", &dev->rx_fifo_size, 0))
2832 dev->rx_fifo_size = 2048;
2833 if (emac_read_uint_prop(np, "tx-fifo-size", &dev->tx_fifo_size, 0))
2834 dev->tx_fifo_size = 2048;
/* Gige FIFO sizes default to the 10/100 values when not specified. */
2835 if (emac_read_uint_prop(np, "rx-fifo-size-gige", &dev->rx_fifo_size_gige, 0))
2836 dev->rx_fifo_size_gige = dev->rx_fifo_size;
2837 if (emac_read_uint_prop(np, "tx-fifo-size-gige", &dev->tx_fifo_size_gige, 0))
2838 dev->tx_fifo_size_gige = dev->tx_fifo_size;
2839 if (emac_read_uint_prop(np, "phy-address", &dev->phy_address, 0))
2840 dev->phy_address = 0xffffffff;
2841 if (emac_read_uint_prop(np, "phy-map", &dev->phy_map, 0))
2842 dev->phy_map = 0xffffffff;
2843 if (emac_read_uint_prop(np, "gpcs-address", &dev->gpcs_address, 0))
2844 dev->gpcs_address = 0xffffffff;
/* The OPB bus frequency comes from the parent (bus) node. */
2845 if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
2847 if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
2849 if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
2851 if (emac_read_uint_prop(np, "mdio-device", &dev->mdio_ph, 0))
2853 if (emac_read_uint_prop(np, "zmii-device", &dev->zmii_ph, 0))
2855 if (emac_read_uint_prop(np, "zmii-channel", &dev->zmii_port, 0))
2856 dev->zmii_port = 0xffffffff;
2857 if (emac_read_uint_prop(np, "rgmii-device", &dev->rgmii_ph, 0))
2859 if (emac_read_uint_prop(np, "rgmii-channel", &dev->rgmii_port, 0))
2860 dev->rgmii_port = 0xffffffff;
2861 if (emac_read_uint_prop(np, "fifo-entry-size", &dev->fifo_entry_size, 0))
2862 dev->fifo_entry_size = 16;
2863 if (emac_read_uint_prop(np, "mal-burst-size", &dev->mal_burst_size, 0))
2864 dev->mal_burst_size = 256;
2866 /* PHY mode needs some decoding */
2867 dev->phy_mode = of_get_phy_mode(np);
2868 if (dev->phy_mode < 0)
2869 dev->phy_mode = PHY_MODE_NA;
/* Derive feature bits from the compatible strings; more specific SoC
 * compatibles layer clock-fix / jumbo / no-half-duplex quirks on top. */
2871 /* Check EMAC version */
2872 if (of_device_is_compatible(np, "ibm,emac4sync")) {
2873 dev->features |= (EMAC_FTR_EMAC4 | EMAC_FTR_EMAC4SYNC);
2874 if (of_device_is_compatible(np, "ibm,emac-460ex") ||
2875 of_device_is_compatible(np, "ibm,emac-460gt"))
2876 dev->features |= EMAC_FTR_460EX_PHY_CLK_FIX;
2877 if (of_device_is_compatible(np, "ibm,emac-405ex") ||
2878 of_device_is_compatible(np, "ibm,emac-405exr"))
2879 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2880 if (of_device_is_compatible(np, "ibm,emac-apm821xx")) {
2881 dev->features |= (EMAC_APM821XX_REQ_JUMBO_FRAME_SIZE |
2882 EMAC_FTR_APM821XX_NO_HALF_DUPLEX |
2883 EMAC_FTR_460EX_PHY_CLK_FIX);
2885 } else if (of_device_is_compatible(np, "ibm,emac4")) {
2886 dev->features |= EMAC_FTR_EMAC4;
2887 if (of_device_is_compatible(np, "ibm,emac-440gx"))
2888 dev->features |= EMAC_FTR_440GX_PHY_CLK_FIX;
2890 if (of_device_is_compatible(np, "ibm,emac-440ep") ||
2891 of_device_is_compatible(np, "ibm,emac-440gr"))
2892 dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
2893 if (of_device_is_compatible(np, "ibm,emac-405ez")) {
2894 #ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL
2895 dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
2897 printk(KERN_ERR "%s: Flow control not disabled!\n",
2905 /* Fixup some feature bits based on the device tree */
2906 if (of_get_property(np, "has-inverted-stacr-oc", NULL))
2907 dev->features |= EMAC_FTR_STACR_OC_INVERT;
2908 if (of_get_property(np, "has-new-stacr-staopc", NULL))
2909 dev->features |= EMAC_FTR_HAS_NEW_STACR;
2911 /* CAB lacks the appropriate properties */
2912 if (of_device_is_compatible(np, "ibm,emac-axon"))
2913 dev->features |= EMAC_FTR_HAS_NEW_STACR |
2914 EMAC_FTR_STACR_OC_INVERT;
2916 /* Enable TAH/ZMII/RGMII features as found */
2917 if (dev->tah_ph != 0) {
2918 #ifdef CONFIG_IBM_EMAC_TAH
2919 dev->features |= EMAC_FTR_HAS_TAH;
2921 printk(KERN_ERR "%s: TAH support not enabled !\n",
2927 if (dev->zmii_ph != 0) {
2928 #ifdef CONFIG_IBM_EMAC_ZMII
2929 dev->features |= EMAC_FTR_HAS_ZMII;
2931 printk(KERN_ERR "%s: ZMII support not enabled !\n",
2937 if (dev->rgmii_ph != 0) {
2938 #ifdef CONFIG_IBM_EMAC_RGMII
2939 dev->features |= EMAC_FTR_HAS_RGMII;
2941 printk(KERN_ERR "%s: RGMII support not enabled !\n",
2947 /* Read MAC-address */
2948 p = of_get_property(np, "local-mac-address", NULL);
2950 printk(KERN_ERR "%s: Can't find local-mac-address property\n",
2954 memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
2956 /* IAHT and GAHT filter parameterization */
2957 if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
2958 dev->xaht_slots_shift = EMAC4SYNC_XAHT_SLOTS_SHIFT;
2959 dev->xaht_width_shift = EMAC4SYNC_XAHT_WIDTH_SHIFT;
2961 dev->xaht_slots_shift = EMAC4_XAHT_SLOTS_SHIFT;
2962 dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
2965 DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
2966 DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
2967 DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);
2968 DBG(dev, "max_mtu : %d\n", dev->max_mtu);
2969 DBG(dev, "OPB freq : %d\n", dev->opb_bus_freq);
/* net_device_ops for non-gige EMACs: plain (non-scatter-gather) transmit
 * path and no MTU-change hook (contrast with emac_gige_netdev_ops below).
 */
2974 static const struct net_device_ops emac_netdev_ops = {
2975 .ndo_open = emac_open,
2976 .ndo_stop = emac_close,
2977 .ndo_get_stats = emac_stats,
2978 .ndo_set_rx_mode = emac_set_multicast_list,
2979 .ndo_do_ioctl = emac_ioctl,
2980 .ndo_tx_timeout = emac_tx_timeout,
2981 .ndo_validate_addr = eth_validate_addr,
2982 .ndo_set_mac_address = emac_set_mac_address,
2983 .ndo_start_xmit = emac_start_xmit,
/* net_device_ops for gigabit-capable EMACs: uses the scatter-gather
 * transmit routine (emac_start_xmit_sg) and supports runtime MTU changes.
 */
2986 static const struct net_device_ops emac_gige_netdev_ops = {
2987 .ndo_open = emac_open,
2988 .ndo_stop = emac_close,
2989 .ndo_get_stats = emac_stats,
2990 .ndo_set_rx_mode = emac_set_multicast_list,
2991 .ndo_do_ioctl = emac_ioctl,
2992 .ndo_tx_timeout = emac_tx_timeout,
2993 .ndo_validate_addr = eth_validate_addr,
2994 .ndo_set_mac_address = emac_set_mac_address,
2995 .ndo_start_xmit = emac_start_xmit_sg,
2996 .ndo_change_mtu = emac_change_mtu,
/* Platform-device probe: allocate the net_device, parse the device tree
 * (emac_init_config), map IRQs and registers, wait for and bind the MAL /
 * ZMII / RGMII / TAH dependencies, find the PHY, then register the netdev.
 * Errors unwind in reverse order through the goto labels near the end.
 * NOTE(review): many unwind labels, returns and braces are elided in this
 * extraction, so the exact error-path ordering cannot be fully verified.
 */
2999 static int emac_probe(struct platform_device *ofdev)
3001 struct net_device *ndev;
3002 struct emac_instance *dev;
3003 struct device_node *np = ofdev->dev.of_node;
3004 struct device_node **blist = NULL;
3007 /* Skip unused/unwired EMACS. We leave the check for an unused
3008 * property here for now, but new flat device trees should set a
3009 * status property to "disabled" instead.
3011 if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
3014 /* Find ourselves in the bootlist if we are there */
3015 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3016 if (emac_boot_list[i] == np)
3017 blist = &emac_boot_list[i];
3019 /* Allocate our net_device structure */
3021 ndev = alloc_etherdev(sizeof(struct emac_instance));
3025 dev = netdev_priv(ndev);
3029 SET_NETDEV_DEV(ndev, &ofdev->dev);
3031 /* Initialize some embedded data structures */
3032 mutex_init(&dev->mdio_lock);
3033 mutex_init(&dev->link_lock);
3034 spin_lock_init(&dev->lock);
3035 INIT_WORK(&dev->reset_work, emac_reset_work);
3037 /* Init various config data based on device-tree */
3038 err = emac_init_config(dev);
3042 /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
3043 dev->emac_irq = irq_of_parse_and_map(np, 0);
3044 dev->wol_irq = irq_of_parse_and_map(np, 1);
3045 if (!dev->emac_irq) {
3046 printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
3049 ndev->irq = dev->emac_irq;
/* Map the EMAC register block. */
3052 if (of_address_to_resource(np, 0, &dev->rsrc_regs)) {
3053 printk(KERN_ERR "%s: Can't get registers address\n",
3057 // TODO : request_mem_region
3058 dev->emacp = ioremap(dev->rsrc_regs.start,
3059 resource_size(&dev->rsrc_regs));
3060 if (dev->emacp == NULL) {
3061 printk(KERN_ERR "%s: Can't map device registers!\n",
3067 /* Wait for dependent devices */
3068 err = emac_wait_deps(dev);
3071 "%s: Timeout waiting for dependent devices\n",
3073 /* display more info about what's missing ? */
3076 dev->mal = platform_get_drvdata(dev->mal_dev);
3077 if (dev->mdio_dev != NULL)
3078 dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
3080 /* Register with MAL */
3081 dev->commac.ops = &emac_commac_ops;
3082 dev->commac.dev = dev;
3083 dev->commac.tx_chan_mask = MAL_CHAN_MASK(dev->mal_tx_chan);
3084 dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan);
3085 err = mal_register_commac(dev->mal, &dev->commac);
3087 printk(KERN_ERR "%s: failed to register with mal %s!\n",
3088 np->full_name, dev->mal_dev->dev.of_node->full_name);
3091 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
3092 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
3094 /* Get pointers to BD rings */
3096 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
3098 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal, dev->mal_rx_chan);
3100 DBG(dev, "tx_desc %p" NL, dev->tx_desc);
3101 DBG(dev, "rx_desc %p" NL, dev->rx_desc);
/* Clear descriptor rings and skb bookkeeping before enabling anything. */
3104 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
3105 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
3106 memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
3107 memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
3109 /* Attach to ZMII, if needed */
3110 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
3111 (err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
3112 goto err_unreg_commac;
3114 /* Attach to RGMII, if needed */
3115 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII) &&
3116 (err = rgmii_attach(dev->rgmii_dev, dev->rgmii_port, dev->phy_mode)) != 0)
3117 goto err_detach_zmii;
3119 /* Attach to TAH, if needed */
3120 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
3121 (err = tah_attach(dev->tah_dev, dev->tah_port)) != 0)
3122 goto err_detach_rgmii;
3124 /* Set some link defaults before we can find out real parameters */
3125 dev->phy.speed = SPEED_100;
3126 dev->phy.duplex = DUPLEX_FULL;
3127 dev->phy.autoneg = AUTONEG_DISABLE;
3128 dev->phy.pause = dev->phy.asym_pause = 0;
3129 dev->stop_timeout = STOP_TIMEOUT_100;
3130 INIT_DELAYED_WORK(&dev->link_work, emac_link_timer);
3132 /* Some SoCs like APM821xx does not support Half Duplex mode. */
3133 if (emac_has_feature(dev, EMAC_FTR_APM821XX_NO_HALF_DUPLEX)) {
3134 dev->phy_feat_exc = (SUPPORTED_1000baseT_Half |
3135 SUPPORTED_100baseT_Half |
3136 SUPPORTED_10baseT_Half);
3139 /* Find PHY if any */
3140 err = emac_init_phy(dev);
3142 goto err_detach_tah;
/* Advertise checksum offload + SG; pick the SG-capable ops for gige. */
3145 ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
3146 ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
3148 ndev->watchdog_timeo = 5 * HZ;
3149 if (emac_phy_supports_gige(dev->phy_mode)) {
3150 ndev->netdev_ops = &emac_gige_netdev_ops;
3151 dev->commac.ops = &emac_commac_sg_ops;
3153 ndev->netdev_ops = &emac_netdev_ops;
3154 ndev->ethtool_ops = &emac_ethtool_ops;
3156 /* MTU range: 46 - 1500 or whatever is in OF */
3157 ndev->min_mtu = EMAC_MIN_MTU;
3158 ndev->max_mtu = dev->max_mtu;
3160 netif_carrier_off(ndev);
3162 err = register_netdev(ndev);
3164 printk(KERN_ERR "%s: failed to register net device (%d)!\n",
3165 np->full_name, err);
3166 goto err_detach_tah;
3169 /* Set our drvdata last as we don't want them visible until we are
3173 platform_set_drvdata(ofdev, dev);
3175 /* There's a new kid in town ! Let's tell everybody */
3176 wake_up_all(&emac_probe_wait);
3179 printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
3180 ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
3182 if (dev->phy_mode == PHY_MODE_SGMII)
3183 printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
3185 if (dev->phy.address >= 0)
3186 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
3187 dev->phy.def->name, dev->phy.address);
/* Error unwind: detach in reverse order of attachment. */
3192 /* I have a bad feeling about this ... */
3195 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
3196 tah_detach(dev->tah_dev, dev->tah_port);
3198 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
3199 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
3201 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
3202 zmii_detach(dev->zmii_dev, dev->zmii_port);
3204 mal_unregister_commac(dev->mal, &dev->commac);
3208 iounmap(dev->emacp);
3211 irq_dispose_mapping(dev->wol_irq);
3213 irq_dispose_mapping(dev->emac_irq);
3217 /* if we were on the bootlist, remove us as we won't show up and
3218 * wake up all waiters to notify them in case they were waiting
3223 wake_up_all(&emac_probe_wait);
/* Platform-device remove: unregister the netdev first (stops new activity),
 * cancel pending reset work, detach TAH/RGMII/ZMII, disconnect the phylib
 * PHY and MDIO bus if present, release the busy_phy_map slot, unregister
 * from the MAL, unmap registers/IRQs and free the net_device — the reverse
 * of emac_probe().
 */
3228 static int emac_remove(struct platform_device *ofdev)
3230 struct emac_instance *dev = platform_get_drvdata(ofdev);
3232 DBG(dev, "remove" NL);
3234 unregister_netdev(dev->ndev);
3236 cancel_work_sync(&dev->reset_work);
3238 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
3239 tah_detach(dev->tah_dev, dev->tah_port);
3240 if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII))
3241 rgmii_detach(dev->rgmii_dev, dev->rgmii_port);
3242 if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
3243 zmii_detach(dev->zmii_dev, dev->zmii_port);
3246 phy_disconnect(dev->phy_dev);
3249 mdiobus_unregister(dev->mii_bus);
/* Free this PHY address in the module-global busy map for other EMACs. */
3251 busy_phy_map &= ~(1 << dev->phy.address);
3252 DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
3254 mal_unregister_commac(dev->mal, &dev->commac);
3257 iounmap(dev->emacp);
3260 irq_dispose_mapping(dev->wol_irq);
3262 irq_dispose_mapping(dev->emac_irq);
3264 free_netdev(dev->ndev);
/* OF match table: the three EMAC generations this driver binds to.
 * emac_init_config() refines features further from more specific
 * compatibles and boolean properties.
 */
/* XXX Features in here should be replaced by properties... */
3270 static const struct of_device_id emac_match[] =
3274 .compatible = "ibm,emac",
3278 .compatible = "ibm,emac4",
3282 .compatible = "ibm,emac4sync",
3286 MODULE_DEVICE_TABLE(of, emac_match);
/* Platform driver glue tying the OF match table to probe/remove. */
3288 static struct platform_driver emac_driver = {
3291 .of_match_table = emac_match,
3293 .probe = emac_probe,
3294 .remove = emac_remove,
/* Scan the whole device tree for EMAC nodes matching emac_match, skipping
 * ones marked "unused", and record up to EMAC_BOOT_LIST_SIZE of them in
 * emac_boot_list (holding a node reference each), sorted ascending by
 * cell-index so probe ordering matches hardware numbering.
 */
3297 static void __init emac_make_bootlist(void)
3299 struct device_node *np = NULL;
3301 int cell_indices[EMAC_BOOT_LIST_SIZE];
3304 while((np = of_find_all_nodes(np)) != NULL) {
3307 if (of_match_node(emac_match, np) == NULL)
3309 if (of_get_property(np, "unused", NULL))
3311 idx = of_get_property(np, "cell-index", NULL);
3314 cell_indices[i] = *idx;
3315 emac_boot_list[i++] = of_node_get(np);
3316 if (i >= EMAC_BOOT_LIST_SIZE) {
3323 /* Bubble sort them (doh, what a creative algorithm :-) */
3324 for (i = 0; max > 1 && (i < (max - 1)); i++)
3325 for (j = i; j < max; j++) {
3326 if (cell_indices[i] > cell_indices[j]) {
3327 swap(emac_boot_list[i], emac_boot_list[j]);
3328 swap(cell_indices[i], cell_indices[j]);
/* Module init: announce the driver, build the sorted EMAC boot list, init
 * submodules (elided in this extraction) and register the platform driver.
 */
3333 static int __init emac_init(void)
3337 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
3339 /* Build EMAC boot list */
3340 emac_make_bootlist();
3342 /* Init submodules */
3355 rc = platform_driver_register(&emac_driver);
/* Module exit: unregister the platform driver and drop the device-node
 * references taken by emac_make_bootlist().
 */
3373 static void __exit emac_exit(void)
3377 platform_driver_unregister(&emac_driver);
3384 /* Destroy EMAC boot list */
3385 for (i = 0; i < EMAC_BOOT_LIST_SIZE; i++)
3386 of_node_put(emac_boot_list[i]);
/* Standard module entry/exit registration. */
3389 module_init(emac_init);
3390 module_exit(emac_exit);