/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt)				"bcmgenet: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <net/arp.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"
/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_Q16_TX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
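
/* Worked example (a sketch of the arithmetic, assuming the configuration
 * this file describes below: TOTAL_DESC = 256 descriptors, 4 Tx queues of
 * 32 BDs each): the default queue 16 is left with 256 - 4 * 32 = 128
 * descriptors, matching the tx_cbs partition documented above
 * bcmgenet_init_tx_queues().
 */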
#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)
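
/* Worked example (a sketch, assuming a hypothetical words_per_bd of 2):
 * DMA_DESC_SIZE is then 2 * sizeof(u32) = 8 bytes, so the TDMA/RDMA
 * register blocks start 256 * 8 = 0x800 bytes past tdma_offset and
 * rdma_offset, immediately after the descriptor array they skip.
 */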
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	__raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
					    void __iomem *d)
{
	return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	__raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);
	/* Register writes to the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet, so save these expensive
	 * writes unless the platform is explicitly configured for
	 * 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		__raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}
/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_length_status(priv, d, val);
	dmadesc_set_addr(priv, d, addr);
}
static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register reads from the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet, so save these expensive
	 * reads unless the platform is explicitly configured for
	 * 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}
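
/* Typical pairing of the helpers above (an observation, not additional
 * driver logic): the transmit path programs a descriptor in one shot with
 * dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status), while
 * teardown paths can recover the buffer address they programmed earlier
 * with dmadesc_get_addr() before unmapping it.
 */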
#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)
static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}
/* These macros are defined to deal with the register map changes
 * between GENET 1.1 and GENET 2. Only those currently used by the
 * driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return __raw_readl(priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		__raw_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}
static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return __raw_readl(priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		__raw_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}
/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
};

static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
};

static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
};

/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;
static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
/* RDMA/TDMA ring registers and accessors: we merge the common fields
 * and just prefix with T/R the registers having different meanings
 * depending on the direction.
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
};
/* GENET v4 supports 40-bit pointer addressing; for obvious reasons
 * the LO and HI word parts are contiguous, but this offsets the
 * other registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_READ_PTR_HI]		= 0x04,
	[TDMA_CONS_INDEX]		= 0x08,
	[TDMA_PROD_INDEX]		= 0x0C,
	[DMA_RING_BUF_SIZE]		= 0x10,
	[DMA_START_ADDR]		= 0x14,
	[DMA_START_ADDR_HI]		= 0x18,
	[DMA_END_ADDR]			= 0x1C,
	[DMA_END_ADDR_HI]		= 0x20,
	[DMA_MBUF_DONE_THRESH]		= 0x24,
	[TDMA_FLOW_PERIOD]		= 0x28,
	[TDMA_WRITE_PTR]		= 0x2C,
	[TDMA_WRITE_PTR_HI]		= 0x30,
};
static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]			= 0x00,
	[TDMA_CONS_INDEX]		= 0x04,
	[TDMA_PROD_INDEX]		= 0x08,
	[DMA_RING_BUF_SIZE]		= 0x0C,
	[DMA_START_ADDR]		= 0x10,
	[DMA_END_ADDR]			= 0x14,
	[DMA_MBUF_DONE_THRESH]		= 0x18,
	[TDMA_FLOW_PERIOD]		= 0x1C,
	[TDMA_WRITE_PTR]		= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;
static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	__raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}
static int bcmgenet_get_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcmgenet_set_settings(struct net_device *dev,
				 struct ethtool_cmd *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!priv->phydev)
		return -ENODEV;

	return phy_ethtool_sset(priv->phydev, cmd);
}
static int bcmgenet_set_rx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 rbuf_chk_ctrl;
	bool rx_csum_en;

	rx_csum_en = !!(wanted & NETIF_F_RXCSUM);

	rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);

	/* enable rx checksumming */
	if (rx_csum_en)
		rbuf_chk_ctrl |= RBUF_RXCHK_EN;
	else
		rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
	priv->desc_rxchk_en = rx_csum_en;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (rx_csum_en && priv->crc_fwd_en)
		rbuf_chk_ctrl |= RBUF_SKIP_FCS;
	else
		rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;

	bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);

	return 0;
}
static int bcmgenet_set_tx_csum(struct net_device *dev,
				netdev_features_t wanted)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	bool desc_64b_en;
	u32 tbuf_ctrl, rbuf_ctrl;

	tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
	rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);

	desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));

	/* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
	if (desc_64b_en) {
		tbuf_ctrl |= RBUF_64B_EN;
		rbuf_ctrl |= RBUF_64B_EN;
	} else {
		tbuf_ctrl &= ~RBUF_64B_EN;
		rbuf_ctrl &= ~RBUF_64B_EN;
	}
	priv->desc_64b_en = desc_64b_en;

	bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
	bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);

	return 0;
}
static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcmgenet_set_tx_csum(dev, wanted);
	if (changed & (NETIF_F_RXCSUM))
		ret = bcmgenet_set_rx_csum(dev, wanted);

	return ret;
}
static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};
#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}
/* There is a 0xC gap between the end of RX and beginning of TX stats and
 * then between the end of TX stats and the beginning of the RX RUNT stats.
 */
#define BCMGENET_STAT_OFFSET	0xc
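
/* Worked example (a sketch of the arithmetic, not extra driver logic):
 * when bcmgenet_update_mib_counters() walks bcmgenet_gstrings_stats[]
 * with a running byte offset j, any counter outside the RX MIB block is
 * read from UMAC_MIB_START + j + BCMGENET_STAT_OFFSET, i.e. the 0xC-byte
 * gap described above is skipped on top of the structure offset.
 */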
/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
	strlcpy(info->version, "v2.0", sizeof(info->version));
	info->n_stats = BCMGENET_STATS_LEN;
}

static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
		case BCMGENET_STAT_MIB_RX:
		case BCMGENET_STAT_MIB_TX:
		case BCMGENET_STAT_RUNT:
			if (s->type != BCMGENET_STAT_MIB_RX)
				offset = BCMGENET_STAT_OFFSET;
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			break;
		case BCMGENET_STAT_MISC:
			val = bcmgenet_umac_readl(priv, s->reg_offset);
			/* clear if overflowed */
			if (val == ~0)
				bcmgenet_umac_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}
static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(u32 *)p;
	}
}
static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27MHz clock automatically */
	reg = __raw_readl(priv->base + off);
	if (enable)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	__raw_writel(reg, priv->base + off);

	/* Do the same for RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
}
static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(priv->phydev, e);
}
static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
	int ret = 0;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false);
	} else {
		ret = phy_init_eee(priv->phydev, 0);
		if (ret) {
			netif_err(priv, hw, dev, "EEE initialization failed\n");
			return ret;
		}

		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, true);
	}

	return phy_ethtool_set_eee(priv->phydev, e);
}
static int bcmgenet_nway_reset(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return genphy_restart_aneg(priv->phydev);
}
/* standard ethtool support functions. */
static struct ethtool_ops bcmgenet_ethtool_ops = {
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_settings		= bcmgenet_get_settings,
	.set_settings		= bcmgenet_set_settings,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
	.nway_reset		= bcmgenet_nway_reset,
};
/* Power down the unimac, based on mode. */
static void bcmgenet_power_down(struct bcmgenet_priv *priv,
				enum bcmgenet_power_mode mode)
{
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			reg |= (EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		}
		break;
	default:
		break;
	}
}
static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
				EXT_PWR_DOWN_BIAS);
		/* fallthrough */
	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		reg |= EXT_PWR_DN_EN_LD;
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}

	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

	if (mode == GENET_POWER_PASSIVE)
		bcmgenet_mii_reset(priv->dev);
}
/* ioctl handles special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int val = 0;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			val = -ENODEV;
		else
			val = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;

	default:
		val = -EINVAL;
		break;
	}

	return val;
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}
/* Simple helper to free a control block's resources */
static void bcmgenet_free_cb(struct enet_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}
static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv,
						  struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(priv,
				 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
						 struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(priv,
				 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
					       struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
				 INTRL2_CPU_MASK_CLEAR);
	priv->int1_mask &= ~(1 << ring->index);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
						struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
				 INTRL2_CPU_MASK_SET);
	priv->int1_mask |= (1 << ring->index);
}
/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
					  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	unsigned int pkts_compl = 0;
	unsigned int c_index;
	unsigned int txbds_ready;
	unsigned int txbds_processed = 0;

	/* Compute how many buffers are transmitted since last xmit call */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	c_index &= DMA_C_INDEX_MASK;

	if (likely(c_index >= ring->c_index))
		txbds_ready = c_index - ring->c_index;
	else
		txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
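
	/* Worked example of the wrap-around branch (a sketch, assuming the
	 * 16-bit DMA_C_INDEX_MASK = 0xffff this driver uses): with
	 * ring->c_index == 0xfffe and a hardware c_index of 0x0002,
	 * txbds_ready = 0x10000 - 0xfffe + 0x0002 = 4 completed BDs.
	 */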

	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);

	/* Reclaim transmitted buffers */
	while (txbds_processed < txbds_ready) {
		tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
		if (tx_cb_ptr->skb) {
			pkts_compl++;
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
			dma_unmap_single(&dev->dev,
					 dma_unmap_addr(tx_cb_ptr, dma_addr),
					 tx_cb_ptr->skb->len,
					 DMA_TO_DEVICE);
			bcmgenet_free_cb(tx_cb_ptr);
		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
			dev->stats.tx_bytes +=
				dma_unmap_len(tx_cb_ptr, dma_len);
			dma_unmap_page(&dev->dev,
				       dma_unmap_addr(tx_cb_ptr, dma_addr),
				       dma_unmap_len(tx_cb_ptr, dma_len),
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
		}

		txbds_processed++;
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
		else
			ring->clean_ptr = ring->cb_ptr;
	}

	ring->free_bds += txbds_processed;
	ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;

	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
		txq = netdev_get_tx_queue(dev, ring->queue);
		if (netif_tx_queue_stopped(txq))
			netif_tx_wake_queue(txq);
	}

	return pkts_compl;
}
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
					struct bcmgenet_tx_ring *ring)
{
	unsigned int released;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}
static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_tx_ring *ring =
		container_of(napi, struct bcmgenet_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);

	if (work_done == 0) {
		napi_complete(napi);
		ring->int_enable(ring->priv, ring);

		return 0;
	}

	return budget;
}
static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_is_multiqueue(dev)) {
		for (i = 0; i < priv->hw_params->tx_queues; i++)
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	}

	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}
/* Transmits a single SKB (either the head of a fragment list or a
 * lone SKB); the caller must hold priv->lock.
 */
static int bcmgenet_xmit_single(struct net_device *dev,
				struct sk_buff *skb,
				u16 dma_desc_flags,
				struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	unsigned int skb_len;
	dma_addr_t mapping;
	u32 length_status;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = skb;

	skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
		dev_kfree_skb(skb);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
			DMA_TX_APPEND_CRC;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		length_status |= DMA_TX_DO_CSUM;

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);

	/* Decrement total BD count and advance our write pointer */
	ring->free_bds -= 1;
	ring->prod_index += 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	return 0;
}
/* Transmit a SKB fragment */
static int bcmgenet_xmit_frag(struct net_device *dev,
			      skb_frag_t *frag,
			      u16 dma_desc_flags,
			      struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct enet_cb *tx_cb_ptr;
	dma_addr_t mapping;
	int ret;

	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

	if (unlikely(!tx_cb_ptr))
		BUG();

	tx_cb_ptr->skb = NULL;

	mapping = skb_frag_dma_map(kdev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
			  __func__);
		return ret;
	}

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
		    (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));

	ring->free_bds -= 1;
	ring->prod_index += 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	return 0;
}
/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
					    struct sk_buff *skb)
{
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	u16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If the 64 byte status block is enabled, we must make sure
		 * the skb has enough headroom for us to insert it.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		dev_kfree_skb(skb);
		if (!new_skb) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = new_skb;
	}

	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
				(offset + skb->csum_offset);

		/* Set the length valid bit for TCP and UDP and just set
		 * the special UDP flag for IPv4, else just set to 0.
		 */
		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			tx_csum_info |= STATUS_TX_CSUM_LV;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
		} else {
			tx_csum_info = 0;
		}

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}
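
/* Worked example for bcmgenet_put_tx_csum() above (a sketch, assuming a
 * plain IPv4/TCP frame with no VLAN tag: 14B Ethernet + 20B IP headers):
 * the checksum start sits at byte 34 of the original frame, so after the
 * 64B status block is pushed skb_checksum_start_offset() returns 98 and
 * offset = 98 - 64 = 34; with TCP's csum_offset of 16, the hardware is
 * told to start summing at byte 34 and store the result at byte 50, with
 * STATUS_TX_CSUM_LV set.
 */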
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_tx_ring *ring = NULL;
	struct netdev_queue *txq;
	unsigned long flags = 0;
	int nr_frags, index;
	u16 dma_desc_flags;
	int ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet transmitted through ring 16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue)
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
	if (index == 0)
		index = DESC_INDEX;
	else
		index -= 1;

	nr_frags = skb_shinfo(skb)->nr_frags;
	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->free_bds <= nr_frags + 1) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
			   __func__, index, ring->queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	if (skb_padto(skb, ETH_ZLEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* set the SKB transmit checksum */
	if (priv->desc_64b_en) {
		skb = bcmgenet_put_tx_csum(dev, skb);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	dma_desc_flags = DMA_SOP;
	if (nr_frags == 0)
		dma_desc_flags |= DMA_EOP;

	/* Transmit single SKB or head of fragment list */
	ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
	if (ret) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* xmit fragments */
	for (i = 0; i < nr_frags; i++) {
		ret = bcmgenet_xmit_frag(dev,
					 &skb_shinfo(skb)->frags[i],
					 (i == nr_frags - 1) ? DMA_EOP : 0,
					 ring);
		if (ret) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_tx_timestamp(skb);

	/* we kept a software copy of how much we should advance the TDMA
	 * producer index, now write it down to the hardware
	 */
	bcmgenet_tdma_ring_writel(priv, ring->index,
				  ring->prod_index, TDMA_PROD_INDEX);

	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
		netif_tx_stop_queue(txq);

out:
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;
}
static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret;

	skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
	if (!skb)
		return -ENOMEM;

	/* a caller did not release this control block */
	WARN_ON(cb->skb != NULL);
	cb->skb = skb;
	mapping = dma_map_single(kdev, skb->data,
				 priv->rx_buf_len, DMA_FROM_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.rx_dma_failed++;
		bcmgenet_free_cb(cb);
		netif_err(priv, rx_err, priv->dev,
			  "%s DMA map failed\n", __func__);
		return ret;
	}

	dma_unmap_addr_set(cb, dma_addr, mapping);
	/* assign packet, prepare descriptor, and advance pointer */
	dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);

	/* turn on the newly assigned BD for DMA to use */
	priv->rx_bd_assign_index++;
	priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);

	priv->rx_bd_assign_ptr = priv->rx_bds +
		(priv->rx_bd_assign_index * DMA_DESC_SIZE);

	return 0;
}
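
/* Note on bcmgenet_rx_refill()'s wrap arithmetic above (an observation,
 * not additional driver logic): masking with (priv->num_rx_bds - 1) only
 * wraps correctly because num_rx_bds is set to TOTAL_DESC, a power of
 * two; e.g. with 256 descriptors, (255 + 1) & 0xff wraps back to 0.
 */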
/* bcmgenet_desc_rx - descriptor based rx process.
 * This could be called from the bottom half, or from the NAPI poll method.
 */
static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
				     unsigned int budget)
{
	struct net_device *dev = priv->dev;
	struct enet_cb *cb;
	struct sk_buff *skb;
	u32 dma_length_status;
	unsigned long dma_flag;
	int len, err;
	unsigned int rxpktprocessed = 0, rxpkttoprocess;
	unsigned int p_index;
	unsigned int chksum_ok = 0;

	p_index = bcmgenet_rdma_ring_readl(priv, DESC_INDEX, RDMA_PROD_INDEX);
	p_index &= DMA_P_INDEX_MASK;

	if (p_index < priv->rx_c_index)
		rxpkttoprocess = (DMA_C_INDEX_MASK + 1) -
			priv->rx_c_index + p_index;
	else
		rxpkttoprocess = p_index - priv->rx_c_index;

	netif_dbg(priv, rx_status, dev,
		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);

	while ((rxpktprocessed < rxpkttoprocess) &&
	       (rxpktprocessed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = cb->skb;

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcmgenet_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;
			goto refill;
		}

		/* Unmap the packet contents such that we can use the
		 * RSV from the 64 bytes descriptor when enabled and save
		 * a 32-bits register read
		 */
		dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
				 priv->rx_buf_len, DMA_FROM_DEVICE);

		if (!priv->desc_64b_en) {
			dma_length_status =
				dmadesc_get_length_status(priv,
							  priv->rx_bds +
							  (priv->rx_read_ptr *
							   DMA_DESC_SIZE));
		} else {
			struct status_64 *status;

			status = (struct status_64 *)skb->data;
			dma_length_status = status->length_status;
		}

		/* DMA flags and length are still valid no matter how
		 * we got the Receive Status Vector (64B RSB or register)
		 */
		dma_flag = dma_length_status & 0xffff;
		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;

		netif_dbg(priv, rx_status, dev,
			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
			  __func__, p_index, priv->rx_c_index,
			  priv->rx_read_ptr, dma_length_status);

		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
			netif_err(priv, rx_status, dev,
				  "dropping fragmented packet!\n");
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			goto refill;
		}

		/* report errors */
		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
					 DMA_RX_OV |
					 DMA_RX_NO |
					 DMA_RX_LG |
					 DMA_RX_RXER))) {
			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
				  (unsigned int)dma_flag);
			if (dma_flag & DMA_RX_CRC_ERROR)
				dev->stats.rx_crc_errors++;
			if (dma_flag & DMA_RX_OV)
				dev->stats.rx_over_errors++;
			if (dma_flag & DMA_RX_NO)
				dev->stats.rx_frame_errors++;
			if (dma_flag & DMA_RX_LG)
				dev->stats.rx_length_errors++;
			dev->stats.rx_dropped++;
			dev->stats.rx_errors++;

			/* discard the packet and advance consumer index. */
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			goto refill;
		} /* error packet */

		chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
			     priv->desc_rxchk_en;

		skb_put(skb, len);
		if (priv->desc_64b_en) {
			skb_pull(skb, 64);
			len -= 64;
		}

		if (likely(chksum_ok))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* remove hardware 2bytes added for IP alignment */
		skb_pull(skb, 2);
		len -= 2;

		if (priv->crc_fwd_en) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		/* Finish setting up the received SKB and send it to the kernel */
		skb->protocol = eth_type_trans(skb, priv->dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		if (dma_flag & DMA_RX_MULT)
			dev->stats.multicast++;

		/* Notify kernel */
		napi_gro_receive(&priv->napi, skb);
		cb->skb = NULL;
		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");

		/* refill RX path on the current control block */
refill:
		err = bcmgenet_rx_refill(priv, cb);
		if (err) {
			priv->mib.alloc_rx_buff_failed++;
			netif_err(priv, rx_err, dev, "Rx refill failed\n");
		}

		rxpktprocessed++;
		priv->rx_read_ptr++;
		priv->rx_read_ptr &= (priv->num_rx_bds - 1);
	}

	return rxpktprocessed;
}
/* Assign skb to RX DMA descriptor. */
static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
{
	struct enet_cb *cb;
	int ret = 0;
	int i;

	netif_dbg(priv, hw, priv->dev, "%s:\n", __func__);

	/* loop here for each buffer needing assign */
	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[priv->rx_bd_assign_index];
		if (cb->skb)
			continue;

		ret = bcmgenet_rx_refill(priv, cb);
		if (ret)
			break;
	}

	return ret;
}
static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
	struct enet_cb *cb;
	int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];

		if (dma_unmap_addr(cb, dma_addr)) {
			dma_unmap_single(&priv->dev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 priv->rx_buf_len, DMA_FROM_DEVICE);
			dma_unmap_addr_set(cb, dma_addr, 0);
		}

		if (cb->skb)
			bcmgenet_free_cb(cb);
	}
}
static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
	u32 reg;

	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-size packet
	 * to be processed
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}
static int reset_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	unsigned int timeout = 0;
	u32 reg;

	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
	bcmgenet_rbuf_ctrl_set(priv, 0);
	udelay(10);

	/* disable MAC while updating its registers */
	bcmgenet_umac_writel(priv, 0, UMAC_CMD);

	/* issue soft reset, wait for it to complete */
	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
	while (timeout++ < 1000) {
		reg = bcmgenet_umac_readl(priv, UMAC_CMD);
		if (!(reg & CMD_SW_RESET))
			return 0;

		udelay(1);
	}

	if (timeout == 1000) {
		dev_err(kdev,
			"timeout waiting for MAC to come out of reset\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
{
	/* Mask all interrupts. */
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
static int init_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	int ret;
	u32 reg, cpu_mask_clear;
	int index;

	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");

	ret = reset_umac(priv);
	if (ret)
		return ret;

	bcmgenet_umac_writel(priv, 0, UMAC_CMD);
	/* clear tx/rx counter */
	bcmgenet_umac_writel(priv,
			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
			     UMAC_MIB_CTRL);
	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);

	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* init rx registers, enable ip header optimization */
	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
	reg |= RBUF_ALIGN_2B;
	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);

	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);

	bcmgenet_intr_disable(priv);

	cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;

	dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);

	/* Monitor cable plug/unplugged event for internal PHY */
	if (phy_is_internal(priv->phydev)) {
		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
	} else if (priv->ext_phy) {
		cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		reg = bcmgenet_bp_mc_get(priv);
		reg |= BIT(priv->hw_params->bp_in_en_shift);

		/* bp_mask: back pressure mask */
		if (netif_is_multiqueue(priv->dev))
			reg |= priv->hw_params->bp_in_mask;
		else
			reg &= ~priv->hw_params->bp_in_mask;
		bcmgenet_bp_mc_set(priv, reg);
	}

	/* Enable MDIO interrupts on GENET v3+ */
	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
		cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;

	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);

	for (index = 0; index < priv->hw_params->tx_queues; index++)
		bcmgenet_intrl2_1_writel(priv, (1 << index),
					 INTRL2_CPU_MASK_CLEAR);

	/* Enable rx/tx engine. */
	dev_dbg(kdev, "done init umac\n");

	return 0;
}
/* Initialize a Tx ring along with corresponding hardware registers */
static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
				  unsigned int index, unsigned int size,
				  unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	u32 flow_period_val = 0;

	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->queue = 0;
		ring->int_enable = bcmgenet_tx_ring16_int_enable;
		ring->int_disable = bcmgenet_tx_ring16_int_disable;
	} else {
		ring->queue = index + 1;
		ring->int_enable = bcmgenet_tx_ring_int_enable;
		ring->int_disable = bcmgenet_tx_ring_int_disable;
	}
	ring->cbs = priv->tx_cbs + start_ptr;
	ring->size = size;
	ring->clean_ptr = start_ptr;
	ring->c_index = 0;
	ring->free_bds = size;
	ring->write_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;
	ring->prod_index = 0;

	/* Set flow period for ring != 16 */
	if (index != DESC_INDEX)
		flow_period_val = ENET_MAX_MTU_SIZE << 16;

	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	/* Disable rate control for now */
	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
				  TDMA_FLOW_PERIOD);
	bcmgenet_tdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);

	/* Set start and end address, read and write pointers */
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_READ_PTR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_WRITE_PTR);
	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);

	napi_enable(&ring->napi);
}
static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
				  unsigned int index)
{
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);
}
/* Initialize a RDMA ring */
static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
				 unsigned int index, unsigned int size)
{
	u32 words_per_bd = WORDS_PER_BD(priv);
	int ret;

	priv->num_rx_bds = TOTAL_DESC;
	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
	priv->rx_bd_assign_ptr = priv->rx_bds;
	priv->rx_bd_assign_index = 0;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs)
		return -ENOMEM;

	ret = bcmgenet_alloc_rx_buffers(priv);
	if (ret) {
		kfree(priv->rx_cbs);
		return ret;
	}

	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
	bcmgenet_rdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
	bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
	bcmgenet_rdma_ring_writel(priv, index,
				  words_per_bd * size - 1, DMA_END_ADDR);
	bcmgenet_rdma_ring_writel(priv, index,
				  (DMA_FC_THRESH_LO <<
				   DMA_XOFF_THRESHOLD_SHIFT) |
				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);

	return ret;
}
/* Initialize Tx queues
 *
 * Queues 0-3 are priority-based, each one has 32 descriptors,
 * with queue 0 being the highest priority queue.
 *
 * Queue 16 is the default Tx queue with
 * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
 *
 * The transmit control block pool is then partitioned as follows:
 * - Tx queue 0 uses tx_cbs[0..31]
 * - Tx queue 1 uses tx_cbs[32..63]
 * - Tx queue 2 uses tx_cbs[64..95]
 * - Tx queue 3 uses tx_cbs[96..127]
 * - Tx queue 16 uses tx_cbs[128..255]
 */
static void bcmgenet_init_tx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 dma_ctrl, ring_cfg;
	u32 dma_priority[3] = {0, 0, 0};
	u32 dma_enable;

	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Enable strict priority arbiter mode */
	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);

	/* Initialize Tx priority queues */
	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
				      i * priv->hw_params->tx_bds_per_q,
				      (i + 1) * priv->hw_params->tx_bds_per_q);
		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
	}

	/* Initialize Tx default queue 16 */
	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
			      priv->hw_params->tx_queues *
			      priv->hw_params->tx_bds_per_q,
			      TOTAL_DESC);
	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
		 DMA_PRIO_REG_SHIFT(DESC_INDEX));

	/* Set Tx queue priorities */
	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);

	/* Enable Tx queues */
	bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);

	/* Enable Tx DMA */
	if (dma_enable)
		dma_ctrl |= DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
}
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
	int ret = 0;
	int timeout = 0;
	u32 reg;

	/* Disable TDMA to stop adding more frames to the TX DMA */
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	/* Check TDMA status register to confirm TDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
		ret = -ETIMEDOUT;
	}

	/* Wait 10ms for packet drain in both tx and rx dma */
	usleep_range(10000, 20000);

	/* Disable RDMA */
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	timeout = 0;
	/* Check RDMA status register to confirm RDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}
static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
	int i;

	/* disable DMA */
	bcmgenet_dma_teardown(priv);

	for (i = 0; i < priv->num_tx_bds; i++) {
		if (priv->tx_cbs[i].skb != NULL) {
			dev_kfree_skb(priv->tx_cbs[i].skb);
			priv->tx_cbs[i].skb = NULL;
		}
	}

	bcmgenet_free_rx_buffers(priv);
	kfree(priv->rx_cbs);
	kfree(priv->tx_cbs);
}
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
	int i;

	bcmgenet_fini_tx_ring(priv, DESC_INDEX);

	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_fini_tx_ring(priv, i);

	__bcmgenet_fini_dma(priv);
}
/* init_edma: Initialize DMA control registers */
static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
{
	struct enet_cb *cb;
	int ret;
	int i;

	netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n");

	/* by default, enable ring 16 (descriptor based) */
	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC);
	if (ret) {
		netdev_err(priv->dev, "failed to initialize RX ring\n");
		return ret;
	}

	/* init rDma */
	bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* init tDma */
	bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);

	/* Initialize common TX ring structures */
	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
	priv->num_tx_bds = TOTAL_DESC;
	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->tx_cbs) {
		__bcmgenet_fini_dma(priv);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_bds; i++) {
		cb = priv->tx_cbs + i;
		cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
	}

	/* Initialize Tx queues */
	bcmgenet_init_tx_queues(priv->dev);

	return 0;
}
/* NAPI polling method */
static int bcmgenet_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_priv *priv = container_of(napi,
			struct bcmgenet_priv, napi);
	unsigned int work_done;

	work_done = bcmgenet_desc_rx(priv, budget);

	/* Advancing our consumer index */
	priv->rx_c_index += work_done;
	priv->rx_c_index &= DMA_C_INDEX_MASK;
	bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
				  priv->rx_c_index, RDMA_CONS_INDEX);
	if (work_done < budget) {
		napi_complete(napi);
		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
					 INTRL2_CPU_MASK_CLEAR);
	}

	return work_done;
}
/* Interrupt bottom half */
static void bcmgenet_irq_task(struct work_struct *work)
{
	struct bcmgenet_priv *priv = container_of(
			work, struct bcmgenet_priv, bcmgenet_irq_work);

	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);

	if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
		priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
		netif_dbg(priv, wol, priv->dev,
			  "magic packet detected, waking up\n");
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
	}

	/* Link UP/DOWN event */
	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
	    (priv->irq0_stat & (UMAC_IRQ_LINK_UP | UMAC_IRQ_LINK_DOWN))) {
		phy_mac_interrupt(priv->phydev,
				  priv->irq0_stat & UMAC_IRQ_LINK_UP);
		priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP | UMAC_IRQ_LINK_DOWN);
	}
}
/* bcmgenet_isr1: interrupt handler for ring buffers. */
static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;
	struct bcmgenet_tx_ring *ring;
	unsigned int index;

	/* Save irq status for bottom-half processing. */
	priv->irq1_stat =
		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);

	/* clear interrupts */
	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);

	/* Check the MBDONE interrupts.
	 * Packets are done, reclaim descriptors.
	 */
	for (index = 0; index < priv->hw_params->tx_queues; index++) {
		if (!(priv->irq1_stat & BIT(index)))
			continue;

		ring = &priv->tx_rings[index];

		if (likely(napi_schedule_prep(&ring->napi))) {
			ring->int_disable(priv, ring);
			__napi_schedule(&ring->napi);
		}
	}

	return IRQ_HANDLED;
}
/* bcmgenet_isr0: Handle various interrupts. */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	/* Save irq status for bottom-half processing. */
	priv->irq0_stat =
		bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);

	/* clear interrupts */
	bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "IRQ=0x%x\n", priv->irq0_stat);

	if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
		/* We use NAPI (software interrupt throttling) if
		 * Rx Descriptor throttling is not used.
		 * Disable interrupt, will be enabled in the poll method.
		 */
		if (likely(napi_schedule_prep(&priv->napi))) {
			bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
						 INTRL2_CPU_MASK_SET);
			__napi_schedule(&priv->napi);
		}
	}
	if (priv->irq0_stat &
	    (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
		struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];

		if (likely(napi_schedule_prep(&ring->napi))) {
			ring->int_disable(priv, ring);
			__napi_schedule(&ring->napi);
		}
	}
	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
			       UMAC_IRQ_PHY_DET_F |
			       UMAC_IRQ_LINK_UP |
			       UMAC_IRQ_LINK_DOWN |
			       UMAC_IRQ_HFB_SM |
			       UMAC_IRQ_HFB_MM |
			       UMAC_IRQ_MPD_R)) {
		/* all other interested interrupts handled in bottom half */
		schedule_work(&priv->bcmgenet_irq_work);
	}

	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
	    priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
		priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
		wake_up(&priv->wq);
	}

	return IRQ_HANDLED;
}
static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}
static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
{
	u32 reg;

	reg = bcmgenet_rbuf_ctrl_get(priv);
	reg |= BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);

	reg &= ~BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);
}
static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
				 unsigned char *addr)
{
	bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | addr[3], UMAC_MAC0);
	bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}
/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
	u32 reg;
	u32 dma_ctrl;

	/* disable DMA */
	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
	udelay(10);
	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);

	return dma_ctrl;
}
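
/* Usage note (an observation on ordering, not additional logic): the open
 * path below calls bcmgenet_dma_disable() first, reinitializes the rings
 * with bcmgenet_init_dma(), and only then re-applies the returned bits via
 * bcmgenet_enable_dma(), so ring 16 and the DMA engines are never live
 * while their descriptors are being rewritten.
 */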
static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
{
	u32 reg;

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}
static void bcmgenet_netif_start(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Start the network engine */
	napi_enable(&priv->napi);

	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);

	if (phy_is_internal(priv->phydev))
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	netif_tx_start_all_queues(dev);

	phy_start(priv->phydev);
}

static int bcmgenet_open(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	u32 reg;
	int ret;

	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");

	/* Turn on the clock */
	if (!IS_ERR(priv->clk))
		clk_prepare_enable(priv->clk);

	/* take MAC out of reset */
	bcmgenet_umac_reset(priv);

	ret = init_umac(priv);
	if (ret)
		goto err_clk_disable;

	/* disable ethernet MAC while updating its registers */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

	/* Make sure we reflect the value of CRC_CMD_FWD */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (phy_is_internal(priv->phydev)) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto err_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
		goto err_fini_dma;
	}

	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
		goto err_irq0;
	}

	/* Re-configure the port multiplexer towards the PHY device */
	bcmgenet_mii_config(priv->dev, false);

	phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
			   priv->phy_interface);

	bcmgenet_netif_start(dev);

	return 0;

err_irq0:
	free_irq(priv->irq0, priv);
err_fini_dma:
	bcmgenet_fini_dma(priv);
err_clk_disable:
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);
	return ret;
}
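
/* The error labels above unwind strictly in reverse order of setup:
 * first the irq0 line, then the DMA state, and finally the clock.
 */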

static void bcmgenet_netif_stop(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	phy_stop(priv->phydev);

	bcmgenet_intr_disable(priv);

	/* Wait for pending work items to complete. Since interrupts are
	 * disabled, no new work will be scheduled.
	 */
	cancel_work_sync(&priv->bcmgenet_irq_work);

	priv->old_link = -1;
	priv->old_speed = -1;
	priv->old_duplex = -1;
	priv->old_pause = -1;
}
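
/* Resetting the cached link parameters to -1 guarantees that the next
 * link adjustment callback sees speed/duplex/pause as changed and
 * reprograms the MAC accordingly.
 */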

static int bcmgenet_close(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;

	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");

	bcmgenet_netif_stop(dev);

	/* Really kill the PHY state machine and disconnect from it */
	phy_disconnect(priv->phydev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit. TX DMA must be disabled before this */
	umac_enable_set(priv, CMD_TX_EN, false);

	/* tx reclaim */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	free_irq(priv->irq0, priv);
	free_irq(priv->irq1, priv);

	if (phy_is_internal(priv->phydev))
		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);

	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);

	return 0;
}

static void bcmgenet_timeout(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");

	dev->trans_start = jiffies;

	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

#define MAX_MC_COUNT	16

static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
					 unsigned char *addr,
					 int *i,
					 int *mc)
{
	u32 reg;

	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
			     UMAC_MDF_ADDR + (*i * 4));
	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
			     addr[4] << 8 | addr[5],
			     UMAC_MDF_ADDR + ((*i + 1) * 4));
	reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
	reg |= (1 << (MAX_MC_COUNT - *mc));
	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
	*i += 2;
	(*mc)++;
}
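
/* Each address consumes two consecutive UMAC_MDF_ADDR words and one
 * enable bit in UMAC_MDF_CTRL: the first call programs words 0-1 and
 * sets bit 16 (*mc == 0), the second programs words 2-3 and sets bit 15,
 * and so on down the register.
 */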

static void bcmgenet_set_rx_mode(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, mc;
	u32 reg;

	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);

	/* Promiscuous mode */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC) {
		reg |= CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
		return;
	} else {
		reg &= ~CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	}

	/* UniMac doesn't support ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		netdev_warn(dev, "ALLMULTI is not supported\n");
		return;
	}

	/* update MDF filter */
	i = 0;
	mc = 0;
	/* Broadcast */
	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
	/* my own address */
	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
	/* Unicast list */
	if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
		return;

	if (!netdev_uc_empty(dev))
		netdev_for_each_uc_addr(ha, dev)
			bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);

	/* Multicast */
	if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
		return;

	netdev_for_each_mc_addr(ha, dev)
		bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
}

/* Set the hardware MAC address. */
static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	/* Setting the MAC address at the hardware level is not possible
	 * without disabling the UniMAC RX/TX enable bits.
	 */
	if (netif_running(dev))
		return -EBUSY;

	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}
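
/* The new address only reaches the hardware on the next bcmgenet_open(),
 * which programs it via bcmgenet_set_hw_addr().
 */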

static const struct net_device_ops bcmgenet_netdev_ops = {
	.ndo_open		= bcmgenet_open,
	.ndo_stop		= bcmgenet_close,
	.ndo_start_xmit		= bcmgenet_xmit,
	.ndo_tx_timeout		= bcmgenet_timeout,
	.ndo_set_rx_mode	= bcmgenet_set_rx_mode,
	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
	.ndo_do_ioctl		= bcmgenet_ioctl,
	.ndo_set_features	= bcmgenet_set_features,
};

/* Array of GENET hardware parameters/characteristics */
static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
	[GENET_V1] = {
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.hfb_offset = 0x1000,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x3000,
	},
	[GENET_V2] = {
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x1000,
		.hfb_reg_offset = 0x2000,
		.rdma_offset = 0x3000,
		.tdma_offset = 0x4000,
		.flags = GENET_HAS_EXT,
	},
	[GENET_V3] = {
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x10000,
		.tdma_offset = 0x11000,
		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
	},
	[GENET_V4] = {
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x4000,
		.flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
	},
};
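
/* Only GENET_V4 advertises GENET_HAS_40BITS, which is what gates the
 * writes of the high descriptor address word in dmadesc_set_addr() on
 * LPAE-enabled builds.
 */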

/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
	struct bcmgenet_hw_params *params;
	u32 reg;
	u8 major;
	u16 gphy_rev;

	if (GENET_IS_V4(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v4;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V4;
	} else if (GENET_IS_V3(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
		priv->version = GENET_V3;
	} else if (GENET_IS_V2(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V2;
	} else if (GENET_IS_V1(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
		priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
		priv->version = GENET_V1;
	}

	/* enum genet_version starts at 1 */
	priv->hw_params = &bcmgenet_hw_params[priv->version];
	params = priv->hw_params;

	/* Read GENET HW version */
	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
	major = (reg >> 24 & 0x0f);
	if (major == 5)
		major = 4;
	else if (major == 0)
		major = 1;
	if (major != priv->version) {
		dev_err(&priv->pdev->dev,
			"GENET version mismatch, got: %d, configured for: %d\n",
			major, priv->version);
	}

	/* Print the GENET core version */
	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
		 major, (reg >> 16) & 0x0f, reg & 0xffff);

	/* Store the integrated PHY revision for the MDIO probing function
	 * to pass this information to the PHY driver. The PHY driver expects
	 * to find the PHY major revision in bits 15:8, while the GENET
	 * register stores that information in bits 7:0; account for that.
	 *
	 * On newer chips, starting with PHY revision G0, a new scheme is
	 * deployed similar to the Starfighter 2 switch, with GPHY major
	 * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
	 * is reserved, as is the special value 0x01ff; we have a small
	 * heuristic to check for the new GPHY revision and re-arrange things
	 * so the GPHY driver is happy.
	 */
	gphy_rev = reg & 0xffff;

	/* This is the good old scheme, just GPHY major, no minor nor patch */
	if ((gphy_rev & 0xf0) != 0)
		priv->gphy_rev = gphy_rev << 8;

	/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
	else if ((gphy_rev & 0xff00) != 0)
		priv->gphy_rev = gphy_rev;

	/* This is reserved so should require special treatment */
	else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
		return;
	}

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (!(params->flags & GENET_HAS_40BITS))
		pr_warn("GENET does not support 40-bits PA\n");
#endif

	pr_debug("Configuration for version: %d\n"
		 "TXq: %1d, TXqBDs: %1d, RXq: %1d\n"
		 "BP << en: %2d, BP msk: 0x%05x\n"
		 "HFB count: %2d, QTAQ msk: 0x%05x\n"
		 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
		 "RDMA: 0x%05x, TDMA: 0x%05x\n"
		 "Words/BD: %1d\n",
		 priv->version,
		 params->tx_queues, params->tx_bds_per_q,
		 params->rx_queues,
		 params->bp_in_en_shift, params->bp_in_mask,
		 params->hfb_filter_cnt, params->qtag_mask,
		 params->tbuf_offset, params->hfb_offset,
		 params->hfb_reg_offset,
		 params->rdma_offset, params->tdma_offset,
		 params->words_per_bd);
}

static const struct of_device_id bcmgenet_match[] = {
	{ .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
	{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
	{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
	{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
	{ },
};

static int bcmgenet_probe(struct platform_device *pdev)
{
	struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id = NULL;
	struct bcmgenet_priv *priv;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	int err = -EIO;

	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */
	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1);
	if (!dev) {
		dev_err(&pdev->dev, "can't allocate net device\n");
		return -ENOMEM;
	}

	if (dn) {
		of_id = of_match_node(bcmgenet_match, dn);
		if (!of_id) {
			err = -EINVAL;
			goto err;
		}
	}

	priv = netdev_priv(dev);
	priv->irq0 = platform_get_irq(pdev, 0);
	priv->irq1 = platform_get_irq(pdev, 1);
	priv->wol_irq = platform_get_irq(pdev, 2);
	if (!priv->irq0 || !priv->irq1) {
		dev_err(&pdev->dev, "can't find IRQs\n");
		err = -EINVAL;
		goto err;
	}

	if (dn) {
		macaddr = of_get_mac_address(dn);
		if (!macaddr) {
			dev_err(&pdev->dev, "can't find MAC address\n");
			err = -EINVAL;
			goto err;
		}
	} else {
		macaddr = pd->mac_address;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		err = PTR_ERR(priv->base);
		goto err;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	ether_addr_copy(dev->dev_addr, macaddr);
	dev->watchdog_timeo = 2 * HZ;
	dev->ethtool_ops = &bcmgenet_ethtool_ops;
	dev->netdev_ops = &bcmgenet_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);

	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);

	/* Set hardware features */
	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = true;
	err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
			       dev->name, priv);
	if (!err)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom to account for any possible
	 * features enabling/disabling at runtime
	 */
	dev->needed_headroom += 64;

	netdev_boot_setup_check(dev);

	priv->dev = dev;
	priv->pdev = pdev;
	if (of_id)
		priv->version = (enum bcmgenet_version)of_id->data;
	else
		priv->version = pd->genet_version;

	priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
	if (IS_ERR(priv->clk))
		dev_warn(&priv->pdev->dev, "failed to get enet clock\n");

	if (!IS_ERR(priv->clk))
		clk_prepare_enable(priv->clk);

	bcmgenet_set_hw_params(priv);

	/* Mii wait queue */
	init_waitqueue_head(&priv->wq);
	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
	priv->rx_buf_len = RX_BUF_LENGTH;
	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);

	priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
	if (IS_ERR(priv->clk_wol))
		dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");

	priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
	if (IS_ERR(priv->clk_eee)) {
		dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
		priv->clk_eee = NULL;
	}

	err = reset_umac(priv);
	if (err)
		goto err_clk_disable;

	err = bcmgenet_mii_init(dev);
	if (err)
		goto err_clk_disable;

	/* Set up the number of real queues + 1 (GENET_V1 has 0 hardware
	 * queues, just the ring 16 descriptor-based TX queue).
	 */
	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);

	/* libphy will determine the link state */
	netif_carrier_off(dev);

	/* Turn off the main clock, WOL clock is handled separately */
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);

	err = register_netdev(dev);
	if (err)
		goto err_clk_disable;

	return err;

err_clk_disable:
	if (!IS_ERR(priv->clk))
		clk_disable_unprepare(priv->clk);
err:
	free_netdev(dev);
	return err;
}

static int bcmgenet_remove(struct platform_device *pdev)
{
	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);

	dev_set_drvdata(&pdev->dev, NULL);
	unregister_netdev(priv->dev);
	bcmgenet_mii_exit(priv->dev);
	free_netdev(priv->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int bcmgenet_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;

	if (!netif_running(dev))
		return 0;

	bcmgenet_netif_stop(dev);

	phy_suspend(priv->phydev);

	netif_device_detach(dev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	ret = bcmgenet_dma_teardown(priv);
	if (ret)
		return ret;

	/* Disable MAC transmit. TX DMA must be disabled before this */
	umac_enable_set(priv, CMD_TX_EN, false);

	/* tx reclaim */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);

	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
	if (device_may_wakeup(d) && priv->wolopts) {
		bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
		clk_prepare_enable(priv->clk_wol);
	}

	/* Turn off the clocks */
	clk_disable_unprepare(priv->clk);

	return 0;
}
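
/* When Wake-on-LAN is armed, the dedicated "enet-wol" clock stays
 * enabled so magic-packet detection can keep running after the main
 * "enet" clock has been gated.
 */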

static int bcmgenet_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	u32 reg;
	int ret;

	if (!netif_running(dev))
		return 0;

	/* Turn on the clock */
	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	bcmgenet_umac_reset(priv);

	ret = init_umac(priv);
	if (ret)
		goto out_clk_disable;

	/* From WOL-enabled suspend, switch to regular clock */
	if (priv->wolopts)
		clk_disable_unprepare(priv->clk_wol);

	phy_init_hw(priv->phydev);
	/* Speed settings must be restored */
	bcmgenet_mii_config(priv->dev, false);

	/* disable ethernet MAC while updating its registers */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (phy_is_internal(priv->phydev)) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	if (priv->wolopts)
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto out_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	netif_device_attach(dev);

	phy_resume(priv->phydev);

	if (priv->eee.eee_enabled)
		bcmgenet_eee_enable_set(dev, true);

	bcmgenet_netif_start(dev);

	return 0;

out_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */
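
/* bcmgenet_pm_ops wires the suspend/resume pair above into the driver;
 * SIMPLE_DEV_PM_OPS leaves the ops empty when CONFIG_PM_SLEEP is not
 * set, which is why the callbacks themselves are guarded by the #ifdef
 * above.
 */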

static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);

static struct platform_driver bcmgenet_driver = {
	.probe	= bcmgenet_probe,
	.remove	= bcmgenet_remove,
	.driver	= {
		.name	= "bcmgenet",
		.of_match_table = bcmgenet_match,
		.pm	= &bcmgenet_pm_ops,
	},
};
module_platform_driver(bcmgenet_driver);
2948 MODULE_AUTHOR("Broadcom Corporation");
2949 MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
2950 MODULE_ALIAS("platform:bcmgenet");
2951 MODULE_LICENSE("GPL");