2 * Broadcom Starfighter 2 DSA switch driver
4 * Copyright (C) 2014, Broadcom Corporation
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
12 #include <linux/list.h>
13 #include <linux/module.h>
14 #include <linux/netdevice.h>
15 #include <linux/interrupt.h>
16 #include <linux/platform_device.h>
18 #include <linux/phy.h>
19 #include <linux/phy_fixed.h>
20 #include <linux/mii.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_address.h>
24 #include <linux/of_net.h>
25 #include <linux/of_mdio.h>
27 #include <linux/ethtool.h>
28 #include <linux/if_bridge.h>
29 #include <linux/brcmphy.h>
30 #include <linux/etherdevice.h>
31 #include <net/switchdev.h>
34 #include "bcm_sf2_regs.h"
36 /* String, offset, and register size in bytes if different from 4 bytes */
/* Per-port MIB counter descriptors. A third field of 8 flags a 64-bit
 * counter (read with core_readq); all others are 32-bit reads.  The
 * strings are copied verbatim into the ethtool -S string table, so they
 * are user-visible ABI — do not rename them (even the existing typos
 * such as "TxUnicastPKts" and "TxPkts256to511Ocets").
 */
37 static const struct bcm_sf2_hw_stats bcm_sf2_mib[] = {
38 { "TxOctets", 0x000, 8 },
39 { "TxDropPkts", 0x020 },
40 { "TxQPKTQ0", 0x030 },
41 { "TxBroadcastPkts", 0x040 },
42 { "TxMulticastPkts", 0x050 },
43 { "TxUnicastPKts", 0x060 },
44 { "TxCollisions", 0x070 },
45 { "TxSingleCollision", 0x080 },
46 { "TxMultipleCollision", 0x090 },
47 { "TxDeferredCollision", 0x0a0 },
48 { "TxLateCollision", 0x0b0 },
49 { "TxExcessiveCollision", 0x0c0 },
50 { "TxFrameInDisc", 0x0d0 },
51 { "TxPausePkts", 0x0e0 },
52 { "TxQPKTQ1", 0x0f0 },
53 { "TxQPKTQ2", 0x100 },
54 { "TxQPKTQ3", 0x110 },
55 { "TxQPKTQ4", 0x120 },
56 { "TxQPKTQ5", 0x130 },
57 { "RxOctets", 0x140, 8 },
58 { "RxUndersizePkts", 0x160 },
59 { "RxPausePkts", 0x170 },
60 { "RxPkts64Octets", 0x180 },
61 { "RxPkts65to127Octets", 0x190 },
62 { "RxPkts128to255Octets", 0x1a0 },
63 { "RxPkts256to511Octets", 0x1b0 },
64 { "RxPkts512to1023Octets", 0x1c0 },
65 { "RxPkts1024toMaxPktsOctets", 0x1d0 },
66 { "RxOversizePkts", 0x1e0 },
67 { "RxJabbers", 0x1f0 },
68 { "RxAlignmentErrors", 0x200 },
69 { "RxFCSErrors", 0x210 },
70 { "RxGoodOctets", 0x220, 8 },
71 { "RxDropPkts", 0x240 },
72 { "RxUnicastPkts", 0x250 },
73 { "RxMulticastPkts", 0x260 },
74 { "RxBroadcastPkts", 0x270 },
75 { "RxSAChanges", 0x280 },
76 { "RxFragments", 0x290 },
77 { "RxJumboPkt", 0x2a0 },
78 { "RxSymblErr", 0x2b0 },
79 { "InRangeErrCount", 0x2c0 },
80 { "OutRangeErrCount", 0x2d0 },
81 { "EEELpiEvent", 0x2e0 },
82 { "EEELpiDuration", 0x2f0 },
83 { "RxDiscard", 0x300, 8 },
84 { "TxQPKTQ6", 0x320 },
85 { "TxQPKTQ7", 0x330 },
86 { "TxPkts64Octets", 0x340 },
87 { "TxPkts65to127Octets", 0x350 },
88 { "TxPkts128to255Octets", 0x360 },
89 { "TxPkts256to511Ocets", 0x370 },
90 { "TxPkts512to1023Ocets", 0x380 },
91 { "TxPkts1024toMaxPktOcets", 0x390 },
/* Number of MIB counters exposed via ethtool */
94 #define BCM_SF2_STATS_SIZE ARRAY_SIZE(bcm_sf2_mib)
/* ethtool -S callback: copy each MIB counter name into the caller's
 * string table, one ETH_GSTRING_LEN slot per statistic.  @port is
 * unused here since the names are identical for all ports.
 */
96 static void bcm_sf2_sw_get_strings(struct dsa_switch *ds,
97 int port, uint8_t *data)
101 for (i = 0; i < BCM_SF2_STATS_SIZE; i++)
102 memcpy(data + i * ETH_GSTRING_LEN,
103 bcm_sf2_mib[i].string, ETH_GSTRING_LEN);
/* ethtool -S callback: read every MIB counter for @port into @data,
 * in the same order as bcm_sf2_mib[].  stats_mutex serializes access
 * to the MIB register window across concurrent readers.
 */
106 static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds,
107 int port, uint64_t *data)
109 struct bcm_sf2_priv *priv = ds_to_priv(ds);
110 const struct bcm_sf2_hw_stats *s;
115 mutex_lock(&priv->stats_mutex);
117 /* Now fetch the per-port counters */
118 for (i = 0; i < BCM_SF2_STATS_SIZE; i++) {
121 /* Do a latched 64-bit read if needed */
122 offset = s->reg + CORE_P_MIB_OFFSET(port);
123 if (s->sizeof_stat == 8)
124 val = core_readq(priv, offset);
126 val = core_readl(priv, offset);
131 mutex_unlock(&priv->stats_mutex);
/* ethtool callback: number of statistics reported per port */
134 static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds)
136 return BCM_SF2_STATS_SIZE;
/* Add the IMP (CPU) port to the port-based VLAN map of every enabled
 * user port, so each user port can only talk to itself and the CPU.
 */
139 static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
141 struct bcm_sf2_priv *priv = ds_to_priv(ds);
145 /* Enable the IMP Port to be in the same VLAN as the other ports
146 * on a per-port basis such that we only have Port i and IMP in
149 for (i = 0; i < priv->hw_params.num_ports; i++) {
/* Skip ports that are not enabled in the DSA port mask */
150 if (!((1 << i) & ds->enabled_port_mask))
153 reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
154 reg |= (1 << cpu_port);
155 core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
/* Bring up the IMP (CPU-facing) port: power its queue memories, enable
 * all forwarding towards it, turn on Broadcom tag insertion/extraction
 * so the DSA tagger works, and force the link up via the software
 * override register.
 */
159 static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
161 struct bcm_sf2_priv *priv = ds_to_priv(ds);
164 /* Enable the port memories */
165 reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
166 reg &= ~P_TXQ_PSM_VDD(port);
167 core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
169 /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
170 reg = core_readl(priv, CORE_IMP_CTL);
171 reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
172 reg &= ~(RX_DIS | TX_DIS);
173 core_writel(priv, reg, CORE_IMP_CTL);
175 /* Enable forwarding */
176 core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
178 /* Enable IMP port in dumb mode */
179 reg = core_readl(priv, CORE_SWITCH_CTRL);
180 reg |= MII_DUMB_FWDG_EN;
181 core_writel(priv, reg, CORE_SWITCH_CTRL);
183 /* Resolve which bit controls the Broadcom tag */
/* The BRCM_HDR enable bit depends on which physical port (8/7/5)
 * is acting as the IMP port (selection logic elided in this view).
 */
186 val = BRCM_HDR_EN_P8;
189 val = BRCM_HDR_EN_P7;
192 val = BRCM_HDR_EN_P5;
199 /* Enable Broadcom tags for IMP port */
200 reg = core_readl(priv, CORE_BRCM_HDR_CTRL);
202 core_writel(priv, reg, CORE_BRCM_HDR_CTRL);
204 /* Enable reception Broadcom tag for CPU TX (switch RX) to
205 * allow us to tag outgoing frames
207 reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS);
209 core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS);
211 /* Enable transmission of Broadcom tags from the switch (CPU RX) to
212 * allow delivering frames to the per-port net_devices
214 reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
216 core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);
218 /* Force link status for IMP port */
219 reg = core_readl(priv, CORE_STS_OVERRIDE_IMP);
220 reg |= (MII_SW_OR | LINK_STS);
221 core_writel(priv, reg, CORE_STS_OVERRIDE_IMP);
/* Enable or disable Energy Efficient Ethernet for @port by updating
 * CORE_EEE_EN_CTRL (the per-port bit set/clear is elided in this view
 * — presumably keyed on @enable; verify against the register layout).
 */
224 static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
226 struct bcm_sf2_priv *priv = ds_to_priv(ds);
229 reg = core_readl(priv, CORE_EEE_EN_CTRL);
234 core_writel(priv, reg, CORE_EEE_EN_CTRL);
/* Power the integrated Gigabit PHY up or down through REG_SPHY_CNTRL.
 * On enable, power/bias/clock gates are cleared and the PHY is taken
 * out of reset; on disable it is powered down and held in reset.  LED
 * control is handed to the PHY on the enable path.
 */
237 static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
239 struct bcm_sf2_priv *priv = ds_to_priv(ds);
242 reg = reg_readl(priv, REG_SPHY_CNTRL);
245 reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
246 reg_writel(priv, reg, REG_SPHY_CNTRL);
248 reg = reg_readl(priv, REG_SPHY_CNTRL);
251 reg |= EXT_PWR_DOWN | IDDQ_BIAS | PHY_RESET;
252 reg_writel(priv, reg, REG_SPHY_CNTRL);
256 reg_writel(priv, reg, REG_SPHY_CNTRL);
258 /* Use PHY-driven LED signaling */
260 reg = reg_readl(priv, REG_LED_CNTRL(0));
261 reg |= SPDLNK_SRC_SEL;
262 reg_writel(priv, reg, REG_LED_CNTRL(0));
/* Unmask the per-port interrupts: port 0 lives in INTRL2 bank 0, the
 * other ports in bank 1 at P_IRQ_OFF(port).
 */
266 static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
276 /* Port 0 interrupts are located on the first bank */
277 intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
280 off = P_IRQ_OFF(port);
284 intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
/* Mask the per-port interrupts and acknowledge any pending ones, so
 * no stale status survives the disable.  Bank split mirrors
 * bcm_sf2_port_intr_enable().
 */
287 static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
297 /* Port 0 interrupts are located on the first bank */
298 intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
299 intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
302 off = P_IRQ_OFF(port);
306 intrl2_1_mask_set(priv, P_IRQ_MASK(off));
307 intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
/* DSA port_enable callback: power up a user port, re-enable the GPHY
 * if this port uses the single internal PHY, restore its VLAN/bridge
 * membership and previously-enabled EEE state.
 */
310 static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
311 struct phy_device *phy)
313 struct bcm_sf2_priv *priv = ds_to_priv(ds);
314 s8 cpu_port = ds->dst[ds->index].cpu_port;
317 /* Clear the memory power down */
318 reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
319 reg &= ~P_TXQ_PSM_VDD(port);
320 core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
322 /* Clear the Rx and Tx disable bits and set to no spanning tree */
323 core_writel(priv, 0, CORE_G_PCTL_PORT(port));
325 /* Re-enable the GPHY and re-apply workarounds */
326 if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
327 bcm_sf2_gphy_enable_set(ds, true);
329 /* if phy_stop() has been called before, phy
330 * will be in halted state, and phy_start()
333 * the resume path does not configure back
334 * autoneg settings, and since we hard reset
335 * the phy manually here, we need to reset the
336 * state machine also.
/* Reaching into phylib's state machine directly; fragile but
 * required after the hard PHY reset above.
 */
338 phy->state = PHY_READY;
343 /* Enable MoCA port interrupts to get notified */
344 if (port == priv->moca_port)
345 bcm_sf2_port_intr_enable(priv, port);
347 /* Set this port, and only this one to be in the default VLAN,
348 * if member of a bridge, restore its membership prior to
349 * bringing down this port.
351 reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
352 reg &= ~PORT_VLAN_CTRL_MASK;
354 reg |= priv->port_sts[port].vlan_ctl_mask;
355 core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));
357 bcm_sf2_imp_vlan_setup(ds, cpu_port);
359 /* If EEE was enabled, restore it */
360 if (priv->port_sts[port].eee.eee_enabled)
361 bcm_sf2_eee_enable_set(ds, port, true);
/* DSA port_disable callback: quiesce and power down a user port.
 * Ports armed for Wake-on-LAN are left untouched, and the CPU port
 * is never RX/TX-disabled or powered down here.
 */
366 static void bcm_sf2_port_disable(struct dsa_switch *ds, int port,
367 struct phy_device *phy)
369 struct bcm_sf2_priv *priv = ds_to_priv(ds);
/* Skip ports that must stay alive for Wake-on-LAN */
372 if (priv->wol_ports_mask & (1 << port))
375 if (port == priv->moca_port)
376 bcm_sf2_port_intr_disable(priv, port);
378 if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
379 bcm_sf2_gphy_enable_set(ds, false);
381 if (dsa_is_cpu_port(ds, port))
384 off = CORE_G_PCTL_PORT(port);
386 reg = core_readl(priv, off);
387 reg |= RX_DIS | TX_DIS;
388 core_writel(priv, reg, off);
390 /* Power down the port memory */
391 reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
392 reg |= P_TXQ_PSM_VDD(port);
393 core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
396 /* Returns 0 if EEE was not enabled, or 1 otherwise
/* Negotiate EEE with the attached PHY (phy_init_eee) and, on success,
 * enable it at the switch level for @port.
 */
398 static int bcm_sf2_eee_init(struct dsa_switch *ds, int port,
399 struct phy_device *phy)
401 struct bcm_sf2_priv *priv = ds_to_priv(ds);
402 struct ethtool_eee *p = &priv->port_sts[port].eee;
405 p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full);
407 ret = phy_init_eee(phy, 0);
411 bcm_sf2_eee_enable_set(ds, port, true);
/* ethtool get_eee callback: report the cached enabled state and the
 * live per-port LPI indication from CORE_EEE_LPI_INDICATE.
 */
416 static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port,
417 struct ethtool_eee *e)
419 struct bcm_sf2_priv *priv = ds_to_priv(ds);
420 struct ethtool_eee *p = &priv->port_sts[port].eee;
423 reg = core_readl(priv, CORE_EEE_LPI_INDICATE);
424 e->eee_enabled = p->eee_enabled;
425 e->eee_active = !!(reg & (1 << port));
/* ethtool set_eee callback: disable EEE immediately, or (re)negotiate
 * it via bcm_sf2_eee_init() and cache the resulting enabled state.
 */
430 static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port,
431 struct phy_device *phydev,
432 struct ethtool_eee *e)
434 struct bcm_sf2_priv *priv = ds_to_priv(ds);
435 struct ethtool_eee *p = &priv->port_sts[port].eee;
437 p->eee_enabled = e->eee_enabled;
439 if (!p->eee_enabled) {
440 bcm_sf2_eee_enable_set(ds, port, false);
442 p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
/* Kick a fast-ageing cycle and poll CORE_FAST_AGE_CTRL until the
 * hardware clears FAST_AGE_STR_DONE (bounded by @timeout iterations),
 * then restore the register to its default.
 */
450 static int bcm_sf2_fast_age_op(struct bcm_sf2_priv *priv)
452 unsigned int timeout = 1000;
455 reg = core_readl(priv, CORE_FAST_AGE_CTRL);
456 reg |= EN_AGE_PORT | EN_AGE_VLAN | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
457 core_writel(priv, reg, CORE_FAST_AGE_CTRL);
460 reg = core_readl(priv, CORE_FAST_AGE_CTRL);
461 if (!(reg & FAST_AGE_STR_DONE))
470 core_writel(priv, 0, CORE_FAST_AGE_CTRL);
475 /* Fast-ageing of ARL entries for a given port, equivalent to an ARL
476 * flush for that port.
478 static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
480 struct bcm_sf2_priv *priv = ds_to_priv(ds);
/* Select the target port, then run the common fast-age cycle */
482 core_writel(priv, port, CORE_FAST_AGE_PORT);
484 return bcm_sf2_fast_age_op(priv);
/* Fast-age (flush) all dynamic ARL entries belonging to VLAN @vid */
487 static int bcm_sf2_sw_fast_age_vlan(struct bcm_sf2_priv *priv, u16 vid)
489 core_writel(priv, vid, CORE_FAST_AGE_VID);
491 return bcm_sf2_fast_age_op(priv);
/* Poll CORE_ARLA_VTBL_RWCTRL until the VLAN table operation completes
 * (ARLA_VTBL_STDN self-clears), sleeping 1-2ms between polls; bounded
 * by @timeout iterations.
 */
494 static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv)
496 unsigned int timeout = 10;
500 reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL);
501 if (!(reg & ARLA_VTBL_STDN))
504 usleep_range(1000, 2000);
/* Issue a VLAN table command (@op, e.g. read/write/clear) and wait for
 * the hardware to finish it.
 */
510 static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op)
512 core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL);
514 return bcm_sf2_vlan_op_wait(priv);
/* Program the VLAN table entry for @vid: member port map in the low
 * bits, untagged map shifted by UNTAG_MAP_SHIFT.  Failure is only
 * logged — callers get no error indication.
 */
517 static void bcm_sf2_set_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
518 struct bcm_sf2_vlan *vlan)
522 core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);
523 core_writel(priv, vlan->untag << UNTAG_MAP_SHIFT | vlan->members,
524 CORE_ARLA_VTBL_ENTRY);
526 ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_WRITE);
528 pr_err("failed to write VLAN entry\n");
/* Read the VLAN table entry for @vid back into @vlan, decoding the
 * member and untagged port maps from CORE_ARLA_VTBL_ENTRY.
 */
531 static int bcm_sf2_get_vlan_entry(struct bcm_sf2_priv *priv, u16 vid,
532 struct bcm_sf2_vlan *vlan)
537 core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR);
539 ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_READ);
543 entry = core_readl(priv, CORE_ARLA_VTBL_ENTRY);
544 vlan->members = entry & FWD_MAP_MASK;
545 vlan->untag = (entry >> UNTAG_MAP_SHIFT) & UNTAG_MAP_MASK;
/* DSA bridge-join callback: take @port out of the "join all VLANs"
 * mode and cross-link its port-based VLAN membership with every other
 * port already attached to the same @bridge, caching the resulting
 * masks in port_sts[].
 */
550 static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port,
551 struct net_device *bridge)
553 struct bcm_sf2_priv *priv = ds_to_priv(ds);
554 s8 cpu_port = ds->dst->cpu_port;
558 /* Make this port leave the all VLANs join since we will have proper
559 * VLAN entries from now on
561 reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
563 if ((reg & BIT(cpu_port)) == BIT(cpu_port))
564 reg &= ~BIT(cpu_port);
565 core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);
567 priv->port_sts[port].bridge_dev = bridge;
568 p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
570 for (i = 0; i < priv->hw_params.num_ports; i++) {
/* Only ports that are members of the same bridge participate */
571 if (priv->port_sts[i].bridge_dev != bridge)
574 /* Add this local port to the remote port VLAN control
575 * membership and update the remote port bitmask
577 reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
579 core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
580 priv->port_sts[i].vlan_ctl_mask = reg;
585 /* Configure the local port VLAN control membership to include
586 * remote ports and update the local port bitmask
588 core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
589 priv->port_sts[port].vlan_ctl_mask = p_ctl;
/* DSA bridge-leave callback: remove @port from the VLAN membership of
 * the remaining bridge ports, clear its bridge association, and put it
 * back into "join all VLANs" mode.
 */
594 static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
596 struct bcm_sf2_priv *priv = ds_to_priv(ds);
597 struct net_device *bridge = priv->port_sts[port].bridge_dev;
598 s8 cpu_port = ds->dst->cpu_port;
602 p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
604 for (i = 0; i < priv->hw_params.num_ports; i++) {
605 /* Don't touch the remaining ports */
606 if (priv->port_sts[i].bridge_dev != bridge)
609 reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
611 core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i));
/* NOTE(review): this caches remote port i's mask into
 * port_sts[port]; br_join uses port_sts[i] here.  Looks like it
 * should be [i] — verify against the bridging logic.  The value is
 * overwritten with p_ctl after the loop anyway.
 */
612 priv->port_sts[port].vlan_ctl_mask = reg;
614 /* Prevent self removal to preserve isolation */
619 core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port));
620 priv->port_sts[port].vlan_ctl_mask = p_ctl;
621 priv->port_sts[port].bridge_dev = NULL;
623 /* Make this port join all VLANs without VLAN entries */
624 reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN);
626 if (!(reg & BIT(cpu_port)))
627 reg |= BIT(cpu_port);
628 core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN);
/* DSA STP-state callback: translate the bridge STP state into the
 * hardware G_MISTP state and program it into the port control
 * register, fast-ageing the port's ARL entries when transitioning out
 * of a learning/forwarding state.
 */
631 static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
634 struct bcm_sf2_priv *priv = ds_to_priv(ds);
635 u8 hw_state, cur_hw_state;
638 reg = core_readl(priv, CORE_G_PCTL_PORT(port));
639 cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
642 case BR_STATE_DISABLED:
643 hw_state = G_MISTP_DIS_STATE;
645 case BR_STATE_LISTENING:
646 hw_state = G_MISTP_LISTEN_STATE;
648 case BR_STATE_LEARNING:
649 hw_state = G_MISTP_LEARN_STATE;
651 case BR_STATE_FORWARDING:
652 hw_state = G_MISTP_FWD_STATE;
654 case BR_STATE_BLOCKING:
655 hw_state = G_MISTP_BLOCK_STATE;
658 pr_err("%s: invalid STP state: %d\n", __func__, state);
662 /* Fast-age ARL entries if we are moving a port from Learning or
663 * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
666 if (cur_hw_state != hw_state) {
667 if (cur_hw_state >= G_MISTP_LEARN_STATE &&
668 hw_state <= G_MISTP_LISTEN_STATE) {
669 if (bcm_sf2_sw_fast_age_port(ds, port)) {
670 pr_err("%s: fast-ageing failed\n", __func__);
676 reg = core_readl(priv, CORE_G_PCTL_PORT(port));
677 reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
679 core_writel(priv, reg, CORE_G_PCTL_PORT(port));
682 /* Address Resolution Logic routines */
/* Poll CORE_ARLA_RWCTL until the pending ARL read/write completes
 * (ARL_STRTDN self-clears); bounded by @timeout iterations with a
 * 1-2ms sleep between polls.
 */
683 static int bcm_sf2_arl_op_wait(struct bcm_sf2_priv *priv)
685 unsigned int timeout = 10;
689 reg = core_readl(priv, CORE_ARLA_RWCTL);
690 if (!(reg & ARL_STRTDN))
693 usleep_range(1000, 2000);
/* Start an ARL access and wait for completion.  @op selects read vs
 * write (callers pass 1 for read, 0 for write; the bit encoding is
 * elided in this view).  SVL addressing is forced by clearing
 * IVL_SVL_SELECT.
 */
699 static int bcm_sf2_arl_rw_op(struct bcm_sf2_priv *priv, unsigned int op)
706 cmd = core_readl(priv, CORE_ARLA_RWCTL);
707 cmd &= ~IVL_SVL_SELECT;
713 core_writel(priv, cmd, CORE_ARLA_RWCTL);
715 return bcm_sf2_arl_op_wait(priv);
/* Scan the 4 ARL bins for an entry matching @mac/@vid, decoding each
 * bin into @ent and reporting the matching bin index through @idx.
 * When @is_valid is false the caller is verifying a deletion, so a
 * still-matching entry is treated differently (tail of the logic is
 * elided in this view).
 */
718 static int bcm_sf2_arl_read(struct bcm_sf2_priv *priv, u64 mac,
719 u16 vid, struct bcm_sf2_arl_entry *ent, u8 *idx,
725 ret = bcm_sf2_arl_op_wait(priv);
729 /* Read the 4 bins */
730 for (i = 0; i < 4; i++) {
734 mac_vid = core_readq(priv, CORE_ARLA_MACVID_ENTRY(i));
735 fwd_entry = core_readl(priv, CORE_ARLA_FWD_ENTRY(i));
736 bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);
738 if (ent->is_valid && is_valid) {
743 /* This is the MAC we just deleted */
/* NOTE(review): (mac_vid & mac) is a bitwise AND, not an equality
 * match — presumably intentional given the deletion flow, but worth
 * confirming against a comparison of the MAC field.
 */
744 if (!is_valid && (mac_vid & mac))
/* Perform an ARL add/delete for (@addr, @vid) on @port: read the bins
 * to locate an existing or free slot, build the new entry (static,
 * valid per @is_valid), write it back, and re-read to verify.
 */
751 static int bcm_sf2_arl_op(struct bcm_sf2_priv *priv, int op, int port,
752 const unsigned char *addr, u16 vid, bool is_valid)
754 struct bcm_sf2_arl_entry ent;
756 u64 mac, mac_vid = 0;
760 /* Convert the array into a 64-bit MAC */
761 mac = bcm_sf2_mac_to_u64(addr);
763 /* Perform a read for the given MAC and VID */
764 core_writeq(priv, mac, CORE_ARLA_MAC);
765 core_writel(priv, vid, CORE_ARLA_VID);
767 /* Issue a read operation for this MAC */
768 ret = bcm_sf2_arl_rw_op(priv, 1);
772 ret = bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
773 /* If this is a read, just finish now */
777 /* We could not find a matching MAC, so reset to a new entry */
783 memset(&ent, 0, sizeof(ent));
785 ent.is_valid = is_valid;
/* ARL entries installed from the FDB path are always static */
787 ent.is_static = true;
788 memcpy(ent.mac, addr, ETH_ALEN);
789 bcm_sf2_arl_from_entry(&mac_vid, &fwd_entry, &ent);
791 core_writeq(priv, mac_vid, CORE_ARLA_MACVID_ENTRY(idx));
792 core_writel(priv, fwd_entry, CORE_ARLA_FWD_ENTRY(idx));
794 ret = bcm_sf2_arl_rw_op(priv, 0);
798 /* Re-read the entry to check */
799 return bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
/* switchdev prepare phase for FDB add: nothing to validate, the
 * hardware operation happens in the commit phase (bcm_sf2_sw_fdb_add).
 */
802 static int bcm_sf2_sw_fdb_prepare(struct dsa_switch *ds, int port,
803 const struct switchdev_obj_port_fdb *fdb,
804 struct switchdev_trans *trans)
806 /* We do not need to do anything specific here yet */
/* switchdev commit phase for FDB add: install a valid ARL entry for
 * the address/VID.  The commit phase cannot return errors, so failure
 * is only logged.
 */
810 static void bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
811 const struct switchdev_obj_port_fdb *fdb,
812 struct switchdev_trans *trans)
814 struct bcm_sf2_priv *priv = ds_to_priv(ds);
816 if (bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true))
817 pr_err("%s: failed to add MAC address\n", __func__);
/* FDB delete: mark the matching ARL entry invalid (is_valid = false) */
820 static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port,
821 const struct switchdev_obj_port_fdb *fdb)
823 struct bcm_sf2_priv *priv = ds_to_priv(ds);
825 return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
/* Poll CORE_ARLA_SRCH_CTL during an ARL search: finish when the search
 * completes (ARLA_SRCH_STDN clears) or when a valid result is latched
 * (ARLA_SRCH_VLID); bounded by @timeout iterations.
 */
828 static int bcm_sf2_arl_search_wait(struct bcm_sf2_priv *priv)
830 unsigned timeout = 1000;
834 reg = core_readl(priv, CORE_ARLA_SRCH_CTL);
835 if (!(reg & ARLA_SRCH_STDN))
838 if (reg & ARLA_SRCH_VLID)
841 usleep_range(1000, 2000);
/* Decode ARL search result slot @idx (0 or 1) into @ent from the
 * MACVID and forwarding result registers.
 */
847 static void bcm_sf2_arl_search_rd(struct bcm_sf2_priv *priv, u8 idx,
848 struct bcm_sf2_arl_entry *ent)
853 mac_vid = core_readq(priv, CORE_ARLA_SRCH_RSLT_MACVID(idx));
854 fwd_entry = core_readl(priv, CORE_ARLA_SRCH_RSLT(idx));
855 bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);
/* Copy one ARL search result into the switchdev FDB object and hand it
 * to the dump callback @cb.  Entries belonging to other ports are
 * skipped.  Static entries are reported as NUD_NOARP, learned ones as
 * NUD_REACHABLE.
 */
858 static int bcm_sf2_sw_fdb_copy(struct net_device *dev, int port,
859 const struct bcm_sf2_arl_entry *ent,
860 struct switchdev_obj_port_fdb *fdb,
861 int (*cb)(struct switchdev_obj *obj))
866 if (port != ent->port)
869 ether_addr_copy(fdb->addr, ent->mac)
871 fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE;
873 return cb(&fdb->obj);
/* FDB dump: start a hardware ARL search and iterate over result pairs
 * (the search engine returns two entries per step), forwarding each to
 * @cb, until both entries are invalid or CORE_ARLA_NUM_ENTRIES
 * iterations have been consumed.
 */
876 static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
877 struct switchdev_obj_port_fdb *fdb,
878 int (*cb)(struct switchdev_obj *obj))
880 struct bcm_sf2_priv *priv = ds_to_priv(ds);
881 struct net_device *dev = ds->ports[port].netdev;
882 struct bcm_sf2_arl_entry results[2];
883 unsigned int count = 0;
886 /* Start search operation */
887 core_writel(priv, ARLA_SRCH_STDN, CORE_ARLA_SRCH_CTL);
890 ret = bcm_sf2_arl_search_wait(priv);
894 /* Read both entries, then return their values back */
895 bcm_sf2_arl_search_rd(priv, 0, &results[0]);
896 ret = bcm_sf2_sw_fdb_copy(dev, port, &results[0], fdb, cb);
900 bcm_sf2_arl_search_rd(priv, 1, &results[1]);
901 ret = bcm_sf2_sw_fdb_copy(dev, port, &results[1], fdb, cb);
/* Stop when the search returns no more valid entries */
905 if (!results[0].is_valid && !results[1].is_valid)
908 } while (count++ < CORE_ARLA_NUM_ENTRIES);
/* Indirect pseudo-PHY access workaround: temporarily make the switch
 * the MDIO master (MDIO_MASTER_SEL), perform the register access as
 * "page << 8 | offset" writes through the core, then restore normal
 * MDIO mastering.  @op appears to select read (1) vs write (0) —
 * confirm against the elided branch.
 */
913 static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv *priv, int op, int addr,
919 reg = reg_readl(priv, REG_SWITCH_CNTRL);
920 reg |= MDIO_MASTER_SEL;
921 reg_writel(priv, reg, REG_SWITCH_CNTRL);
923 /* Page << 8 | offset */
926 core_writel(priv, addr, reg);
928 /* Page << 8 | offset */
929 reg = 0x80 << 8 | regnum << 1;
933 ret = core_readl(priv, reg);
935 core_writel(priv, val, reg);
937 reg = reg_readl(priv, REG_SWITCH_CNTRL);
938 reg &= ~MDIO_MASTER_SEL;
939 reg_writel(priv, reg, REG_SWITCH_CNTRL);
/* Slave MDIO bus read: divert accesses to the Broadcom pseudo-PHY
 * through the indirect-access workaround when that address is flagged
 * in indir_phy_mask; everything else goes to the master MDIO bus.
 */
944 static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
946 struct bcm_sf2_priv *priv = bus->priv;
948 /* Intercept reads from Broadcom pseudo-PHY address, else, send
949 * them to our master MDIO bus controller
951 if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
952 return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
954 return mdiobus_read(priv->master_mii_bus, addr, regnum);
/* Slave MDIO bus write: mirror of bcm_sf2_sw_mdio_read() for writes.
 * Note the pseudo-PHY branch does not return early here — the write is
 * also forwarded to the master bus (control flow between these lines
 * is elided in this view; verify whether that is intentional).
 */
957 static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
960 struct bcm_sf2_priv *priv = bus->priv;
962 /* Intercept writes to the Broadcom pseudo-PHY address, else,
963 * send them to our master MDIO bus controller
965 if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
966 bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
968 mdiobus_write(priv->master_mii_bus, addr, regnum, val);
/* INTRL2 bank 0 interrupt handler: latch the unmasked status bits into
 * irq0_stat and acknowledge them.
 */
973 static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
975 struct bcm_sf2_priv *priv = dev_id;
977 priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
979 intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
/* INTRL2 bank 1 interrupt handler: latch and acknowledge the status,
 * then track link up/down events for port 7 (the MoCA port's link
 * state is only known through these interrupts).
 */
984 static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
986 struct bcm_sf2_priv *priv = dev_id;
988 priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
990 intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
992 if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF))
993 priv->port_sts[7].link = 1;
994 if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF))
995 priv->port_sts[7].link = 0;
/* Software-reset the switch core via the watchdog control register and
 * poll until the SOFTWARE_RESET bit self-clears, sleeping 1-2ms per
 * poll; bounded by @timeout iterations.
 */
1000 static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
1002 unsigned int timeout = 1000;
1005 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
1006 reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
1007 core_writel(priv, reg, CORE_WATCHDOG_CTRL);
1010 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
1011 if (!(reg & SOFTWARE_RESET))
1014 usleep_range(1000, 2000);
1015 } while (timeout-- > 0);
/* Mask and acknowledge every interrupt in both INTRL2 banks, leaving
 * nothing unmasked (MASK_CLEAR written with 0).
 */
1023 static void bcm_sf2_intr_disable(struct bcm_sf2_priv *priv)
1025 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
1026 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1027 intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
1028 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
1029 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1030 intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
/* Walk the device-tree port children to build int_phy_mask (ports with
 * phy-mode = "internal") and locate the MoCA port (phy-mode "moca");
 * moca_port stays -1 when none is found.
 */
1033 static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
1034 struct device_node *dn)
1036 struct device_node *port;
1037 const char *phy_mode_str;
1039 unsigned int port_num;
1042 priv->moca_port = -1;
1044 for_each_available_child_of_node(dn, port) {
1045 if (of_property_read_u32(port, "reg", &port_num))
1048 /* Internal PHYs get assigned a specific 'phy-mode' property
1049 * value: "internal" to help flag them before MDIO probing
1050 * has completed, since they might be turned off at that
1053 mode = of_get_phy_mode(port);
1055 ret = of_property_read_string(port, "phy-mode",
1060 if (!strcasecmp(phy_mode_str, "internal"))
1061 priv->int_phy_mask |= 1 << port_num;
1064 if (mode == PHY_INTERFACE_MODE_MOCA)
1065 priv->moca_port = port_num;
/* Locate the integrated "brcm,unimac-mdio" master bus (deferring probe
 * until it exists), then register a slave MII bus whose read/write
 * hooks divert pseudo-PHY accesses through the indirect workaround on
 * 7445D0 (see indir_phy_mask) and pass everything else to the master.
 */
1069 static int bcm_sf2_mdio_register(struct dsa_switch *ds)
1071 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1072 struct device_node *dn;
1076 /* Find our integrated MDIO bus node */
1077 dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
1078 priv->master_mii_bus = of_mdio_find_bus(dn);
/* Master bus not probed yet — retry later */
1079 if (!priv->master_mii_bus)
1080 return -EPROBE_DEFER;
1082 get_device(&priv->master_mii_bus->dev);
1083 priv->master_mii_dn = dn;
1085 priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
1086 if (!priv->slave_mii_bus)
1089 priv->slave_mii_bus->priv = priv;
1090 priv->slave_mii_bus->name = "sf2 slave mii";
1091 priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read;
1092 priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write;
1093 snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d",
1095 priv->slave_mii_bus->dev.of_node = dn;
1097 /* Include the pseudo-PHY address to divert reads towards our
1098 * workaround. This is only required for 7445D0, since 7445E0
1099 * disconnects the internal switch pseudo-PHY such that we can use the
1100 * regular SWITCH_MDIO master controller instead.
1102 * Here we flag the pseudo PHY as needing special treatment and would
1103 * otherwise make all other PHY read/writes go to the master MDIO bus
1104 * controller that comes with this switch backed by the "mdio-unimac"
1107 if (of_machine_is_compatible("brcm,bcm7445d0"))
1108 priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
1110 priv->indir_phy_mask = 0;
1112 ds->phys_mii_mask = priv->indir_phy_mask;
1113 ds->slave_mii_bus = priv->slave_mii_bus;
1114 priv->slave_mii_bus->parent = ds->dev->parent;
1115 priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
1118 err = of_mdiobus_register(priv->slave_mii_bus, dn);
1120 err = mdiobus_register(priv->slave_mii_bus);
/* Tear down the slave MII bus and drop the master bus DT node
 * reference taken in bcm_sf2_mdio_register().
 */
1128 static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
1130 mdiobus_unregister(priv->slave_mii_bus);
1131 if (priv->master_mii_dn)
1132 of_node_put(priv->master_mii_dn);
/* DSA set_addr callback — the switch needs no MAC programmed here
 * (body elided in this view; presumably just returns 0).
 */
1135 static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr)
/* Report the integrated GPHY revision to phylib; the cached
 * hw_params.gphy_rev already matches the layout the BCM7xxx PHY
 * driver expects (revision in 15:8, patch level in 7:0).
 */
1140 static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
1142 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1144 /* The BCM7xxx PHY driver expects to find the integrated PHY revision
1145 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
1146 * the REG_PHY_REVISION register layout is.
1149 return priv->hw_params.gphy_rev;
/* DSA adjust_link callback: program the RGMII block for the PHY's
 * interface mode (delay variant, MII, reverse MII), disable it when
 * the link is down to save power, propagate pause settings, and force
 * the negotiated speed/duplex into the port's status override register.
 */
1152 static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
1153 struct phy_device *phydev)
1155 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1156 u32 id_mode_dis = 0, port_mode;
1157 const char *str = NULL;
1160 switch (phydev->interface) {
1161 case PHY_INTERFACE_MODE_RGMII:
1162 str = "RGMII (no delay)";
1164 case PHY_INTERFACE_MODE_RGMII_TXID:
1166 str = "RGMII (TX delay)";
1167 port_mode = EXT_GPHY;
1169 case PHY_INTERFACE_MODE_MII:
1171 port_mode = EXT_EPHY;
1173 case PHY_INTERFACE_MODE_REVMII:
1174 str = "Reverse MII";
1175 port_mode = EXT_REVMII;
1178 /* All other PHYs: internal and MoCA */
1182 /* If the link is down, just disable the interface to conserve power */
1183 if (!phydev->link) {
1184 reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
1185 reg &= ~RGMII_MODE_EN;
1186 reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
1190 /* Clear id_mode_dis bit, and the existing port mode, but
1191 * make sure we enable the RGMII block for data to pass
1193 reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
1194 reg &= ~ID_MODE_DIS;
1195 reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
1196 reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
1198 reg |= port_mode | RGMII_MODE_EN;
1202 if (phydev->pause) {
1203 if (phydev->asym_pause)
1208 reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
1210 pr_info("Port %d configured for %s\n", port, str);
1213 /* Force link settings detected from the PHY */
1215 switch (phydev->speed) {
1217 reg |= SPDSTS_1000 << SPEED_SHIFT;
1220 reg |= SPDSTS_100 << SPEED_SHIFT;
1226 if (phydev->duplex == DUPLEX_FULL)
1229 core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
/* fixed-PHY status callback: fill in link/duplex/pause from the
 * switch's live DUPSTS/PAUSESTS registers.  The MoCA port gets its
 * link state from the interrupt handler instead, plus a forced
 * carrier-off to keep mocad's autoneg forcing from wedging phylib.
 */
1232 static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
1233 struct fixed_phy_status *status)
1235 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1239 duplex = core_readl(priv, CORE_DUPSTS);
1240 pause = core_readl(priv, CORE_PAUSESTS);
1244 /* MoCA port is special as we do not get link status from CORE_LNKSTS,
1245 * which means that we need to force the link at the port override
1246 * level to get the data to flow. We do use what the interrupt handler
1247 * did determine before.
1249 * For the other ports, we just force the link status, since this is
1250 * a fixed PHY device.
1252 if (port == priv->moca_port) {
1253 status->link = priv->port_sts[port].link;
1254 /* For MoCA interfaces, also force a link down notification
1255 * since some version of the user-space daemon (mocad) use
1256 * cmd->autoneg to force the link, which messes up the PHY
1257 * state machine and make it go in PHY_FORCING state instead.
1260 netif_carrier_off(ds->ports[port].netdev);
1264 status->duplex = !!(duplex & (1 << port));
1267 reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
1273 core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
/* Asymmetric pause when both RX and TX pause bits are set */
1275 if ((pause & (1 << port)) &&
1276 (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
1277 status->asym_pause = 1;
1281 if (pause & (1 << port))
/* System suspend: mask all interrupts and power down every port that
 * is still up (enabled user ports plus the CPU/IMP port).
 */
1285 static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
1287 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1290 bcm_sf2_intr_disable(priv);
1292 /* Disable all ports physically present including the IMP
1293 * port, the other ones have already been disabled during
1296 for (port = 0; port < DSA_MAX_PORTS; port++) {
1297 if ((1 << port) & ds->enabled_port_mask ||
1298 dsa_is_cpu_port(ds, port))
1299 bcm_sf2_port_disable(ds, port, NULL);
/* System resume: software-reset the switch, power the GPHY back up if
 * present, then re-run setup for every enabled user port and the
 * CPU/IMP port.
 */
1305 static int bcm_sf2_sw_resume(struct dsa_switch *ds)
1307 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1311 ret = bcm_sf2_sw_rst(priv);
1313 pr_err("%s: failed to software reset switch\n", __func__);
1317 if (priv->hw_params.num_gphy == 1)
1318 bcm_sf2_gphy_enable_set(ds, true);
1320 for (port = 0; port < DSA_MAX_PORTS; port++) {
1321 if ((1 << port) & ds->enabled_port_mask)
1322 bcm_sf2_port_setup(ds, port, NULL);
1323 else if (dsa_is_cpu_port(ds, port))
1324 bcm_sf2_imp_setup(ds, port);
/* ethtool get_wol callback: WoL is implemented by the master (CPU)
 * network device, so advertise its capabilities, and report this
 * port's options only if the port is flagged in wol_ports_mask.
 */
1330 static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
1331 struct ethtool_wolinfo *wol)
1333 struct net_device *p = ds->dst[ds->index].master_netdev;
1334 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1335 struct ethtool_wolinfo pwol;
1337 /* Get the parent device WoL settings */
1338 p->ethtool_ops->get_wol(p, &pwol);
1340 /* Advertise the parent device supported settings */
1341 wol->supported = pwol.supported;
1342 memset(&wol->sopass, 0, sizeof(wol->sopass));
1344 if (pwol.wolopts & WAKE_MAGICSECURE)
1345 memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass));
1347 if (priv->wol_ports_mask & (1 << port))
1348 wol->wolopts = pwol.wolopts;
/* ethtool set_wol callback: validate the request against the master
 * device's capabilities, track per-port WoL in wol_ports_mask (keeping
 * the CPU port enabled whenever any user port is), and delegate the
 * actual WoL programming to the master device.
 */
1353 static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
1354 struct ethtool_wolinfo *wol)
1356 struct net_device *p = ds->dst[ds->index].master_netdev;
1357 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1358 s8 cpu_port = ds->dst[ds->index].cpu_port;
1359 struct ethtool_wolinfo pwol;
1361 p->ethtool_ops->get_wol(p, &pwol);
/* Reject options the master device cannot provide */
1362 if (wol->wolopts & ~pwol.supported)
1366 priv->wol_ports_mask |= (1 << port);
1368 priv->wol_ports_mask &= ~(1 << port);
1370 /* If we have at least one port enabled, make sure the CPU port
1371 * is also enabled. If the CPU port is the last one enabled, we disable
1372 * it since this configuration does not make sense.
1374 if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port))
1375 priv->wol_ports_mask |= (1 << cpu_port);
1377 priv->wol_ports_mask &= ~(1 << cpu_port);
1379 return p->ethtool_ops->set_wol(p, wol);
1382 static void bcm_sf2_enable_vlan(struct bcm_sf2_priv *priv, bool enable)
1384 u32 mgmt, vc0, vc1, vc4, vc5;
1386 mgmt = core_readl(priv, CORE_SWMODE);
1387 vc0 = core_readl(priv, CORE_VLAN_CTRL0);
1388 vc1 = core_readl(priv, CORE_VLAN_CTRL1);
1389 vc4 = core_readl(priv, CORE_VLAN_CTRL4);
1390 vc5 = core_readl(priv, CORE_VLAN_CTRL5);
1392 mgmt &= ~SW_FWDG_MODE;
1395 vc0 |= VLAN_EN | VLAN_LEARN_MODE_IVL;
1396 vc1 |= EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP;
1397 vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
1398 vc4 |= INGR_VID_CHK_DROP;
1399 vc5 |= DROP_VTABLE_MISS | EN_VID_FFF_FWD;
1401 vc0 &= ~(VLAN_EN | VLAN_LEARN_MODE_IVL);
1402 vc1 &= ~(EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP);
1403 vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT);
1404 vc5 &= ~(DROP_VTABLE_MISS | EN_VID_FFF_FWD);
1405 vc4 |= INGR_VID_CHK_VID_VIOL_IMP;
1408 core_writel(priv, vc0, CORE_VLAN_CTRL0);
1409 core_writel(priv, vc1, CORE_VLAN_CTRL1);
1410 core_writel(priv, 0, CORE_VLAN_CTRL3);
1411 core_writel(priv, vc4, CORE_VLAN_CTRL4);
1412 core_writel(priv, vc5, CORE_VLAN_CTRL5);
1413 core_writel(priv, mgmt, CORE_SWMODE);
1416 static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds)
1418 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1421 /* Clear all VLANs */
1422 bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_CLEAR);
1424 for (port = 0; port < priv->hw_params.num_ports; port++) {
1425 if (!((1 << port) & ds->enabled_port_mask))
1428 core_writel(priv, 1, CORE_DEFAULT_1Q_TAG_P(port));
1432 static int bcm_sf2_sw_vlan_filtering(struct dsa_switch *ds, int port,
1433 bool vlan_filtering)
1438 static int bcm_sf2_sw_vlan_prepare(struct dsa_switch *ds, int port,
1439 const struct switchdev_obj_port_vlan *vlan,
1440 struct switchdev_trans *trans)
1442 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1444 bcm_sf2_enable_vlan(priv, true);
1449 static void bcm_sf2_sw_vlan_add(struct dsa_switch *ds, int port,
1450 const struct switchdev_obj_port_vlan *vlan,
1451 struct switchdev_trans *trans)
1453 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1454 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1455 bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1456 s8 cpu_port = ds->dst->cpu_port;
1457 struct bcm_sf2_vlan *vl;
1460 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1461 vl = &priv->vlans[vid];
1463 bcm_sf2_get_vlan_entry(priv, vid, vl);
1465 vl->members |= BIT(port) | BIT(cpu_port);
1467 vl->untag |= BIT(port) | BIT(cpu_port);
1469 vl->untag &= ~(BIT(port) | BIT(cpu_port));
1471 bcm_sf2_set_vlan_entry(priv, vid, vl);
1472 bcm_sf2_sw_fast_age_vlan(priv, vid);
1476 core_writel(priv, vlan->vid_end, CORE_DEFAULT_1Q_TAG_P(port));
1477 core_writel(priv, vlan->vid_end,
1478 CORE_DEFAULT_1Q_TAG_P(cpu_port));
1479 bcm_sf2_sw_fast_age_vlan(priv, vid);
1483 static int bcm_sf2_sw_vlan_del(struct dsa_switch *ds, int port,
1484 const struct switchdev_obj_port_vlan *vlan)
1486 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1487 bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1488 s8 cpu_port = ds->dst->cpu_port;
1489 struct bcm_sf2_vlan *vl;
1493 pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));
1495 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1496 vl = &priv->vlans[vid];
1498 ret = bcm_sf2_get_vlan_entry(priv, vid, vl);
1502 vl->members &= ~BIT(port);
1503 if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
1508 vl->untag &= ~BIT(port);
1509 if ((vl->untag & BIT(port)) == BIT(cpu_port))
1513 bcm_sf2_set_vlan_entry(priv, vid, vl);
1514 bcm_sf2_sw_fast_age_vlan(priv, vid);
1517 core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(port));
1518 core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(cpu_port));
1519 bcm_sf2_sw_fast_age_vlan(priv, vid);
1524 static int bcm_sf2_sw_vlan_dump(struct dsa_switch *ds, int port,
1525 struct switchdev_obj_port_vlan *vlan,
1526 int (*cb)(struct switchdev_obj *obj))
1528 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1529 struct bcm_sf2_port_status *p = &priv->port_sts[port];
1530 struct bcm_sf2_vlan *vl;
1534 pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port));
1536 for (vid = 0; vid < VLAN_N_VID; vid++) {
1537 vl = &priv->vlans[vid];
1539 if (!(vl->members & BIT(port)))
1542 vlan->vid_begin = vlan->vid_end = vid;
1545 if (vl->untag & BIT(port))
1546 vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
1548 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
1550 err = cb(&vlan->obj);
1558 static int bcm_sf2_sw_setup(struct dsa_switch *ds)
1560 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1563 /* Enable all valid ports and disable those unused */
1564 for (port = 0; port < priv->hw_params.num_ports; port++) {
1565 /* IMP port receives special treatment */
1566 if ((1 << port) & ds->enabled_port_mask)
1567 bcm_sf2_port_setup(ds, port, NULL);
1568 else if (dsa_is_cpu_port(ds, port))
1569 bcm_sf2_imp_setup(ds, port);
1571 bcm_sf2_port_disable(ds, port, NULL);
1574 bcm_sf2_sw_configure_vlan(ds);
1579 static struct dsa_switch_driver bcm_sf2_switch_driver = {
1580 .tag_protocol = DSA_TAG_PROTO_BRCM,
1581 .setup = bcm_sf2_sw_setup,
1582 .set_addr = bcm_sf2_sw_set_addr,
1583 .get_phy_flags = bcm_sf2_sw_get_phy_flags,
1584 .get_strings = bcm_sf2_sw_get_strings,
1585 .get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats,
1586 .get_sset_count = bcm_sf2_sw_get_sset_count,
1587 .adjust_link = bcm_sf2_sw_adjust_link,
1588 .fixed_link_update = bcm_sf2_sw_fixed_link_update,
1589 .suspend = bcm_sf2_sw_suspend,
1590 .resume = bcm_sf2_sw_resume,
1591 .get_wol = bcm_sf2_sw_get_wol,
1592 .set_wol = bcm_sf2_sw_set_wol,
1593 .port_enable = bcm_sf2_port_setup,
1594 .port_disable = bcm_sf2_port_disable,
1595 .get_eee = bcm_sf2_sw_get_eee,
1596 .set_eee = bcm_sf2_sw_set_eee,
1597 .port_bridge_join = bcm_sf2_sw_br_join,
1598 .port_bridge_leave = bcm_sf2_sw_br_leave,
1599 .port_stp_state_set = bcm_sf2_sw_br_set_stp_state,
1600 .port_fdb_prepare = bcm_sf2_sw_fdb_prepare,
1601 .port_fdb_add = bcm_sf2_sw_fdb_add,
1602 .port_fdb_del = bcm_sf2_sw_fdb_del,
1603 .port_fdb_dump = bcm_sf2_sw_fdb_dump,
1604 .port_vlan_filtering = bcm_sf2_sw_vlan_filtering,
1605 .port_vlan_prepare = bcm_sf2_sw_vlan_prepare,
1606 .port_vlan_add = bcm_sf2_sw_vlan_add,
1607 .port_vlan_del = bcm_sf2_sw_vlan_del,
1608 .port_vlan_dump = bcm_sf2_sw_vlan_dump,
1611 static int bcm_sf2_sw_probe(struct platform_device *pdev)
1613 const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
1614 struct device_node *dn = pdev->dev.of_node;
1615 struct bcm_sf2_priv *priv;
1616 struct dsa_switch *ds;
1617 void __iomem **base;
1623 ds = devm_kzalloc(&pdev->dev, sizeof(*ds) + sizeof(*priv), GFP_KERNEL);
1627 priv = (struct bcm_sf2_priv *)(ds + 1);
1629 ds->dev = &pdev->dev;
1630 ds->drv = &bcm_sf2_switch_driver;
1632 dev_set_drvdata(&pdev->dev, ds);
1634 spin_lock_init(&priv->indir_lock);
1635 mutex_init(&priv->stats_mutex);
1637 bcm_sf2_identify_ports(priv, dn->child);
1639 priv->irq0 = irq_of_parse_and_map(dn, 0);
1640 priv->irq1 = irq_of_parse_and_map(dn, 1);
1643 for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
1644 r = platform_get_resource(pdev, IORESOURCE_MEM, i);
1645 *base = devm_ioremap_resource(&pdev->dev, r);
1646 if (IS_ERR(*base)) {
1647 pr_err("unable to find register: %s\n", reg_names[i]);
1648 return PTR_ERR(*base);
1653 ret = bcm_sf2_sw_rst(priv);
1655 pr_err("unable to software reset switch: %d\n", ret);
1659 ret = bcm_sf2_mdio_register(ds);
1661 pr_err("failed to register MDIO bus\n");
1665 /* Disable all interrupts and request them */
1666 bcm_sf2_intr_disable(priv);
1668 ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0,
1671 pr_err("failed to request switch_0 IRQ\n");
1675 ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0,
1678 pr_err("failed to request switch_1 IRQ\n");
1682 /* Reset the MIB counters */
1683 reg = core_readl(priv, CORE_GMNCFGCFG);
1685 core_writel(priv, reg, CORE_GMNCFGCFG);
1686 reg &= ~RST_MIB_CNT;
1687 core_writel(priv, reg, CORE_GMNCFGCFG);
1689 /* Get the maximum number of ports for this switch */
1690 priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1;
1691 if (priv->hw_params.num_ports > DSA_MAX_PORTS)
1692 priv->hw_params.num_ports = DSA_MAX_PORTS;
1694 /* Assume a single GPHY setup if we can't read that property */
1695 if (of_property_read_u32(dn, "brcm,num-gphy",
1696 &priv->hw_params.num_gphy))
1697 priv->hw_params.num_gphy = 1;
1699 rev = reg_readl(priv, REG_SWITCH_REVISION);
1700 priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
1701 SWITCH_TOP_REV_MASK;
1702 priv->hw_params.core_rev = (rev & SF2_REV_MASK);
1704 rev = reg_readl(priv, REG_PHY_REVISION);
1705 priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK;
1707 ret = dsa_register_switch(ds, dn);
1711 pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
1712 priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
1713 priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
1714 priv->core, priv->irq0, priv->irq1);
1719 bcm_sf2_mdio_unregister(priv);
1723 static int bcm_sf2_sw_remove(struct platform_device *pdev)
1725 struct dsa_switch *ds = platform_get_drvdata(pdev);
1726 struct bcm_sf2_priv *priv = ds_to_priv(ds);
1728 /* Disable all ports and interrupts */
1729 priv->wol_ports_mask = 0;
1730 bcm_sf2_sw_suspend(ds);
1731 dsa_unregister_switch(ds);
1732 bcm_sf2_mdio_unregister(priv);
1737 #ifdef CONFIG_PM_SLEEP
/* PM sleep hook: forward the suspend request to the DSA core */
static int bcm_sf2_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return dsa_switch_suspend(platform_get_drvdata(pdev));
}
/* PM sleep hook: forward the resume request to the DSA core */
static int bcm_sf2_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return dsa_switch_resume(platform_get_drvdata(pdev));
}
1753 #endif /* CONFIG_PM_SLEEP */
/* No-op when CONFIG_PM_SLEEP is disabled */
static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
			 bcm_sf2_suspend, bcm_sf2_resume);
1758 static const struct of_device_id bcm_sf2_of_match[] = {
1759 { .compatible = "brcm,bcm7445-switch-v4.0" },
1763 static struct platform_driver bcm_sf2_driver = {
1764 .probe = bcm_sf2_sw_probe,
1765 .remove = bcm_sf2_sw_remove,
1768 .of_match_table = bcm_sf2_of_match,
1769 .pm = &bcm_sf2_pm_ops,
1772 module_platform_driver(bcm_sf2_driver);
1774 MODULE_AUTHOR("Broadcom Corporation");
1775 MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip");
1776 MODULE_LICENSE("GPL");
1777 MODULE_ALIAS("platform:brcm-sf2");