/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
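
/* A Tx header carrying the fields above is prepended to every packet handed
 * to the device. mlxsw_sp_txhdr_construct() below builds it as an Ethernet
 * control packet directed at an explicit local port, so transmitted frames
 * bypass the data-path forwarding lookup.
 */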
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
					 bool *p_is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];
	u8 oper_status;
	int err;

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
	if (err)
		return err;
	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
	return 0;
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);

	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
					swid);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	len = skb->len;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}
static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
				 bool pause_en, bool pfc_en, u16 delay)
{
	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);

	delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
			 MLXSW_SP_PAUSE_DELAY;

	if (pause_en || pfc_en)
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
						    pg_size + delay, pg_size);
	else
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
}

int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
				 u8 *prio_tc, bool pause_en,
				 struct ieee_pfc *my_pfc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
	u16 delay = !!my_pfc ? my_pfc->delay : 0;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i, j, err;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
	if (err)
		return err;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bool configure = false;
		bool pfc = false;

		for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
			if (prio_tc[j] == i) {
				pfc = pfc_en & BIT(j);
				configure = true;
				break;
			}
		}

		if (!configure)
			continue;
		mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      int mtu, bool pause_en)
{
	u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
	bool dcb_en = !!mlxsw_sp_port->dcb.ets;
	struct ieee_pfc *my_pfc;
	u8 *prio_tc;

	prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
	my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;

	return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
					    pause_en, my_pfc);
}
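
/* Port headroom (the per-PG buffers configured above) depends on both the MTU
 * and the PAUSE/PFC configuration, so it is recomputed whenever the MTU
 * changes below and in mlxsw_sp_port_set_pauseparam(); on failure the
 * previous headroom is restored.
 */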
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	int err;

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
	if (err)
		return err;
	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}

static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return stats;
}
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);

	kfree(spvm_pl);
	return err;
}

static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}
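
/* vFIDs back VLAN devices created on top of switch ports: each VLAN upper
 * gets a vPort and a {Port, VID} to vFID mapping. vFIDs are shared between
 * ports using the same VID, reference counted via nr_vports and destroyed
 * once the last vPort using them is removed.
 */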
static struct mlxsw_sp_vfid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
{
	struct mlxsw_sp_vfid *vfid;

	list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
		if (vfid->vid == vid)
			return vfid;
	}

	return NULL;
}

static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
				   MLXSW_SP_VFID_PORT_MAX);
}

static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						  u16 vid)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *f;
	u16 vfid, fid;
	int err;

	vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (vfid == MLXSW_SP_VFID_PORT_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	fid = mlxsw_sp_vfid_to_fid(vfid);
	err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
	if (err) {
		dev_err(dev, "Failed to create FID=%d\n", fid);
		return ERR_PTR(err);
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		goto err_allocate_vfid;

	f->vfid = vfid;
	f->vid = vid;

	list_add(&f->list, &mlxsw_sp->port_vfids.list);
	set_bit(vfid, mlxsw_sp->port_vfids.mapped);

	return f;

err_allocate_vfid:
	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vfid *vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid->vfid);

	clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
	list_del(&vfid->list);

	mlxsw_sp_vfid_op(mlxsw_sp, fid, false);

	kfree(vfid);
}
static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_vfid *vfid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vfid = vfid;
	mlxsw_sp_vport->vport.vid = vfid->vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}

static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
				  bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
					    vid);
}
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	u16 fid;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!vfid) {
		vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(vfid);
		}
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		err = -ENOMEM;
		goto err_port_vport_create;
	}

	if (!vfid->nr_vports) {
		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
					       true);
		if (err) {
			netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
				   vfid->vfid);
			goto err_vport_flood_set;
		}
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	fid = mlxsw_sp_vfid_to_fid(vfid->vfid);
	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, fid, true);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
			   vid, vfid->vfid);
		goto err_vport_fid_map;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	vfid->nr_vports++;

	return 0;

err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_vport_fid_map(mlxsw_sp_vport, fid, false);
err_vport_fid_map:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	if (!vfid->nr_vports)
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false);
err_vport_flood_set:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
err_port_vport_create:
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
	return err;
}
int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	u16 fid;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vport->vport.vfid;

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	fid = mlxsw_sp_vfid_to_fid(vfid->vfid);
	err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, fid, false);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
			   vid, vfid->vfid);
		return err;
	}

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	vfid->nr_vports--;
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	/* Destroy the vFID if no vPorts are assigned to it anymore. */
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);

	return 0;
}
static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module = mlxsw_sp_port->mapping.module;
	u8 width = mlxsw_sp_port->mapping.width;
	u8 lane = mlxsw_sp_port->mapping.lane;
	int err;

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	pause->rx_pause = mlxsw_sp_port->link.rx_pause;
	pause->tx_pause = mlxsw_sp_port->link.tx_pause;
}

static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct ethtool_pauseparam *pause)
{
	char pfcc_pl[MLXSW_REG_PFCC_LEN];

	mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
	mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);

	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
			       pfcc_pl);
}

static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
					struct ethtool_pauseparam *pause)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	bool pause_en = pause->tx_pause || pause->rx_pause;
	int err;

	if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
		netdev_err(dev, "PFC already enabled on port\n");
		return -EINVAL;
	}

	if (pause->autoneg) {
		netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
	if (err) {
		netdev_err(dev, "Failed to set PAUSE parameters\n");
		goto err_port_pause_configure;
	}

	mlxsw_sp_port->link.rx_pause = pause->rx_pause;
	mlxsw_sp_port->link.tx_pause = pause->tx_pause;

	return 0;

err_port_pause_configure:
	pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
	mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
	return err;
}
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};

static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
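
/* The ethtool stats callbacks below are driven by the table above:
 * get_sset_count() reports its length, get_strings() copies the names and
 * get_stats() reads a single PPCNT dump and applies each getter to it.
 */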
static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i;
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
		data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_HW_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}

static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].supported;
	}
	return modes;
}

static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].advertised;
	}
	return modes;
}

static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_cmd *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	ethtool_cmd_speed_set(cmd, speed);
	cmd->duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}
static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}

static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (advertising & mlxsw_sp_port_link_mode[i].advertised)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}
static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= mlxsw_sp_port_get_pauseparam,
	.set_pauseparam		= mlxsw_sp_port_set_pauseparam,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
			    eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
				    0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_TC, i, i,
					    false, 0);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that
	 * support it.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HIERARCY_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				bool split, u8 module, u8 width, u8 lane)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping.module = module;
	mlxsw_sp_port->mapping.width = width;
	mlxsw_sp_port->mapping.lane = lane;
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->hard_header_len += MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
				   mlxsw_sp_port->local_port, dev,
				   mlxsw_sp_port->split, module);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			mlxsw_sp_port->local_port);
		goto err_core_port_init;
	}

	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
	if (err)
		goto err_port_vlan_init;

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	return 0;

err_port_vlan_init:
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
err_core_port_init:
	unregister_netdev(dev);
err_register_netdev:
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}
static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;

	list_for_each_entry_safe(mlxsw_sp_vport, tmp,
				 &mlxsw_sp_port->vports_list, vport.list) {
		u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

		/* vPorts created for VLAN devices should already be gone
		 * by now, since we unregistered the port netdev.
		 */
		WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	}
}

static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_vports_fini(mlxsw_sp_port);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	free_netdev(mlxsw_sp_port->dev);
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
}
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	u8 module, width, lane;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
						    &width, &lane);
		if (err)
			goto err_port_module_info_get;
		if (!width)
			continue;
		mlxsw_sp->port_to_module[i] = module;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
					   lane);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
err_port_module_info_get:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
	return err;
}

static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
{
	u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;

	return local_port - offset;
}
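
/* Port split: a full-width port may be split into 2 or 4 sub-ports, each
 * mapped to a fraction of the module's lanes. The cluster base port
 * determines which neighbouring local ports must be free and which ports are
 * removed and re-created for the split and unsplit operations below.
 */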
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
				      u8 module, unsigned int count)
{
	u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
	int err, i;

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
					       width, i * width);
		if (err)
			goto err_port_module_map;
	}

	for (i = 0; i < count; i++) {
		err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
		if (err)
			goto err_port_swid_set;
	}

	for (i = 0; i < count; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
					   module, width, i * width);
		if (err)
			goto err_port_create;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
	i = count;
err_port_swid_set:
	for (i--; i >= 0; i--)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
					 MLXSW_PORT_SWID_DISABLED_PORT);
	i = count;
err_port_module_map:
	for (i--; i >= 0; i--)
		mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
	return err;
}

static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 u8 base_port, unsigned int count)
{
	u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
	int i;

	/* Split by four means we need to re-create two ports, otherwise
	 * only one.
	 */
	count = count / 2;

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
					 0);
	}

	for (i = 0; i < count; i++)
		__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);

	for (i = 0; i < count; i++) {
		local_port = base_port + i * 2;
		module = mlxsw_sp->port_to_module[local_port];

		mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
				     width, 0);
	}
}
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
			       unsigned int count)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 module, cur_width, base_port;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	module = mlxsw_sp_port->mapping.module;
	cur_width = mlxsw_sp_port->mapping.width;

	if (count != 2 && count != 4) {
		netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
		return -EINVAL;
	}

	if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
		netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
		return -EINVAL;
	}

	/* Make sure we have enough slave (even) ports for the split. */
	if (count == 2) {
		base_port = local_port;
		if (mlxsw_sp->ports[base_port + 1]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	} else {
		base_port = mlxsw_sp_cluster_base_port_get(local_port);
		if (mlxsw_sp->ports[base_port + 1] ||
		    mlxsw_sp->ports[base_port + 3]) {
			netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
			return -EINVAL;
		}
	}

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
	return err;
}

static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 cur_width, base_port;
	unsigned int count;
	int i;

	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
		return -EINVAL;
	}

	cur_width = mlxsw_sp_port->mapping.width;
	count = cur_width == 1 ? 4 : 2;

	base_port = mlxsw_sp_cluster_base_port_get(local_port);

	/* Determine which ports to remove. */
	if (count == 2 && local_port >= base_port + 2)
		base_port = base_port + 2;

	for (i = 0; i < count; i++)
		mlxsw_sp_port_remove(mlxsw_sp, base_port + i);

	mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);

	return 0;
}
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
			 local_port);
		return;
	}

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};

static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
				   enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_event_trap_set;

	return 0;

err_event_trap_set:
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
	return err;
}

static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
				      enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
}
2166 static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
2169 struct mlxsw_sp *mlxsw_sp = priv;
2170 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2171 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2173 if (unlikely(!mlxsw_sp_port)) {
2174 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2179 skb->dev = mlxsw_sp_port->dev;
2181 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2182 u64_stats_update_begin(&pcpu_stats->syncp);
2183 pcpu_stats->rx_packets++;
2184 pcpu_stats->rx_bytes += skb->len;
2185 u64_stats_update_end(&pcpu_stats->syncp);
2187 skb->protocol = eth_type_trans(skb, skb->dev);
2188 netif_receive_skb(skb);
2191 static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
2193 .func = mlxsw_sp_rx_listener_func,
2194 .local_port = MLXSW_PORT_DONT_CARE,
2195 .trap_id = MLXSW_TRAP_ID_FDB_MC,
2197 /* Traps for specific L2 packet types, not trapped as FDB MC */
2199 .func = mlxsw_sp_rx_listener_func,
2200 .local_port = MLXSW_PORT_DONT_CARE,
2201 .trap_id = MLXSW_TRAP_ID_STP,
2204 .func = mlxsw_sp_rx_listener_func,
2205 .local_port = MLXSW_PORT_DONT_CARE,
2206 .trap_id = MLXSW_TRAP_ID_LACP,
2209 .func = mlxsw_sp_rx_listener_func,
2210 .local_port = MLXSW_PORT_DONT_CARE,
2211 .trap_id = MLXSW_TRAP_ID_EAPOL,
2214 .func = mlxsw_sp_rx_listener_func,
2215 .local_port = MLXSW_PORT_DONT_CARE,
2216 .trap_id = MLXSW_TRAP_ID_LLDP,
2219 .func = mlxsw_sp_rx_listener_func,
2220 .local_port = MLXSW_PORT_DONT_CARE,
2221 .trap_id = MLXSW_TRAP_ID_MMRP,
2224 .func = mlxsw_sp_rx_listener_func,
2225 .local_port = MLXSW_PORT_DONT_CARE,
2226 .trap_id = MLXSW_TRAP_ID_MVRP,
2229 .func = mlxsw_sp_rx_listener_func,
2230 .local_port = MLXSW_PORT_DONT_CARE,
2231 .trap_id = MLXSW_TRAP_ID_RPVST,
2234 .func = mlxsw_sp_rx_listener_func,
2235 .local_port = MLXSW_PORT_DONT_CARE,
2236 .trap_id = MLXSW_TRAP_ID_DHCP,
2239 .func = mlxsw_sp_rx_listener_func,
2240 .local_port = MLXSW_PORT_DONT_CARE,
2241 .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
2244 .func = mlxsw_sp_rx_listener_func,
2245 .local_port = MLXSW_PORT_DONT_CARE,
2246 .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
2249 .func = mlxsw_sp_rx_listener_func,
2250 .local_port = MLXSW_PORT_DONT_CARE,
2251 .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
2254 .func = mlxsw_sp_rx_listener_func,
2255 .local_port = MLXSW_PORT_DONT_CARE,
2256 .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
2259 .func = mlxsw_sp_rx_listener_func,
2260 .local_port = MLXSW_PORT_DONT_CARE,
2261 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
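/* Trap initialization: configure the RX and control trap groups (HTGT),
 * then register each listener from the table above and set its HPKT
 * action to TRAP_TO_CPU. On error, already-registered listeners are
 * unregistered and their action is restored to FORWARD.
 */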
2265 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
2267 char htgt_pl[MLXSW_REG_HTGT_LEN];
2268 char hpkt_pl[MLXSW_REG_HPKT_LEN];
2272 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
2273 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
2277 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
2278 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
2282 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2283 err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
2284 &mlxsw_sp_rx_listener[i],
2287 goto err_rx_listener_register;
2289 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
2290 mlxsw_sp_rx_listener[i].trap_id);
2291 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2293 goto err_rx_trap_set;
2298 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2299 &mlxsw_sp_rx_listener[i],
2301 err_rx_listener_register:
2302 for (i--; i >= 0; i--) {
2303 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
2304 mlxsw_sp_rx_listener[i].trap_id);
2305 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2307 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2308 &mlxsw_sp_rx_listener[i],
2314 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2316 char hpkt_pl[MLXSW_REG_HPKT_LEN];
2319 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2320 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
2321 mlxsw_sp_rx_listener[i].trap_id);
2322 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2324 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2325 &mlxsw_sp_rx_listener[i],
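/* Flood configuration (SFGC): unknown unicast is flooded using the UC
 * flood table, every other flooded traffic type uses the BM table.
 * vFID-based bridges use the FID table type, while 802.1Q bridges use
 * the FID-offset table type.
 */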
2330 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
2331 enum mlxsw_reg_sfgc_type type,
2332 enum mlxsw_reg_sfgc_bridge_type bridge_type)
2334 enum mlxsw_flood_table_type table_type;
2335 enum mlxsw_sp_flood_table flood_table;
2336 char sfgc_pl[MLXSW_REG_SFGC_LEN];
2338 if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
2339 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
2340 else
2341 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
2343 if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
2344 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
2345 else
2346 flood_table = MLXSW_SP_FLOOD_TABLE_BM;
2348 mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
2350 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
2353 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
2357 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
2358 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
2361 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2362 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
2366 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2367 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
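/* LAG hash configuration (SLCR): egress port selection hashes over L2
 * (SMAC, DMAC, EtherType, VLAN ID), L3 (SIP, DIP, IP protocol) and L4
 * (source and destination port) fields.
 */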
2375 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2377 char slcr_pl[MLXSW_REG_SLCR_LEN];
2379 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2380 MLXSW_REG_SLCR_LAG_HASH_DMAC |
2381 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2382 MLXSW_REG_SLCR_LAG_HASH_VLANID |
2383 MLXSW_REG_SLCR_LAG_HASH_SIP |
2384 MLXSW_REG_SLCR_LAG_HASH_DIP |
2385 MLXSW_REG_SLCR_LAG_HASH_SPORT |
2386 MLXSW_REG_SLCR_LAG_HASH_DPORT |
2387 MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
2388 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
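/* Main initialization path. Ordering matters: ports are created before
 * PUDE events are registered, and traps, flood tables, buffers and LAG
 * are set up before switchdev. The error labels below unwind in the
 * reverse order.
 */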
2391 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
2392 const struct mlxsw_bus_info *mlxsw_bus_info)
2394 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2397 mlxsw_sp->core = mlxsw_core;
2398 mlxsw_sp->bus_info = mlxsw_bus_info;
2399 INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
2400 INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
2401 INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
2403 err = mlxsw_sp_base_mac_get(mlxsw_sp);
2405 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
2409 err = mlxsw_sp_ports_create(mlxsw_sp);
2411 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
2415 err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2417 dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
2418 goto err_event_register;
2421 err = mlxsw_sp_traps_init(mlxsw_sp);
2423 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
2424 goto err_rx_listener_register;
2427 err = mlxsw_sp_flood_init(mlxsw_sp);
2429 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
2430 goto err_flood_init;
2433 err = mlxsw_sp_buffers_init(mlxsw_sp);
2435 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
2436 goto err_buffers_init;
2439 err = mlxsw_sp_lag_init(mlxsw_sp);
2441 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
2445 err = mlxsw_sp_switchdev_init(mlxsw_sp);
2447 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
2448 goto err_switchdev_init;
2455 mlxsw_sp_buffers_fini(mlxsw_sp);
2458 mlxsw_sp_traps_fini(mlxsw_sp);
2459 err_rx_listener_register:
2460 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2462 mlxsw_sp_ports_remove(mlxsw_sp);
2466 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
2468 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2470 mlxsw_sp_switchdev_fini(mlxsw_sp);
2471 mlxsw_sp_buffers_fini(mlxsw_sp);
2472 mlxsw_sp_traps_fini(mlxsw_sp);
2473 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2474 mlxsw_sp_ports_remove(mlxsw_sp);
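/* Configuration profile handed to the core. The flood table sizing
 * matches the flood configuration above: FID-offset tables sized per
 * VLAN for 802.1Q bridges and FID tables sized per vFID for
 * VLAN-unaware bridges.
 */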
2477 static struct mlxsw_config_profile mlxsw_sp_config_profile = {
2478 .used_max_vepa_channels = 1,
2479 .max_vepa_channels = 0,
2481 .max_lag = MLXSW_SP_LAG_MAX,
2482 .used_max_port_per_lag = 1,
2483 .max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX,
2485 .max_mid = MLXSW_SP_MID_MAX,
2488 .used_max_system_port = 1,
2489 .max_system_port = 64,
2490 .used_max_vlan_groups = 1,
2491 .max_vlan_groups = 127,
2492 .used_max_regions = 1,
2494 .used_flood_tables = 1,
2495 .used_flood_mode = 1,
2497 .max_fid_offset_flood_tables = 2,
2498 .fid_offset_flood_table_size = VLAN_N_VID - 1,
2499 .max_fid_flood_tables = 2,
2500 .fid_flood_table_size = MLXSW_SP_VFID_MAX,
2501 .used_max_ib_mc = 1,
2508 .type = MLXSW_PORT_SWID_TYPE_ETH,
2513 static struct mlxsw_driver mlxsw_sp_driver = {
2514 .kind = MLXSW_DEVICE_KIND_SPECTRUM,
2515 .owner = THIS_MODULE,
2516 .priv_size = sizeof(struct mlxsw_sp),
2517 .init = mlxsw_sp_init,
2518 .fini = mlxsw_sp_fini,
2519 .port_split = mlxsw_sp_port_split,
2520 .port_unsplit = mlxsw_sp_port_unsplit,
2521 .sb_pool_get = mlxsw_sp_sb_pool_get,
2522 .sb_pool_set = mlxsw_sp_sb_pool_set,
2523 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
2524 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
2525 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
2526 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
2527 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
2528 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
2529 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
2530 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
2531 .txhdr_construct = mlxsw_sp_txhdr_construct,
2532 .txhdr_len = MLXSW_TXHDR_LEN,
2533 .profile = &mlxsw_sp_config_profile,
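/* FDB flush helpers (SFDF). Four flavours exist: flush by port, by
 * port and FID, by LAG and by LAG and FID. The caller picks one based
 * on whether the port is a LAG member and whether VLAN upper devices
 * (vPorts) are present.
 */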
2537 mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
2539 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2540 char sfdf_pl[MLXSW_REG_SFDF_LEN];
2542 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
2543 mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
2545 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2549 mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2552 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2553 char sfdf_pl[MLXSW_REG_SFDF_LEN];
2555 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
2556 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2557 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
2558 mlxsw_sp_port->local_port);
2560 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2564 mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
2566 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2567 char sfdf_pl[MLXSW_REG_SFDF_LEN];
2569 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
2570 mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2572 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2576 mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2579 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2580 char sfdf_pl[MLXSW_REG_SFDF_LEN];
2582 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
2583 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2584 mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2586 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2590 __mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
2592 int err, last_err = 0;
2595 for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2596 err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
2605 __mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
2607 int err, last_err = 0;
2610 for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2611 err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
2619 static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
2621 if (!list_empty(&mlxsw_sp_port->vports_list))
2622 if (mlxsw_sp_port->lagged)
2623 return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
2624 else
2625 return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
2627 if (mlxsw_sp_port->lagged)
2628 return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
2630 return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
2633 static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
2635 u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
2636 u16 fid = mlxsw_sp_vfid_to_fid(vfid);
2638 if (mlxsw_sp_vport->lagged)
2639 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
2640 fid);
2642 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
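/* A Spectrum port netdev is identified by its netdev_ops. Only a
 * single bridge can be offloaded, so the master bridge device is
 * reference counted: the first port joining it sets it and the last
 * port leaving it clears it.
 */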
2645 static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
2647 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
2650 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
2651 struct net_device *br_dev)
2653 return !mlxsw_sp->master_bridge.dev ||
2654 mlxsw_sp->master_bridge.dev == br_dev;
2657 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
2658 struct net_device *br_dev)
2660 mlxsw_sp->master_bridge.dev = br_dev;
2661 mlxsw_sp->master_bridge.ref_count++;
2664 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
2666 if (--mlxsw_sp->master_bridge.ref_count == 0)
2667 mlxsw_sp->master_bridge.dev = NULL;
2670 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
2671 struct net_device *br_dev)
2673 struct net_device *dev = mlxsw_sp_port->dev;
2676 /* When port is not bridged untagged packets are tagged with
2677 * PVID=VID=1, thereby creating an implicit VLAN interface in
2678 * the device. Remove it and let bridge code take care of its
2679 * own VLANs.
2680 */
2681 err = mlxsw_sp_port_kill_vid(dev, 0, 1);
2685 mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);
2687 mlxsw_sp_port->learning = 1;
2688 mlxsw_sp_port->learning_sync = 1;
2689 mlxsw_sp_port->uc_flood = 1;
2690 mlxsw_sp_port->bridged = 1;
2695 static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2698 struct net_device *dev = mlxsw_sp_port->dev;
2700 if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
2701 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2703 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
2705 mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);
2707 mlxsw_sp_port->learning = 0;
2708 mlxsw_sp_port->learning_sync = 0;
2709 mlxsw_sp_port->uc_flood = 0;
2710 mlxsw_sp_port->bridged = 0;
2712 /* Add implicit VLAN interface in the device, so that untagged
2713 * packets will be classified to the default vFID.
2714 */
2715 mlxsw_sp_port_add_vid(dev, 0, 1);
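/* LAG plumbing: SLDR creates and destroys the LAG and maintains its
 * distribution (TX) port list, while SLCOR adds, removes, enables and
 * disables a port in the LAG collector.
 */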
2718 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
2720 char sldr_pl[MLXSW_REG_SLDR_LEN];
2722 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
2723 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2726 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
2728 char sldr_pl[MLXSW_REG_SLDR_LEN];
2730 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
2731 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2734 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
2735 u16 lag_id, u8 port_index)
2737 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2738 char slcor_pl[MLXSW_REG_SLCOR_LEN];
2740 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
2741 lag_id, port_index);
2742 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2745 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
2748 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2749 char slcor_pl[MLXSW_REG_SLCOR_LEN];
2751 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
2753 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2756 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
2759 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2760 char slcor_pl[MLXSW_REG_SLCOR_LEN];
2762 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
2764 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2767 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
2770 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2771 char slcor_pl[MLXSW_REG_SLCOR_LEN];
2773 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
2775 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2778 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2779 struct net_device *lag_dev,
2782 struct mlxsw_sp_upper *lag;
2783 int free_lag_id = -1;
2786 for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
2787 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
2788 if (lag->ref_count) {
2789 if (lag->dev == lag_dev) {
2793 } else if (free_lag_id < 0) {
2797 if (free_lag_id < 0)
2799 *p_lag_id = free_lag_id;
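/* A LAG upper device can only be offloaded if a LAG index is available
 * (or the LAG is already known) and its TX type is hash-based. A
 * user-space sketch (hypothetical interface names, assuming standard
 * iproute2 bonding commands):
 *
 *   ip link add name bond0 type bond mode 802.3ad
 *   ip link set dev sw1p1 master bond0
 *   ip link set dev sw1p2 master bond0
 */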
2804 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
2805 struct net_device *lag_dev,
2806 struct netdev_lag_upper_info *lag_upper_info)
2810 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
2812 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2817 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2818 u16 lag_id, u8 *p_port_index)
2822 for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
2823 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
2831 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
2832 struct net_device *lag_dev)
2834 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2835 struct mlxsw_sp_upper *lag;
2840 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
2843 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
2844 if (!lag->ref_count) {
2845 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
2851 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
2854 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
2856 goto err_col_port_add;
2857 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
2859 goto err_col_port_enable;
2861 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
2862 mlxsw_sp_port->local_port);
2863 mlxsw_sp_port->lag_id = lag_id;
2864 mlxsw_sp_port->lagged = 1;
2868 err_col_port_enable:
2869 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
2871 if (!lag->ref_count)
2872 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
2876 static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2877 struct net_device *br_dev,
2880 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2881 struct net_device *lag_dev)
2883 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2884 struct mlxsw_sp_port *mlxsw_sp_vport;
2885 struct mlxsw_sp_upper *lag;
2886 u16 lag_id = mlxsw_sp_port->lag_id;
2888 if (!mlxsw_sp_port->lagged)
2890 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
2891 WARN_ON(lag->ref_count == 0);
2893 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
2894 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
2896 /* In case we leave a LAG device that has bridges built on top,
2897 * then their teardown sequence is never issued and we need to
2898 * invoke the necessary cleanup routines ourselves.
2899 */
2900 list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
2902 struct net_device *br_dev;
2904 if (!mlxsw_sp_vport->bridged)
2907 br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
2908 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
2911 if (mlxsw_sp_port->bridged) {
2912 mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
2913 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
2916 if (lag->ref_count == 1) {
2917 if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
2918 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2919 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
2922 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
2923 mlxsw_sp_port->local_port);
2924 mlxsw_sp_port->lagged = 0;
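/* The SLDR distribution list controls which LAG member ports may
 * transmit. It is updated from the CHANGELOWERSTATE handler below
 * according to the tx_enabled state reported by the LAG master driver
 * (e.g. bonding or team).
 */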
2928 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
2931 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2932 char sldr_pl[MLXSW_REG_SLDR_LEN];
2934 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
2935 mlxsw_sp_port->local_port);
2936 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2939 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
2942 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2943 char sldr_pl[MLXSW_REG_SLDR_LEN];
2945 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
2946 mlxsw_sp_port->local_port);
2947 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2950 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
2951 bool lag_tx_enabled)
2954 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
2955 mlxsw_sp_port->lag_id);
2957 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
2958 mlxsw_sp_port->lag_id);
2961 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
2962 struct netdev_lag_lower_state_info *info)
2964 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
2967 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
2968 struct net_device *vlan_dev)
2970 struct mlxsw_sp_port *mlxsw_sp_vport;
2971 u16 vid = vlan_dev_vlan_id(vlan_dev);
2973 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2974 if (WARN_ON(!mlxsw_sp_vport))
2977 mlxsw_sp_vport->dev = vlan_dev;
2982 static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
2983 struct net_device *vlan_dev)
2985 struct mlxsw_sp_port *mlxsw_sp_vport;
2986 u16 vid = vlan_dev_vlan_id(vlan_dev);
2988 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2989 if (WARN_ON(!mlxsw_sp_vport))
2992 /* When removing a VLAN device while still bridged we should first
2993 * remove it from the bridge, as we receive the bridge's notification
2994 * when the vPort is already gone.
2995 */
2996 if (mlxsw_sp_vport->bridged) {
2997 struct net_device *br_dev;
2999 br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
3000 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
3003 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
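/* Notifier handling for physical ports. PRECHANGEUPPER vetoes
 * unsupported topologies: unknown upper types, a second bridge, a LAG
 * without hash TX, enslaving a port with VLAN uppers to a LAG, or a
 * VLAN device directly on a LAG member port. CHANGEUPPER performs the
 * actual join or leave for VLAN, bridge and LAG uppers.
 */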
3006 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
3007 unsigned long event, void *ptr)
3009 struct netdev_notifier_changeupper_info *info;
3010 struct mlxsw_sp_port *mlxsw_sp_port;
3011 struct net_device *upper_dev;
3012 struct mlxsw_sp *mlxsw_sp;
3015 mlxsw_sp_port = netdev_priv(dev);
3016 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3020 case NETDEV_PRECHANGEUPPER:
3021 upper_dev = info->upper_dev;
3022 if (!is_vlan_dev(upper_dev) &&
3023 !netif_is_lag_master(upper_dev) &&
3024 !netif_is_bridge_master(upper_dev))
3028 /* HW limitation forbids putting a port in multiple bridges. */
3029 if (netif_is_bridge_master(upper_dev) &&
3030 !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
3032 if (netif_is_lag_master(upper_dev) &&
3033 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
3036 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
3038 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
3039 !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
3042 case NETDEV_CHANGEUPPER:
3043 upper_dev = info->upper_dev;
3044 if (is_vlan_dev(upper_dev)) {
3046 err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
3049 mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
3051 } else if (netif_is_bridge_master(upper_dev)) {
3053 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
3056 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, true);
3057 } else if (netif_is_lag_master(upper_dev)) {
3059 err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
3062 mlxsw_sp_port_lag_leave(mlxsw_sp_port,
3074 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
3075 unsigned long event, void *ptr)
3077 struct netdev_notifier_changelowerstate_info *info;
3078 struct mlxsw_sp_port *mlxsw_sp_port;
3081 mlxsw_sp_port = netdev_priv(dev);
3085 case NETDEV_CHANGELOWERSTATE:
3086 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
3087 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
3088 info->lower_state_info);
3090 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
3098 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
3099 unsigned long event, void *ptr)
3102 case NETDEV_PRECHANGEUPPER:
3103 case NETDEV_CHANGEUPPER:
3104 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
3105 case NETDEV_CHANGELOWERSTATE:
3106 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
3112 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
3113 unsigned long event, void *ptr)
3115 struct net_device *dev;
3116 struct list_head *iter;
3119 netdev_for_each_lower_dev(lag_dev, dev, iter) {
3120 if (mlxsw_sp_port_dev_check(dev)) {
3121 ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
3130 static struct mlxsw_sp_vfid *
3131 mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
3132 const struct net_device *br_dev)
3134 struct mlxsw_sp_vfid *vfid;
3136 list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
3137 if (vfid->br_dev == br_dev)
3144 static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
3146 return vfid - MLXSW_SP_VFID_PORT_MAX;
3149 static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
3151 return MLXSW_SP_VFID_PORT_MAX + br_vfid;
3154 static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
3156 return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
3157 MLXSW_SP_VFID_BR_MAX);
3160 static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
3161 struct net_device *br_dev)
3163 struct device *dev = mlxsw_sp->bus_info->dev;
3164 struct mlxsw_sp_vfid *f;
3168 vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
3169 if (vfid == MLXSW_SP_VFID_MAX) {
3170 dev_err(dev, "No available vFIDs\n");
3171 return ERR_PTR(-ERANGE);
3174 fid = mlxsw_sp_vfid_to_fid(vfid);
3175 err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
3177 dev_err(dev, "Failed to create FID=%d\n", fid);
3178 return ERR_PTR(err);
3181 f = kzalloc(sizeof(*f), GFP_KERNEL);
3183 goto err_allocate_vfid;
3188 list_add(&f->list, &mlxsw_sp->br_vfids.list);
3189 set_bit(mlxsw_sp_vfid_to_br_vfid(vfid), mlxsw_sp->br_vfids.mapped);
3194 mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
3195 return ERR_PTR(-ENOMEM);
3198 static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
3199 struct mlxsw_sp_vfid *vfid)
3201 u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);
3202 u16 fid = mlxsw_sp_vfid_to_fid(vfid->vfid);
3204 clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
3205 list_del(&vfid->list);
3207 mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
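/* When a vPort leaves a bridge it is moved from the bridge's vFID back
 * to a per-VID vFID: the old {Port, VID} to FID mapping is invalidated,
 * a new one is created, learning and flooding are disabled and, when
 * requested, the FDB entries of the old FID are flushed.
 */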
3212 static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
3213 struct net_device *br_dev,
3216 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3217 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
3218 struct net_device *dev = mlxsw_sp_vport->dev;
3219 struct mlxsw_sp_vfid *vfid, *new_vfid;
3223 vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
3227 /* We need a vFID to go back to after leaving the bridge's vFID. */
3228 new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
3230 new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
3231 if (IS_ERR(new_vfid)) {
3232 netdev_err(dev, "Failed to create vFID for VID=%d\n",
3238 /* Invalidate existing {Port, VID} to vFID mapping and create a new
3239 * one for the new vFID.
3240 */
3241 fid = mlxsw_sp_vfid_to_fid(vfid->vfid);
3242 err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, fid, false);
3244 netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
3246 goto err_vport_fid_unmap;
3249 new_fid = mlxsw_sp_vfid_to_fid(new_vfid->vfid);
3250 err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, new_fid, true);
3252 netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
3254 goto err_vport_fid_map;
3257 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
3259 netdev_err(dev, "Failed to disable learning\n");
3260 goto err_port_vid_learning_set;
3263 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false);
3265 netdev_err(dev, "Failed clear to clear flooding\n");
3266 goto err_vport_flood_set;
3269 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
3270 MLXSW_REG_SPMS_STATE_FORWARDING);
3272 netdev_err(dev, "Failed to set STP state\n");
3273 goto err_port_stp_state_set;
3276 if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
3277 netdev_err(dev, "Failed to flush FDB\n");
3279 /* Switch between the vFIDs and destroy the old one if needed. */
3280 new_vfid->nr_vports++;
3281 mlxsw_sp_vport->vport.vfid = new_vfid;
3283 if (!vfid->nr_vports)
3284 mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
3286 mlxsw_sp_vport->learning = 0;
3287 mlxsw_sp_vport->learning_sync = 0;
3288 mlxsw_sp_vport->uc_flood = 0;
3289 mlxsw_sp_vport->bridged = 0;
3293 err_port_stp_state_set:
3294 err_vport_flood_set:
3295 err_port_vid_learning_set:
3297 err_vport_fid_unmap:
3298 /* Rollback vFID only if new. */
3299 if (!new_vfid->nr_vports)
3300 mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
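/* Joining a bridge is the mirror operation: a bridge vFID is looked up
 * or created, flooding and learning are enabled and the {Port, VID}
 * mapping is moved from the per-VID vFID to the bridge vFID. This is
 * the path taken for a topology such as (hypothetical names):
 *
 *   ip link add link sw1p3 name sw1p3.100 type vlan id 100
 *   ip link add name br0 type bridge
 *   ip link set dev sw1p3.100 master br0
 */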
3303 static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
3304 struct net_device *br_dev)
3306 struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
3307 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3308 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
3309 struct net_device *dev = mlxsw_sp_vport->dev;
3310 struct mlxsw_sp_vfid *vfid;
3314 vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
3316 vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
3318 netdev_err(dev, "Failed to create bridge vFID\n");
3319 return PTR_ERR(vfid);
3323 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true);
3325 netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
3327 goto err_port_flood_set;
3330 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
3332 netdev_err(dev, "Failed to enable learning\n");
3333 goto err_port_vid_learning_set;
3336 /* We need to invalidate existing {Port, VID} to vFID mapping and
3337 * create a new one for the bridge's vFID.
3338 */
3339 old_fid = mlxsw_sp_vfid_to_fid(old_vfid->vfid);
3340 err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, old_fid, false);
3342 netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
3344 goto err_vport_fid_unmap;
3347 fid = mlxsw_sp_vfid_to_fid(vfid->vfid);
3348 err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, fid, true);
3350 netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
3352 goto err_vport_fid_map;
3355 /* Switch between the vFIDs and destroy the old one if needed. */
3357 mlxsw_sp_vport->vport.vfid = vfid;
3358 old_vfid->nr_vports--;
3359 if (!old_vfid->nr_vports)
3360 mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);
3362 mlxsw_sp_vport->learning = 1;
3363 mlxsw_sp_vport->learning_sync = 1;
3364 mlxsw_sp_vport->uc_flood = 1;
3365 mlxsw_sp_vport->bridged = 1;
3370 mlxsw_sp_vport_fid_map(mlxsw_sp_vport, old_fid, true);
3371 err_vport_fid_unmap:
3372 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
3373 err_port_vid_learning_set:
3374 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false);
3376 if (!vfid->nr_vports)
3377 mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
3382 mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
3383 const struct net_device *br_dev)
3385 struct mlxsw_sp_port *mlxsw_sp_vport;
3387 list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
3389 if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
3396 static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
3397 unsigned long event, void *ptr,
3400 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
3401 struct netdev_notifier_changeupper_info *info = ptr;
3402 struct mlxsw_sp_port *mlxsw_sp_vport;
3403 struct net_device *upper_dev;
3406 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3409 case NETDEV_PRECHANGEUPPER:
3410 upper_dev = info->upper_dev;
3411 if (!netif_is_bridge_master(upper_dev))
3415 /* We can't have multiple VLAN interfaces configured on
3416 * the same port and being members of the same bridge.
3417 */
3418 if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
3422 case NETDEV_CHANGEUPPER:
3423 upper_dev = info->upper_dev;
3424 if (info->linking) {
3425 if (WARN_ON(!mlxsw_sp_vport))
3427 err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
3430 /* We ignore bridge's unlinking notifications if vPort
3431 * is gone, since we already left the bridge when the
3432 * VLAN device was unlinked from the real device.
3433 */
3434 if (!mlxsw_sp_vport)
3436 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, upper_dev,
3444 static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
3445 unsigned long event, void *ptr,
3448 struct net_device *dev;
3449 struct list_head *iter;
3452 netdev_for_each_lower_dev(lag_dev, dev, iter) {
3453 if (mlxsw_sp_port_dev_check(dev)) {
3454 ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
3464 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
3465 unsigned long event, void *ptr)
3467 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3468 u16 vid = vlan_dev_vlan_id(vlan_dev);
3470 if (mlxsw_sp_port_dev_check(real_dev))
3471 return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
3473 else if (netif_is_lag_master(real_dev))
3474 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
3480 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
3481 unsigned long event, void *ptr)
3483 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3486 if (mlxsw_sp_port_dev_check(dev))
3487 err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
3488 else if (netif_is_lag_master(dev))
3489 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
3490 else if (is_vlan_dev(dev))
3491 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
3493 return notifier_from_errno(err);
3496 static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
3497 .notifier_call = mlxsw_sp_netdevice_event,
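/* Module init registers the netdevice notifier before registering the
 * driver with the core, so notifications are already in place when
 * ports are created; module exit tears things down in reverse order.
 */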
3500 static int __init mlxsw_sp_module_init(void)
3504 register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3505 err = mlxsw_core_driver_register(&mlxsw_sp_driver);
3507 goto err_core_driver_register;
3510 err_core_driver_register:
3511 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3515 static void __exit mlxsw_sp_module_exit(void)
3517 mlxsw_core_driver_unregister(&mlxsw_sp_driver);
3518 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3521 module_init(mlxsw_sp_module_init);
3522 module_exit(mlxsw_sp_module_exit);
3524 MODULE_LICENSE("Dual BSD/GPL");
3525 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
3526 MODULE_DESCRIPTION("Mellanox Spectrum driver");
3527 MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);