2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/pci.h>
41 #include <linux/netdevice.h>
42 #include <linux/etherdevice.h>
43 #include <linux/ethtool.h>
44 #include <linux/slab.h>
45 #include <linux/device.h>
46 #include <linux/skbuff.h>
47 #include <linux/if_vlan.h>
48 #include <linux/if_bridge.h>
49 #include <linux/workqueue.h>
50 #include <linux/jiffies.h>
51 #include <linux/bitops.h>
52 #include <linux/list.h>
53 #include <linux/notifier.h>
54 #include <linux/dcbnl.h>
55 #include <linux/inetdevice.h>
56 #include <net/switchdev.h>
57 #include <net/pkt_cls.h>
58 #include <net/tc_act/tc_mirred.h>
59 #include <net/netevent.h>
60 #include <net/tc_act/tc_sample.h>
69 #include "spectrum_cnt.h"
71 static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
72 static const char mlxsw_sp_driver_version[] = "1.0";
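/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */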
78 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
81 * Packet control type.
82 * 0 - Ethernet control (e.g. EMADs, LACP)
83 * 1 - Ethernet data
85 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
88 * Packet protocol type. Must be set to 1 (Ethernet).
90 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
92 /* tx_hdr_rx_is_router
93 * Packet is sent from the router. Valid for data packets only.
95 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
98 * Indicates if the 'fid' field is valid and should be used for
99 * forwarding lookup. Valid for data packets only.
101 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
104 * Switch partition ID. Must be set to 0.
106 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
108 /* tx_hdr_control_tclass
109 * Indicates if the packet should use the control TClass and not one
110 * of the data TClasses.
112 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
115 * Egress TClass to be used on the egress device on the egress port.
117 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
120 * Destination local port for unicast packets.
121 * Destination multicast ID for multicast packets.
123 * Control packets are directed to a specific egress port, while data
124 * packets are transmitted through the CPU port (0) into the switch partition,
125 * where forwarding rules are applied.
127 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
130 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
131 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
132 * Valid for data packets only.
134 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
136 /* tx_hdr_type
137 * 0 - Data packets
138 * 6 - Control packets
140 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
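/* Read the packet and byte counts of the given flow counter through the
 * MGPC register, without clearing it (NOP opcode).
 */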
142 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
143 unsigned int counter_index, u64 *packets,
146 char mgpc_pl[MLXSW_REG_MGPC_LEN];
149 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
150 MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
151 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
154 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
155 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
159 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
160 unsigned int counter_index)
162 char mgpc_pl[MLXSW_REG_MGPC_LEN];
164 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
165 MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
166 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
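/* Allocate a counter index from the flow counter sub-pool and clear it
 * before handing it to the caller.
 */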
169 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
170 unsigned int *p_counter_index)
174 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
178 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
180 goto err_counter_clear;
184 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
189 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
190 unsigned int counter_index)
192 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
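/* Build the Tx header in the skb headroom so the device treats the frame
 * as a control packet directed at tx_info->local_port.
 */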
196 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
197 const struct mlxsw_tx_info *tx_info)
199 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
201 memset(txhdr, 0, MLXSW_TXHDR_LEN);
203 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
204 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
205 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
206 mlxsw_tx_hdr_swid_set(txhdr, 0);
207 mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
208 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
209 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
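/* Query the device base MAC address (SPAD register). Port MAC addresses
 * are later derived from it in mlxsw_sp_port_dev_addr_init().
 */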
212 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
214 char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
217 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
220 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
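/* Allocate the SPAN (port mirroring) entries array according to the
 * MAX_SPAN resource reported by the device.
 */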
224 static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
228 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
231 mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
233 mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
234 sizeof(struct mlxsw_sp_span_entry),
236 if (!mlxsw_sp->span.entries)
239 for (i = 0; i < mlxsw_sp->span.entries_count; i++)
240 INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);
245 static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
249 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
250 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
252 WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
254 kfree(mlxsw_sp->span.entries);
257 static struct mlxsw_sp_span_entry *
258 mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
260 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
261 struct mlxsw_sp_span_entry *span_entry;
262 char mpat_pl[MLXSW_REG_MPAT_LEN];
263 u8 local_port = port->local_port;
268 /* find a free entry to use */
270 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
271 if (!mlxsw_sp->span.entries[i].used) {
273 span_entry = &mlxsw_sp->span.entries[i];
280 /* create a new port analyzer entry for local_port */
281 mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
282 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
286 span_entry->used = true;
287 span_entry->id = index;
288 span_entry->ref_count = 1;
289 span_entry->local_port = local_port;
293 static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
294 struct mlxsw_sp_span_entry *span_entry)
296 u8 local_port = span_entry->local_port;
297 char mpat_pl[MLXSW_REG_MPAT_LEN];
298 int pa_id = span_entry->id;
300 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
301 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
302 span_entry->used = false;
305 static struct mlxsw_sp_span_entry *
306 mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
308 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
311 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
312 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
314 if (curr->used && curr->local_port == port->local_port)
320 static struct mlxsw_sp_span_entry
321 *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
323 struct mlxsw_sp_span_entry *span_entry;
325 span_entry = mlxsw_sp_span_entry_find(port);
327 /* Already exists, just take a reference */
328 span_entry->ref_count++;
332 return mlxsw_sp_span_entry_create(port);
335 static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
336 struct mlxsw_sp_span_entry *span_entry)
338 WARN_ON(!span_entry->ref_count);
339 if (--span_entry->ref_count == 0)
340 mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
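/* Return true if any SPAN entry has this port bound as an egress
 * inspected port.
 */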
344 static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
346 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
347 struct mlxsw_sp_span_inspected_port *p;
350 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
351 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
353 list_for_each_entry(p, &curr->bound_ports_list, list)
354 if (p->local_port == port->local_port &&
355 p->type == MLXSW_SP_SPAN_EGRESS)
362 static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
365 return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
368 static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
370 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
371 char sbib_pl[MLXSW_REG_SBIB_LEN];
374 /* If port is egress mirrored, the shared buffer size should be
375 * updated according to the MTU value
377 if (mlxsw_sp_span_is_egress_mirror(port)) {
378 u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
380 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
381 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
383 netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
391 static struct mlxsw_sp_span_inspected_port *
392 mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
393 struct mlxsw_sp_span_entry *span_entry)
395 struct mlxsw_sp_span_inspected_port *p;
397 list_for_each_entry(p, &span_entry->bound_ports_list, list)
398 if (port->local_port == p->local_port)
404 mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
405 struct mlxsw_sp_span_entry *span_entry,
406 enum mlxsw_sp_span_type type)
408 struct mlxsw_sp_span_inspected_port *inspected_port;
409 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
410 char mpar_pl[MLXSW_REG_MPAR_LEN];
411 char sbib_pl[MLXSW_REG_SBIB_LEN];
412 int pa_id = span_entry->id;
415 /* if it is an egress SPAN, bind a shared buffer to it */
416 if (type == MLXSW_SP_SPAN_EGRESS) {
417 u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
420 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
421 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
423 netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
428 /* bind the port to the SPAN entry */
429 mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
430 (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
431 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
433 goto err_mpar_reg_write;
435 inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
436 if (!inspected_port) {
438 goto err_inspected_port_alloc;
440 inspected_port->local_port = port->local_port;
441 inspected_port->type = type;
442 list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
447 err_inspected_port_alloc:
448 if (type == MLXSW_SP_SPAN_EGRESS) {
449 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
450 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
456 mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
457 struct mlxsw_sp_span_entry *span_entry,
458 enum mlxsw_sp_span_type type)
460 struct mlxsw_sp_span_inspected_port *inspected_port;
461 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
462 char mpar_pl[MLXSW_REG_MPAR_LEN];
463 char sbib_pl[MLXSW_REG_SBIB_LEN];
464 int pa_id = span_entry->id;
466 inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
470 /* remove the inspected port */
471 mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
472 (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
473 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
475 /* remove the SBIB buffer if it was egress SPAN */
476 if (type == MLXSW_SP_SPAN_EGRESS) {
477 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
478 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
481 mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
483 list_del(&inspected_port->list);
484 kfree(inspected_port);
487 static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
488 struct mlxsw_sp_port *to,
489 enum mlxsw_sp_span_type type)
491 struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
492 struct mlxsw_sp_span_entry *span_entry;
495 span_entry = mlxsw_sp_span_entry_get(to);
499 netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
502 err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
509 mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
513 static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
514 struct mlxsw_sp_port *to,
515 enum mlxsw_sp_span_type type)
517 struct mlxsw_sp_span_entry *span_entry;
519 span_entry = mlxsw_sp_span_entry_find(to);
521 netdev_err(from->dev, "no span entry found\n");
525 netdev_dbg(from->dev, "Removing inspected port from SPAN entry %d\n",
527 mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
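/* Enable or disable packet sampling on the port with the given rate
 * using the MPSC register.
 */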
530 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
531 bool enable, u32 rate)
533 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
534 char mpsc_pl[MLXSW_REG_MPSC_LEN];
536 mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
537 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
540 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
543 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
544 char paos_pl[MLXSW_REG_PAOS_LEN];
546 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
547 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
548 MLXSW_PORT_ADMIN_STATUS_DOWN);
549 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
552 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
555 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
556 char ppad_pl[MLXSW_REG_PPAD_LEN];
558 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
559 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
560 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
563 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
565 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
566 unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
568 ether_addr_copy(addr, mlxsw_sp->base_mac);
569 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
570 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
573 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
575 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
576 char pmtu_pl[MLXSW_REG_PMTU_LEN];
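/* The MTU programmed into the device also covers the Ethernet header and
 * the Tx header pushed in front of each transmitted frame.
 */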
580 mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
581 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
582 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
585 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
590 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
591 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
594 static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
597 char pspa_pl[MLXSW_REG_PSPA_LEN];
599 mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
600 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
603 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
605 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
607 return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
611 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
614 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
615 char svpe_pl[MLXSW_REG_SVPE_LEN];
617 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
618 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
621 int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
622 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
625 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
626 char svfa_pl[MLXSW_REG_SVFA_LEN];
628 mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
630 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
633 int __mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
634 u16 vid_begin, u16 vid_end,
637 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
641 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
644 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid_begin,
645 vid_end, learn_enable);
646 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
651 static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
652 u16 vid, bool learn_enable)
654 return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
659 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
661 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
662 char sspr_pl[MLXSW_REG_SSPR_LEN];
664 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
665 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
668 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
669 u8 local_port, u8 *p_module,
670 u8 *p_width, u8 *p_lane)
672 char pmlp_pl[MLXSW_REG_PMLP_LEN];
675 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
676 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
679 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
680 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
681 *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
685 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
686 u8 module, u8 width, u8 lane)
688 char pmlp_pl[MLXSW_REG_PMLP_LEN];
691 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
692 mlxsw_reg_pmlp_width_set(pmlp_pl, width);
693 for (i = 0; i < width; i++) {
694 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
695 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
698 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
701 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
703 char pmlp_pl[MLXSW_REG_PMLP_LEN];
705 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
706 mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
707 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
710 static int mlxsw_sp_port_open(struct net_device *dev)
712 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
715 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
718 netif_start_queue(dev);
722 static int mlxsw_sp_port_stop(struct net_device *dev)
724 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
726 netif_stop_queue(dev);
727 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
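/* Transmit path: prepend the Tx header, hand the skb to the core and
 * update the per-CPU Tx statistics (or tx_dropped on failure).
 */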
730 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
731 struct net_device *dev)
733 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
734 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
735 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
736 const struct mlxsw_tx_info tx_info = {
737 .local_port = mlxsw_sp_port->local_port,
743 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
744 return NETDEV_TX_BUSY;
746 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
747 struct sk_buff *skb_orig = skb;
749 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
751 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
752 dev_kfree_skb_any(skb_orig);
755 dev_consume_skb_any(skb_orig);
758 if (eth_skb_pad(skb)) {
759 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
763 mlxsw_sp_txhdr_construct(skb, &tx_info);
764 /* TX header is consumed by HW on the way so we shouldn't count its
765 * bytes as being sent.
767 len = skb->len - MLXSW_TXHDR_LEN;
769 /* Due to a race we might fail here because of a full queue. In that
770 * unlikely case we simply drop the packet.
772 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
775 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
776 u64_stats_update_begin(&pcpu_stats->syncp);
777 pcpu_stats->tx_packets++;
778 pcpu_stats->tx_bytes += len;
779 u64_stats_update_end(&pcpu_stats->syncp);
781 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
782 dev_kfree_skb_any(skb);
787 static void mlxsw_sp_set_rx_mode(struct net_device *dev)
791 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
793 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
794 struct sockaddr *addr = p;
797 if (!is_valid_ether_addr(addr->sa_data))
798 return -EADDRNOTAVAIL;
800 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
803 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
807 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
810 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
813 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
815 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
818 delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
820 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
824 /* Maximum delay buffer needed in case of PAUSE frames, in bytes.
825 * Assumes 100m cable and maximum MTU.
827 #define MLXSW_SP_PAUSE_DELAY 58752
829 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
830 u16 delay, bool pfc, bool pause)
833 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
835 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
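/* Pack one priority group buffer entry: lossy buffers are configured with
 * a size only, lossless buffers also take a threshold.
 */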
840 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
844 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
846 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
850 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
851 u8 *prio_tc, bool pause_en,
852 struct ieee_pfc *my_pfc)
854 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
855 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
856 u16 delay = !!my_pfc ? my_pfc->delay : 0;
857 char pbmc_pl[MLXSW_REG_PBMC_LEN];
860 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
861 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
865 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
866 bool configure = false;
871 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
872 if (prio_tc[j] == i) {
873 pfc = pfc_en & BIT(j);
882 lossy = !(pfc || pause_en);
883 thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
884 delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
886 mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
889 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
892 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
893 int mtu, bool pause_en)
895 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
896 bool dcb_en = !!mlxsw_sp_port->dcb.ets;
897 struct ieee_pfc *my_pfc;
900 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
901 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
903 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
907 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
909 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
910 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
913 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
916 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
918 goto err_span_port_mtu_update;
919 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
921 goto err_port_mtu_set;
926 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
927 err_span_port_mtu_update:
928 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
933 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
934 struct rtnl_link_stats64 *stats)
936 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
937 struct mlxsw_sp_port_pcpu_stats *p;
938 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
943 for_each_possible_cpu(i) {
944 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
946 start = u64_stats_fetch_begin_irq(&p->syncp);
947 rx_packets = p->rx_packets;
948 rx_bytes = p->rx_bytes;
949 tx_packets = p->tx_packets;
950 tx_bytes = p->tx_bytes;
951 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
953 stats->rx_packets += rx_packets;
954 stats->rx_bytes += rx_bytes;
955 stats->tx_packets += tx_packets;
956 stats->tx_bytes += tx_bytes;
957 /* tx_dropped is u32, updated without syncp protection. */
958 tx_dropped += p->tx_dropped;
960 stats->tx_dropped = tx_dropped;
964 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
967 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
974 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
978 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
979 return mlxsw_sp_port_get_sw_stats64(dev, sp);
985 static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
986 int prio, char *ppcnt_pl)
988 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
989 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
991 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
992 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
995 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
996 struct rtnl_link_stats64 *stats)
998 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1001 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
1007 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
1009 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
1011 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
1013 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
1015 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
1017 stats->rx_crc_errors =
1018 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
1019 stats->rx_frame_errors =
1020 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
1022 stats->rx_length_errors = (
1023 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
1024 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
1025 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
1027 stats->rx_errors = (stats->rx_crc_errors +
1028 stats->rx_frame_errors + stats->rx_length_errors);
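/* Periodic work item that refreshes the cached HW counters while the
 * carrier is up; mlxsw_sp_port_get_stats64() reads from this cache.
 */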
1034 static void update_stats_cache(struct work_struct *work)
1036 struct mlxsw_sp_port *mlxsw_sp_port =
1037 container_of(work, struct mlxsw_sp_port,
1038 hw_stats.update_dw.work);
1040 if (!netif_carrier_ok(mlxsw_sp_port->dev))
1043 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
1044 mlxsw_sp_port->hw_stats.cache);
1047 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
1048 MLXSW_HW_STATS_UPDATE_TIME);
1051 /* Return the stats from a cache that is updated periodically,
1052 * as this function might get called in an atomic context.
1055 mlxsw_sp_port_get_stats64(struct net_device *dev,
1056 struct rtnl_link_stats64 *stats)
1058 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1060 memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
1063 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1064 u16 vid_end, bool is_member, bool untagged)
1066 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1070 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
1074 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
1075 vid_end, is_member, untagged);
1076 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
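/* Move the port to Virtual mode: install an explicit {Port, VID} to FID
 * mapping for every active VLAN and then enable virtual port mode.
 */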
1081 static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
1083 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
1084 u16 vid, last_visited_vid;
1087 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
1088 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
1091 last_visited_vid = vid;
1092 goto err_port_vid_to_fid_set;
1096 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
1098 last_visited_vid = VLAN_N_VID;
1099 goto err_port_vid_to_fid_set;
1104 err_port_vid_to_fid_set:
1105 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
1106 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
1111 static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
1113 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
1117 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
1121 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
1122 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
1131 static struct mlxsw_sp_port *
1132 mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1134 struct mlxsw_sp_port *mlxsw_sp_vport;
1136 mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
1137 if (!mlxsw_sp_vport)
1140 /* dev will be set correctly after the VLAN device is linked
1141 * with the real device. In case of bridge SELF invocation, dev
1142 * will remain as is.
1144 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
1145 mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1146 mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
1147 mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
1148 mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
1149 mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
1150 mlxsw_sp_vport->vport.vid = vid;
1152 list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
1154 return mlxsw_sp_vport;
1157 static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
1159 list_del(&mlxsw_sp_vport->vport.list);
1160 kfree(mlxsw_sp_vport);
1163 static int mlxsw_sp_port_add_vid(struct net_device *dev,
1164 __be16 __always_unused proto, u16 vid)
1166 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1167 struct mlxsw_sp_port *mlxsw_sp_vport;
1168 bool untagged = vid == 1;
1171 /* VLAN 0 is added to HW filter when device goes up, but it is
1172 * reserved in our case, so simply return.
1177 if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
1180 mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
1181 if (!mlxsw_sp_vport)
1184 /* When adding the first VLAN interface on a bridged port we need to
1185 * transition all the active 802.1Q bridge VLANs to use explicit
1186 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
1188 if (list_is_singular(&mlxsw_sp_port->vports_list)) {
1189 err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
1191 goto err_port_vp_mode_trans;
1194 err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
1196 goto err_port_add_vid;
1201 if (list_is_singular(&mlxsw_sp_port->vports_list))
1202 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
1203 err_port_vp_mode_trans:
1204 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
1208 static int mlxsw_sp_port_kill_vid(struct net_device *dev,
1209 __be16 __always_unused proto, u16 vid)
1211 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1212 struct mlxsw_sp_port *mlxsw_sp_vport;
1213 struct mlxsw_sp_fid *f;
1215 /* VLAN 0 is removed from HW filter when device goes down, but
1216 * it is reserved in our case, so simply return.
1221 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
1222 if (WARN_ON(!mlxsw_sp_vport))
1225 mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
1227 /* Drop FID reference. If this was the last reference the
1228 * resources will be freed.
1230 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
1231 if (f && !WARN_ON(!f->leave))
1232 f->leave(mlxsw_sp_vport);
1234 /* When removing the last VLAN interface on a bridged port we need to
1235 * transition all active 802.1Q bridge VLANs to use VID to FID
1236 * mappings and set port's mode to VLAN mode.
1238 if (list_is_singular(&mlxsw_sp_port->vports_list))
1239 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
1241 mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
1246 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
1249 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1250 u8 module = mlxsw_sp_port->mapping.module;
1251 u8 width = mlxsw_sp_port->mapping.width;
1252 u8 lane = mlxsw_sp_port->mapping.lane;
1255 if (!mlxsw_sp_port->split)
1256 err = snprintf(name, len, "p%d", module + 1);
1258 err = snprintf(name, len, "p%ds%d", module + 1,
1267 static struct mlxsw_sp_port_mall_tc_entry *
1268 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
1269 unsigned long cookie)
{
1270 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1272 list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
1273 if (mall_tc_entry->cookie == cookie)
1274 return mall_tc_entry;
1280 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1281 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
1282 const struct tc_action *a,
1285 struct net *net = dev_net(mlxsw_sp_port->dev);
1286 enum mlxsw_sp_span_type span_type;
1287 struct mlxsw_sp_port *to_port;
1288 struct net_device *to_dev;
1291 ifindex = tcf_mirred_ifindex(a);
1292 to_dev = __dev_get_by_index(net, ifindex);
1294 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
1298 if (!mlxsw_sp_port_dev_check(to_dev)) {
1299 netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port\n");
1302 to_port = netdev_priv(to_dev);
1304 mirror->to_local_port = to_port->local_port;
1305 mirror->ingress = ingress;
1306 span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1307 return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
1311 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1312 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
1314 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1315 enum mlxsw_sp_span_type span_type;
1316 struct mlxsw_sp_port *to_port;
1318 to_port = mlxsw_sp->ports[mirror->to_local_port];
1319 span_type = mirror->ingress ?
1320 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1321 mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
1325 mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
1326 struct tc_cls_matchall_offload *cls,
1327 const struct tc_action *a,
1332 if (!mlxsw_sp_port->sample)
1334 if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
1335 netdev_err(mlxsw_sp_port->dev, "sample already active\n");
1338 if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
1339 netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
1343 rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
1344 tcf_sample_psample_group(a));
1345 mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
1346 mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
1347 mlxsw_sp_port->sample->rate = tcf_sample_rate(a);
1349 err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
1351 goto err_port_sample_set;
1354 err_port_sample_set:
1355 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1360 mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
1362 if (!mlxsw_sp_port->sample)
1365 mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
1366 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
1369 static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1371 struct tc_cls_matchall_offload *cls,
1374 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1375 const struct tc_action *a;
1379 if (!tc_single_action(cls->exts)) {
1380 netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
1384 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1387 mall_tc_entry->cookie = cls->cookie;
1389 tcf_exts_to_list(cls->exts, &actions);
1390 a = list_first_entry(&actions, struct tc_action, list);
1392 if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
1393 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
1395 mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
1396 mirror = &mall_tc_entry->mirror;
1397 err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
1398 mirror, a, ingress);
1399 } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
1400 mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
1401 err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls,
1408 goto err_add_action;
1410 list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
1414 kfree(mall_tc_entry);
1418 static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1419 struct tc_cls_matchall_offload *cls)
1421 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1423 mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
1425 if (!mall_tc_entry) {
1426 netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
1429 list_del(&mall_tc_entry->list);
1431 switch (mall_tc_entry->type) {
1432 case MLXSW_SP_PORT_MALL_MIRROR:
1433 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
1434 &mall_tc_entry->mirror);
1436 case MLXSW_SP_PORT_MALL_SAMPLE:
1437 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
1443 kfree(mall_tc_entry);
1446 static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
1447 __be16 proto, struct tc_to_netdev *tc)
1449 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1450 bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);
1453 case TC_SETUP_MATCHALL:
1454 switch (tc->cls_mall->command) {
1455 case TC_CLSMATCHALL_REPLACE:
1456 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
1460 case TC_CLSMATCHALL_DESTROY:
1461 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
1467 case TC_SETUP_CLSFLOWER:
1468 switch (tc->cls_flower->command) {
1469 case TC_CLSFLOWER_REPLACE:
1470 return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress,
1471 proto, tc->cls_flower);
1472 case TC_CLSFLOWER_DESTROY:
1473 mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
1476 case TC_CLSFLOWER_STATS:
1477 return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress,
1487 static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
1488 .ndo_open = mlxsw_sp_port_open,
1489 .ndo_stop = mlxsw_sp_port_stop,
1490 .ndo_start_xmit = mlxsw_sp_port_xmit,
1491 .ndo_setup_tc = mlxsw_sp_setup_tc,
1492 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
1493 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
1494 .ndo_change_mtu = mlxsw_sp_port_change_mtu,
1495 .ndo_get_stats64 = mlxsw_sp_port_get_stats64,
1496 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
1497 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
1498 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
1499 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
1500 .ndo_fdb_add = switchdev_port_fdb_add,
1501 .ndo_fdb_del = switchdev_port_fdb_del,
1502 .ndo_fdb_dump = switchdev_port_fdb_dump,
1503 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
1504 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
1505 .ndo_bridge_dellink = switchdev_port_bridge_dellink,
1506 .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
1509 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
1510 struct ethtool_drvinfo *drvinfo)
1512 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1513 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1515 strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
1516 strlcpy(drvinfo->version, mlxsw_sp_driver_version,
1517 sizeof(drvinfo->version));
1518 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1520 mlxsw_sp->bus_info->fw_rev.major,
1521 mlxsw_sp->bus_info->fw_rev.minor,
1522 mlxsw_sp->bus_info->fw_rev.subminor);
1523 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
1524 sizeof(drvinfo->bus_info));
1527 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1528 struct ethtool_pauseparam *pause)
1530 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1532 pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1533 pause->tx_pause = mlxsw_sp_port->link.tx_pause;
1536 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1537 struct ethtool_pauseparam *pause)
1539 char pfcc_pl[MLXSW_REG_PFCC_LEN];
1541 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1542 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1543 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1545 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
1549 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1550 struct ethtool_pauseparam *pause)
1552 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1553 bool pause_en = pause->tx_pause || pause->rx_pause;
1556 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1557 netdev_err(dev, "PFC already enabled on port\n");
1561 if (pause->autoneg) {
1562 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1566 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1568 netdev_err(dev, "Failed to configure port's headroom\n");
1572 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1574 netdev_err(dev, "Failed to set PAUSE parameters\n");
1575 goto err_port_pause_configure;
1578 mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1579 mlxsw_sp_port->link.tx_pause = pause->tx_pause;
1583 err_port_pause_configure:
1584 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1585 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1589 struct mlxsw_sp_port_hw_stats {
1590 char str[ETH_GSTRING_LEN];
1591 u64 (*getter)(const char *payload);
1595 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
1597 .str = "a_frames_transmitted_ok",
1598 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
1601 .str = "a_frames_received_ok",
1602 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
1605 .str = "a_frame_check_sequence_errors",
1606 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
1609 .str = "a_alignment_errors",
1610 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
1613 .str = "a_octets_transmitted_ok",
1614 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
1617 .str = "a_octets_received_ok",
1618 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
1621 .str = "a_multicast_frames_xmitted_ok",
1622 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
1625 .str = "a_broadcast_frames_xmitted_ok",
1626 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
1629 .str = "a_multicast_frames_received_ok",
1630 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
1633 .str = "a_broadcast_frames_received_ok",
1634 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
1637 .str = "a_in_range_length_errors",
1638 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
1641 .str = "a_out_of_range_length_field",
1642 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
1645 .str = "a_frame_too_long_errors",
1646 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
1649 .str = "a_symbol_error_during_carrier",
1650 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
1653 .str = "a_mac_control_frames_transmitted",
1654 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
1657 .str = "a_mac_control_frames_received",
1658 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
1661 .str = "a_unsupported_opcodes_received",
1662 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
1665 .str = "a_pause_mac_ctrl_frames_received",
1666 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
1669 .str = "a_pause_mac_ctrl_frames_xmitted",
1670 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
1674 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
1676 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
1678 .str = "rx_octets_prio",
1679 .getter = mlxsw_reg_ppcnt_rx_octets_get,
1682 .str = "rx_frames_prio",
1683 .getter = mlxsw_reg_ppcnt_rx_frames_get,
1686 .str = "tx_octets_prio",
1687 .getter = mlxsw_reg_ppcnt_tx_octets_get,
1690 .str = "tx_frames_prio",
1691 .getter = mlxsw_reg_ppcnt_tx_frames_get,
1694 .str = "rx_pause_prio",
1695 .getter = mlxsw_reg_ppcnt_rx_pause_get,
1698 .str = "rx_pause_duration_prio",
1699 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
1702 .str = "tx_pause_prio",
1703 .getter = mlxsw_reg_ppcnt_tx_pause_get,
1706 .str = "tx_pause_duration_prio",
1707 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
1711 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
1713 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
1715 .str = "tc_transmit_queue_tc",
1716 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
1717 .cells_bytes = true,
1720 .str = "tc_no_buffer_discard_uc_tc",
1721 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
1725 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
1727 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
1728 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
1729 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
1730 IEEE_8021QAZ_MAX_TCS)
1732 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
1736 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
1737 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1738 mlxsw_sp_port_hw_prio_stats[i].str, prio);
1739 *p += ETH_GSTRING_LEN;
1743 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
1747 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
1748 snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
1749 mlxsw_sp_port_hw_tc_stats[i].str, tc);
1750 *p += ETH_GSTRING_LEN;
1754 static void mlxsw_sp_port_get_strings(struct net_device *dev,
1755 u32 stringset, u8 *data)
1760 switch (stringset) {
1762 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
1763 memcpy(p, mlxsw_sp_port_hw_stats[i].str,
1765 p += ETH_GSTRING_LEN;
1768 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
1769 mlxsw_sp_port_get_prio_strings(&p, i);
1771 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
1772 mlxsw_sp_port_get_tc_strings(&p, i);
1778 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
1779 enum ethtool_phys_id_state state)
1781 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1782 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1783 char mlcr_pl[MLXSW_REG_MLCR_LEN];
1787 case ETHTOOL_ID_ACTIVE:
1790 case ETHTOOL_ID_INACTIVE:
1797 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
1798 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
1802 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
1803 int *p_len, enum mlxsw_reg_ppcnt_grp grp)
1806 case MLXSW_REG_PPCNT_IEEE_8023_CNT:
1807 *p_hw_stats = mlxsw_sp_port_hw_stats;
1808 *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
1810 case MLXSW_REG_PPCNT_PRIO_CNT:
1811 *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
1812 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
1814 case MLXSW_REG_PPCNT_TC_CNT:
1815 *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
1816 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
1825 static void __mlxsw_sp_port_get_stats(struct net_device *dev,
1826 enum mlxsw_reg_ppcnt_grp grp, int prio,
1827 u64 *data, int data_index)
1829 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1830 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1831 struct mlxsw_sp_port_hw_stats *hw_stats;
1832 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1836 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
1839 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
1840 for (i = 0; i < len; i++) {
1841 data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
1842 if (!hw_stats[i].cells_bytes)
1844 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
1845 data[data_index + i]);
1849 static void mlxsw_sp_port_get_stats(struct net_device *dev,
1850 struct ethtool_stats *stats, u64 *data)
1852 int i, data_index = 0;
1854 /* IEEE 802.3 Counters */
1855 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
1857 data_index = MLXSW_SP_PORT_HW_STATS_LEN;
1859 /* Per-Priority Counters */
1860 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1861 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
1863 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
1866 /* Per-TC Counters */
1867 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1868 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
1870 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
1874 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1878 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
1884 struct mlxsw_sp_port_link_mode {
1885 enum ethtool_link_mode_bit_indices mask_ethtool;
1890 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
1892 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
1893 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1897 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
1898 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
1899 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1900 .speed = SPEED_1000,
1903 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
1904 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
1905 .speed = SPEED_10000,
1908 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
1909 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
1910 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
1911 .speed = SPEED_10000,
1914 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1915 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1916 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1917 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
1918 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1919 .speed = SPEED_10000,
1922 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
1923 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
1924 .speed = SPEED_20000,
1927 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
1928 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1929 .speed = SPEED_40000,
1932 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
1933 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1934 .speed = SPEED_40000,
1937 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
1938 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1939 .speed = SPEED_40000,
1942 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
1943 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1944 .speed = SPEED_40000,
1947 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
1948 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1949 .speed = SPEED_25000,
1952 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
1953 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1954 .speed = SPEED_25000,
1957 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
1958 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1959 .speed = SPEED_25000,
1962 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
1963 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1964 .speed = SPEED_25000,
1967 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
1968 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1969 .speed = SPEED_50000,
1972 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
1973 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1974 .speed = SPEED_50000,
1977 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
1978 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1979 .speed = SPEED_50000,
1982 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1983 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
1984 .speed = SPEED_56000,
1987 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1988 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
1989 .speed = SPEED_56000,
1992 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1993 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
1994 .speed = SPEED_56000,
1997 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
1998 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
1999 .speed = SPEED_56000,
2002 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2003 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2004 .speed = SPEED_100000,
2007 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2008 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2009 .speed = SPEED_100000,
2012 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2013 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2014 .speed = SPEED_100000,
2017 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2018 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2019 .speed = SPEED_100000,
2023 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
2026 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
2027 struct ethtool_link_ksettings *cmd)
2029 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2030 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2031 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2032 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2033 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2034 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2035 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
2037 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2038 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2039 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2040 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
2041 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
2042 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
2045 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
2049 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2050 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
2051 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2056 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
2057 struct ethtool_link_ksettings *cmd)
2059 u32 speed = SPEED_UNKNOWN;
2060 u8 duplex = DUPLEX_UNKNOWN;
2066 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2067 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
2068 speed = mlxsw_sp_port_link_mode[i].speed;
2069 duplex = DUPLEX_FULL;
2074 cmd->base.speed = speed;
2075 cmd->base.duplex = duplex;
2078 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
2080 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2081 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2082 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2083 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2086 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2087 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2088 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
2091 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2092 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2093 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2094 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
2101 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
2106 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2107 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2108 cmd->link_modes.advertising))
2109 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2114 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
2119 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2120 if (speed == mlxsw_sp_port_link_mode[i].speed)
2121 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
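/* Build a PTYS mask containing every link mode whose speed does not
 * exceed upper_speed.
 */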
2126 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
2131 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2132 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
2133 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
2138 static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
2139 struct ethtool_link_ksettings *cmd)
2141 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
2142 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
2143 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
2145 mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
2146 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
2149 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
2150 struct ethtool_link_ksettings *cmd)
2155 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
2156 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
2160 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
2161 struct ethtool_link_ksettings *cmd)
2163 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
2166 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
2167 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
2170 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
2171 struct ethtool_link_ksettings *cmd)
2173 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
2174 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2175 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2176 char ptys_pl[MLXSW_REG_PTYS_LEN];
2181 autoneg = mlxsw_sp_port->link.autoneg;
2182 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2183 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2186 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
2189 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
2191 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
2193 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
2194 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
2195 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
2197 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
2198 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
2199 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
2206 mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2207 const struct ethtool_link_ksettings *cmd)
2209 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2210 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2211 char ptys_pl[MLXSW_REG_PTYS_LEN];
2212 u32 eth_proto_cap, eth_proto_new;
2216 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2217 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2220 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
2222 autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
2223 eth_proto_new = autoneg ?
2224 mlxsw_sp_to_ptys_advert_link(cmd) :
2225 mlxsw_sp_to_ptys_speed(cmd->base.speed);
2227 eth_proto_new = eth_proto_new & eth_proto_cap;
2228 if (!eth_proto_new) {
2229 netdev_err(dev, "No supported speed requested\n");
2233 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2235 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2239 if (!netif_running(dev))
2242 mlxsw_sp_port->link.autoneg = autoneg;
2244 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2245 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
2250 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
2251 .get_drvinfo = mlxsw_sp_port_get_drvinfo,
2252 .get_link = ethtool_op_get_link,
2253 .get_pauseparam = mlxsw_sp_port_get_pauseparam,
2254 .set_pauseparam = mlxsw_sp_port_set_pauseparam,
2255 .get_strings = mlxsw_sp_port_get_strings,
2256 .set_phys_id = mlxsw_sp_port_set_phys_id,
2257 .get_ethtool_stats = mlxsw_sp_port_get_stats,
2258 .get_sset_count = mlxsw_sp_port_get_sset_count,
2259 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
2260 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
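/* The maximum speed of a port scales with the number of lanes (width)
 * assigned to it, so advertise every supported mode at or below
 * MLXSW_SP_PORT_BASE_SPEED * width.
 */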
2264 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
2266 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2267 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
2268 char ptys_pl[MLXSW_REG_PTYS_LEN];
2269 u32 eth_proto_admin;
2271 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
2272 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2274 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2277 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
2278 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
2279 bool dwrr, u8 dwrr_weight)
2281 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2282 char qeec_pl[MLXSW_REG_QEEC_LEN];
2284 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2286 mlxsw_reg_qeec_de_set(qeec_pl, true);
2287 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
2288 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
2289 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2292 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
2293 enum mlxsw_reg_qeec_hr hr, u8 index,
2294 u8 next_index, u32 maxrate)
2296 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2297 char qeec_pl[MLXSW_REG_QEEC_LEN];
2299 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2301 mlxsw_reg_qeec_mase_set(qeec_pl, true);
2302 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
2303 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
2306 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
2307 u8 switch_prio, u8 tclass)
2309 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2310 char qtct_pl[MLXSW_REG_QTCT_LEN];
2312 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
2314 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
2317 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2321 /* Set up the element hierarchy, so that each TC is linked to
2322 * one subgroup, all of which are members of the same group. */
2324 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2325 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2329 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2330 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2331 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2336 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2337 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2338 MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2344 /* Make sure the max shaper is disabled in all hierarchies that support it. */
2347 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2348 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2349 MLXSW_REG_QEEC_MAS_DIS);
2352 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2353 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2354 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2356 MLXSW_REG_QEEC_MAS_DIS);
2360 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2361 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2362 MLXSW_REG_QEEC_HIERARCY_TC,
2364 MLXSW_REG_QEEC_MAS_DIS);
2369 /* Map all priorities to traffic class 0. */
2370 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2371 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
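/* A newly created port starts with PVID 1; the PVID vPort gives untagged
 * traffic a VLAN context until the port joins a bridge.
 */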
2379 static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
2381 mlxsw_sp_port->pvid = 1;
2383 return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
2386 static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
2388 return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
2391 static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2392 bool split, u8 module, u8 width, u8 lane)
2394 struct mlxsw_sp_port *mlxsw_sp_port;
2395 struct net_device *dev;
2399 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
2402 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
2403 mlxsw_sp_port = netdev_priv(dev);
2404 mlxsw_sp_port->dev = dev;
2405 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2406 mlxsw_sp_port->local_port = local_port;
2407 mlxsw_sp_port->split = split;
2408 mlxsw_sp_port->mapping.module = module;
2409 mlxsw_sp_port->mapping.width = width;
2410 mlxsw_sp_port->mapping.lane = lane;
2411 mlxsw_sp_port->link.autoneg = 1;
2412 bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
2413 mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
2414 if (!mlxsw_sp_port->active_vlans) {
2416 goto err_port_active_vlans_alloc;
2418 mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
2419 if (!mlxsw_sp_port->untagged_vlans) {
2421 goto err_port_untagged_vlans_alloc;
2423 INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
2424 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
2426 mlxsw_sp_port->pcpu_stats =
2427 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2428 if (!mlxsw_sp_port->pcpu_stats) {
2430 goto err_alloc_stats;
2433 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
2435 if (!mlxsw_sp_port->sample) {
2437 goto err_alloc_sample;
2440 mlxsw_sp_port->hw_stats.cache =
2441 kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
2443 if (!mlxsw_sp_port->hw_stats.cache) {
2445 goto err_alloc_hw_stats;
2447 INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
2448 &update_stats_cache);
2450 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2451 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2453 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2455 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2456 mlxsw_sp_port->local_port);
2457 goto err_port_swid_set;
2460 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2462 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port MAC address\n",
2463 mlxsw_sp_port->local_port);
2464 goto err_dev_addr_init;
2467 netif_carrier_off(dev);
2469 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
2470 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2471 dev->hw_features |= NETIF_F_HW_TC;
2474 dev->max_mtu = ETH_MAX_MTU;
2476 /* Each packet needs to have a Tx header (metadata) on top of all other headers. */
2479 dev->needed_headroom = MLXSW_TXHDR_LEN;
2481 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
2483 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
2484 mlxsw_sp_port->local_port);
2485 goto err_port_system_port_mapping_set;
2488 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
2490 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
2491 mlxsw_sp_port->local_port);
2492 goto err_port_speed_by_width_set;
2495 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
2497 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
2498 mlxsw_sp_port->local_port);
2499 goto err_port_mtu_set;
2502 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2504 goto err_port_admin_status_set;
2506 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
2508 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
2509 mlxsw_sp_port->local_port);
2510 goto err_port_buffers_init;
2513 err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
2515 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
2516 mlxsw_sp_port->local_port);
2517 goto err_port_ets_init;
2520 /* ETS and buffers must be initialized before DCB. */
2521 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
2523 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
2524 mlxsw_sp_port->local_port);
2525 goto err_port_dcb_init;
2528 err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
2530 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
2531 mlxsw_sp_port->local_port);
2532 goto err_port_pvid_vport_create;
2535 mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
2536 mlxsw_sp->ports[local_port] = mlxsw_sp_port;
2537 err = register_netdev(dev);
2539 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
2540 mlxsw_sp_port->local_port);
2541 goto err_register_netdev;
2544 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
2545 mlxsw_sp_port, dev, mlxsw_sp_port->split,
2547 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
2550 err_register_netdev:
2551 mlxsw_sp->ports[local_port] = NULL;
2552 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
2553 mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
2554 err_port_pvid_vport_create:
2555 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
2558 err_port_buffers_init:
2559 err_port_admin_status_set:
2561 err_port_speed_by_width_set:
2562 err_port_system_port_mapping_set:
2564 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2566 kfree(mlxsw_sp_port->hw_stats.cache);
2568 kfree(mlxsw_sp_port->sample);
2570 free_percpu(mlxsw_sp_port->pcpu_stats);
2572 kfree(mlxsw_sp_port->untagged_vlans);
2573 err_port_untagged_vlans_alloc:
2574 kfree(mlxsw_sp_port->active_vlans);
2575 err_port_active_vlans_alloc:
2580 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2581 bool split, u8 module, u8 width, u8 lane)
2585 err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2587 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2591 err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split,
2592 module, width, lane);
2594 goto err_port_create;
2598 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
2602 static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2604 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2606 cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
2607 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
2608 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
2609 mlxsw_sp->ports[local_port] = NULL;
2610 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
2611 mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
2612 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
2613 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
2614 mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
2615 kfree(mlxsw_sp_port->hw_stats.cache);
2616 kfree(mlxsw_sp_port->sample);
2617 free_percpu(mlxsw_sp_port->pcpu_stats);
2618 kfree(mlxsw_sp_port->untagged_vlans);
2619 kfree(mlxsw_sp_port->active_vlans);
2620 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
2621 free_netdev(mlxsw_sp_port->dev);
2624 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2626 __mlxsw_sp_port_remove(mlxsw_sp, local_port);
2627 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
2630 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
2632 return mlxsw_sp->ports[local_port] != NULL;
2635 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2639 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
2640 if (mlxsw_sp_port_created(mlxsw_sp, i))
2641 mlxsw_sp_port_remove(mlxsw_sp, i);
2642 kfree(mlxsw_sp->port_to_module);
2643 kfree(mlxsw_sp->ports);
2646 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
2648 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2649 u8 module, width, lane;
2654 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
2655 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
2656 if (!mlxsw_sp->ports)
2659 mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
2660 if (!mlxsw_sp->port_to_module) {
2662 goto err_port_to_module_alloc;
2665 for (i = 1; i < max_ports; i++) {
2666 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
2669 goto err_port_module_info_get;
2672 mlxsw_sp->port_to_module[i] = module;
2673 err = mlxsw_sp_port_create(mlxsw_sp, i, false,
2674 module, width, lane);
2676 goto err_port_create;
2681 err_port_module_info_get:
2682 for (i--; i >= 1; i--)
2683 if (mlxsw_sp_port_created(mlxsw_sp, i))
2684 mlxsw_sp_port_remove(mlxsw_sp, i);
2685 kfree(mlxsw_sp->port_to_module);
2686 err_port_to_module_alloc:
2687 kfree(mlxsw_sp->ports);
2691 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
2693 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
2695 return local_port - offset;
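/* Create the ports resulting from a split: the module's lanes are shared
 * equally between the new ports, so each one is created with
 * width = MLXSW_PORT_MODULE_MAX_WIDTH / count and its own lane offset.
 */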
2698 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
2699 u8 module, unsigned int count)
2701 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
2704 for (i = 0; i < count; i++) {
2705 err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
2708 goto err_port_module_map;
2711 for (i = 0; i < count; i++) {
2712 err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
2714 goto err_port_swid_set;
2717 for (i = 0; i < count; i++) {
2718 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
2719 module, width, i * width);
2721 goto err_port_create;
2727 for (i--; i >= 0; i--)
2728 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2729 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2732 for (i--; i >= 0; i--)
2733 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
2734 MLXSW_PORT_SWID_DISABLED_PORT);
2736 err_port_module_map:
2737 for (i--; i >= 0; i--)
2738 mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
2742 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
2743 u8 base_port, unsigned int count)
2745 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
2748 /* Split by four means we need to re-create two ports, otherwise only one. */
2753 for (i = 0; i < count; i++) {
2754 local_port = base_port + i * 2;
2755 module = mlxsw_sp->port_to_module[local_port];
2757 mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
2761 for (i = 0; i < count; i++)
2762 __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
2764 for (i = 0; i < count; i++) {
2765 local_port = base_port + i * 2;
2766 module = mlxsw_sp->port_to_module[local_port];
2768 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
2773 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
2776 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2777 struct mlxsw_sp_port *mlxsw_sp_port;
2778 u8 module, cur_width, base_port;
2782 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2783 if (!mlxsw_sp_port) {
2784 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2789 module = mlxsw_sp_port->mapping.module;
2790 cur_width = mlxsw_sp_port->mapping.width;
2792 if (count != 2 && count != 4) {
2793 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
2797 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
2798 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
2802 /* Make sure we have enough slave (even) ports for the split. */
2804 base_port = local_port;
2805 if (mlxsw_sp->ports[base_port + 1]) {
2806 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2810 base_port = mlxsw_sp_cluster_base_port_get(local_port);
2811 if (mlxsw_sp->ports[base_port + 1] ||
2812 mlxsw_sp->ports[base_port + 3]) {
2813 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
2818 for (i = 0; i < count; i++)
2819 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2820 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2822 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
2824 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
2825 goto err_port_split_create;
2830 err_port_split_create:
2831 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
2835 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
2837 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2838 struct mlxsw_sp_port *mlxsw_sp_port;
2839 u8 cur_width, base_port;
2843 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2844 if (!mlxsw_sp_port) {
2845 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
2850 if (!mlxsw_sp_port->split) {
2851 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
2855 cur_width = mlxsw_sp_port->mapping.width;
2856 count = cur_width == 1 ? 4 : 2;
2858 base_port = mlxsw_sp_cluster_base_port_get(local_port);
2860 /* Determine which ports to remove. */
2861 if (count == 2 && local_port >= base_port + 2)
2862 base_port = base_port + 2;
2864 for (i = 0; i < count; i++)
2865 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
2866 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
2868 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
2873 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2874 char *pude_pl, void *priv)
2876 struct mlxsw_sp *mlxsw_sp = priv;
2877 struct mlxsw_sp_port *mlxsw_sp_port;
2878 enum mlxsw_reg_pude_oper_status status;
2881 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2882 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2886 status = mlxsw_reg_pude_oper_status_get(pude_pl);
2887 if (status == MLXSW_PORT_OPER_STATUS_UP) {
2888 netdev_info(mlxsw_sp_port->dev, "link up\n");
2889 netif_carrier_on(mlxsw_sp_port->dev);
2891 netdev_info(mlxsw_sp_port->dev, "link down\n");
2892 netif_carrier_off(mlxsw_sp_port->dev);
2896 static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
2897 u8 local_port, void *priv)
2899 struct mlxsw_sp *mlxsw_sp = priv;
2900 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2901 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2903 if (unlikely(!mlxsw_sp_port)) {
2904 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2909 skb->dev = mlxsw_sp_port->dev;
2911 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2912 u64_stats_update_begin(&pcpu_stats->syncp);
2913 pcpu_stats->rx_packets++;
2914 pcpu_stats->rx_bytes += skb->len;
2915 u64_stats_update_end(&pcpu_stats->syncp);
2917 skb->protocol = eth_type_trans(skb, skb->dev);
2918 netif_receive_skb(skb);
2921 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
2924 skb->offload_fwd_mark = 1;
2925 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2928 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
2931 struct mlxsw_sp *mlxsw_sp = priv;
2932 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2933 struct psample_group *psample_group;
2936 if (unlikely(!mlxsw_sp_port)) {
2937 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
2941 if (unlikely(!mlxsw_sp_port->sample)) {
2942 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
2947 size = mlxsw_sp_port->sample->truncate ?
2948 mlxsw_sp_port->sample->trunc_size : skb->len;
2951 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
2954 psample_sample_packet(psample_group, skb, size,
2955 mlxsw_sp_port->dev->ifindex, 0,
2956 mlxsw_sp_port->sample->rate);
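/* Listener glue: traps registered with the _MARK variant set
 * skb->offload_fwd_mark on received packets, indicating to the stack
 * that the packet was already forwarded in hardware; _NO_MARK traps
 * leave the mark clear.
 */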
2963 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
2964 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
2965 _is_ctrl, SP_##_trap_group, DISCARD)
2967 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
2968 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
2969 _is_ctrl, SP_##_trap_group, DISCARD)
2971 #define MLXSW_SP_EVENTL(_func, _trap_id) \
2972 MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2974 static const struct mlxsw_listener mlxsw_sp_listener[] = {
2976 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
2978 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
2979 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
2980 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
2981 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
2982 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
2983 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
2984 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
2985 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
2986 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
2987 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
2988 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
2990 MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2991 MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2992 MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
2993 MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false),
2994 MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
2995 MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
2996 MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false),
2997 MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
2998 /* PKT Sample trap */
2999 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
3000 false, SP_IP2ME, DISCARD)
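/* Packets trapped to the CPU are rate-limited: every trap group is bound
 * to a policer whose rate is chosen per group in the switch statement
 * below.
 */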
3003 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
3005 char qpcr_pl[MLXSW_REG_QPCR_LEN];
3006 enum mlxsw_reg_qpcr_ir_units ir_units;
3007 int max_cpu_policers;
3013 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
3016 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3018 ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
3019 for (i = 0; i < max_cpu_policers; i++) {
3022 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3023 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3024 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3025 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3029 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3033 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
3034 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3035 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3036 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
3037 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3038 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3042 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3051 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
3053 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
3061 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
3063 char htgt_pl[MLXSW_REG_HTGT_LEN];
3064 enum mlxsw_reg_htgt_trap_group i;
3065 int max_cpu_policers;
3066 int max_trap_groups;
3071 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
3074 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
3075 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3077 for (i = 0; i < max_trap_groups; i++) {
3080 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3081 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3082 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3083 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3087 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4:
3088 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3092 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3093 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3097 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3101 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS:
3102 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3103 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3107 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
3108 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
3109 tc = MLXSW_REG_HTGT_DEFAULT_TC;
3110 policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
3116 if (max_cpu_policers <= policer_id &&
3117 policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
3120 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
3121 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3129 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
3134 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
3138 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
3142 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3143 err = mlxsw_core_trap_register(mlxsw_sp->core,
3144 &mlxsw_sp_listener[i],
3147 goto err_listener_register;
3152 err_listener_register:
3153 for (i--; i >= 0; i--) {
3154 mlxsw_core_trap_unregister(mlxsw_sp->core,
3155 &mlxsw_sp_listener[i],
3161 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
3165 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3166 mlxsw_core_trap_unregister(mlxsw_sp->core,
3167 &mlxsw_sp_listener[i],
3172 static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
3173 enum mlxsw_reg_sfgc_type type,
3174 enum mlxsw_reg_sfgc_bridge_type bridge_type)
3176 enum mlxsw_flood_table_type table_type;
3177 enum mlxsw_sp_flood_table flood_table;
3178 char sfgc_pl[MLXSW_REG_SFGC_LEN];
3180 if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
3181 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
3183 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
3186 case MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST:
3187 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
3189 case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4:
3190 flood_table = MLXSW_SP_FLOOD_TABLE_MC;
3193 flood_table = MLXSW_SP_FLOOD_TABLE_BC;
3196 mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
3198 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
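/* Program flood tables for both bridge types (802.1Q FIDs and
 * VLAN-unaware vFIDs) and for every flood type: unknown unicast,
 * unregistered multicast and broadcast each use their own table.
 */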
3201 static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
3205 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
3206 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
3209 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
3210 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
3214 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
3215 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
3223 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
3225 char slcr_pl[MLXSW_REG_SLCR_LEN];
3228 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
3229 MLXSW_REG_SLCR_LAG_HASH_DMAC |
3230 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
3231 MLXSW_REG_SLCR_LAG_HASH_VLANID |
3232 MLXSW_REG_SLCR_LAG_HASH_SIP |
3233 MLXSW_REG_SLCR_LAG_HASH_DIP |
3234 MLXSW_REG_SLCR_LAG_HASH_SPORT |
3235 MLXSW_REG_SLCR_LAG_HASH_DPORT |
3236 MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
3237 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
3241 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
3242 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
3245 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
3246 sizeof(struct mlxsw_sp_upper),
3248 if (!mlxsw_sp->lags)
3254 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
3256 kfree(mlxsw_sp->lags);
3259 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
3261 char htgt_pl[MLXSW_REG_HTGT_LEN];
3263 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
3264 MLXSW_REG_HTGT_INVALID_POLICER,
3265 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
3266 MLXSW_REG_HTGT_DEFAULT_TC);
3267 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3270 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
3271 const struct mlxsw_bus_info *mlxsw_bus_info)
3273 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3276 mlxsw_sp->core = mlxsw_core;
3277 mlxsw_sp->bus_info = mlxsw_bus_info;
3278 INIT_LIST_HEAD(&mlxsw_sp->fids);
3279 INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
3280 INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
3282 err = mlxsw_sp_base_mac_get(mlxsw_sp);
3284 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
3288 err = mlxsw_sp_traps_init(mlxsw_sp);
3290 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
3294 err = mlxsw_sp_flood_init(mlxsw_sp);
3296 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
3297 goto err_flood_init;
3300 err = mlxsw_sp_buffers_init(mlxsw_sp);
3302 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
3303 goto err_buffers_init;
3306 err = mlxsw_sp_lag_init(mlxsw_sp);
3308 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
3312 err = mlxsw_sp_switchdev_init(mlxsw_sp);
3314 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
3315 goto err_switchdev_init;
3318 err = mlxsw_sp_router_init(mlxsw_sp);
3320 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
3321 goto err_router_init;
3324 err = mlxsw_sp_span_init(mlxsw_sp);
3326 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
3330 err = mlxsw_sp_acl_init(mlxsw_sp);
3332 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
3336 err = mlxsw_sp_counter_pool_init(mlxsw_sp);
3338 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
3339 goto err_counter_pool_init;
3342 err = mlxsw_sp_ports_create(mlxsw_sp);
3344 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
3345 goto err_ports_create;
3351 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3352 err_counter_pool_init:
3353 mlxsw_sp_acl_fini(mlxsw_sp);
3355 mlxsw_sp_span_fini(mlxsw_sp);
3357 mlxsw_sp_router_fini(mlxsw_sp);
3359 mlxsw_sp_switchdev_fini(mlxsw_sp);
3361 mlxsw_sp_lag_fini(mlxsw_sp);
3363 mlxsw_sp_buffers_fini(mlxsw_sp);
3366 mlxsw_sp_traps_fini(mlxsw_sp);
3370 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
3372 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3374 mlxsw_sp_ports_remove(mlxsw_sp);
3375 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3376 mlxsw_sp_acl_fini(mlxsw_sp);
3377 mlxsw_sp_span_fini(mlxsw_sp);
3378 mlxsw_sp_router_fini(mlxsw_sp);
3379 mlxsw_sp_switchdev_fini(mlxsw_sp);
3380 mlxsw_sp_lag_fini(mlxsw_sp);
3381 mlxsw_sp_buffers_fini(mlxsw_sp);
3382 mlxsw_sp_traps_fini(mlxsw_sp);
3383 WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
3384 WARN_ON(!list_empty(&mlxsw_sp->fids));
3387 static struct mlxsw_config_profile mlxsw_sp_config_profile = {
3388 .used_max_vepa_channels = 1,
3389 .max_vepa_channels = 0,
3391 .max_mid = MLXSW_SP_MID_MAX,
3394 .used_flood_tables = 1,
3395 .used_flood_mode = 1,
3397 .max_fid_offset_flood_tables = 3,
3398 .fid_offset_flood_table_size = VLAN_N_VID - 1,
3399 .max_fid_flood_tables = 3,
3400 .fid_flood_table_size = MLXSW_SP_VFID_MAX,
3401 .used_max_ib_mc = 1,
3405 .used_kvd_split_data = 1,
3406 .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
3407 .kvd_hash_single_parts = 2,
3408 .kvd_hash_double_parts = 1,
3409 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
3413 .type = MLXSW_PORT_SWID_TYPE_ETH,
3416 .resource_query_enable = 1,
3419 static struct mlxsw_driver mlxsw_sp_driver = {
3420 .kind = mlxsw_sp_driver_name,
3421 .priv_size = sizeof(struct mlxsw_sp),
3422 .init = mlxsw_sp_init,
3423 .fini = mlxsw_sp_fini,
3424 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
3425 .port_split = mlxsw_sp_port_split,
3426 .port_unsplit = mlxsw_sp_port_unsplit,
3427 .sb_pool_get = mlxsw_sp_sb_pool_get,
3428 .sb_pool_set = mlxsw_sp_sb_pool_set,
3429 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
3430 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
3431 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
3432 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
3433 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
3434 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
3435 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
3436 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
3437 .txhdr_construct = mlxsw_sp_txhdr_construct,
3438 .txhdr_len = MLXSW_TXHDR_LEN,
3439 .profile = &mlxsw_sp_config_profile,
3442 bool mlxsw_sp_port_dev_check(const struct net_device *dev)
3444 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3447 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
3449 struct mlxsw_sp_port **p_mlxsw_sp_port = data;
3452 if (mlxsw_sp_port_dev_check(lower_dev)) {
3453 *p_mlxsw_sp_port = netdev_priv(lower_dev);
3460 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3462 struct mlxsw_sp_port *mlxsw_sp_port;
3464 if (mlxsw_sp_port_dev_check(dev))
3465 return netdev_priv(dev);
3467 mlxsw_sp_port = NULL;
3468 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
3470 return mlxsw_sp_port;
3473 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3475 struct mlxsw_sp_port *mlxsw_sp_port;
3477 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3478 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3481 static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3483 struct mlxsw_sp_port *mlxsw_sp_port;
3485 if (mlxsw_sp_port_dev_check(dev))
3486 return netdev_priv(dev);
3488 mlxsw_sp_port = NULL;
3489 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3492 return mlxsw_sp_port;
3495 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3497 struct mlxsw_sp_port *mlxsw_sp_port;
3500 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3502 dev_hold(mlxsw_sp_port->dev);
3504 return mlxsw_sp_port;
3507 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
3509 dev_put(mlxsw_sp_port->dev);
3512 static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
3515 if (mlxsw_sp_fid_is_vfid(fid))
3516 return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
3518 return test_bit(fid, lag_port->active_vlans);
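/* Flush FDB entries pointing to a LAG only if no other port member of
 * the LAG is still a member of the FID.
 */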
3521 static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
3524 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3525 u8 local_port = mlxsw_sp_port->local_port;
3526 u16 lag_id = mlxsw_sp_port->lag_id;
3527 u64 max_lag_members;
3530 if (!mlxsw_sp_port->lagged)
3533 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3535 for (i = 0; i < max_lag_members; i++) {
3536 struct mlxsw_sp_port *lag_port;
3538 lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
3539 if (!lag_port || lag_port->local_port == local_port)
3541 if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
3549 mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
3552 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3553 char sfdf_pl[MLXSW_REG_SFDF_LEN];
3555 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
3556 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
3557 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
3558 mlxsw_sp_port->local_port);
3560 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
3561 mlxsw_sp_port->local_port, fid);
3563 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
3567 mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
3570 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3571 char sfdf_pl[MLXSW_REG_SFDF_LEN];
3573 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
3574 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
3575 mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
3577 netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
3578 mlxsw_sp_port->lag_id, fid);
3580 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
3583 int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
3585 if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
3588 if (mlxsw_sp_port->lagged)
3589 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
3592 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
3595 static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
3597 struct mlxsw_sp_fid *f, *tmp;
3599 list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
3600 if (--f->ref_count == 0)
3601 mlxsw_sp_fid_destroy(mlxsw_sp, f);
3606 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
3607 struct net_device *br_dev)
3609 return !mlxsw_sp->master_bridge.dev ||
3610 mlxsw_sp->master_bridge.dev == br_dev;
3613 static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
3614 struct net_device *br_dev)
3616 mlxsw_sp->master_bridge.dev = br_dev;
3617 mlxsw_sp->master_bridge.ref_count++;
3620 static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
3622 if (--mlxsw_sp->master_bridge.ref_count == 0) {
3623 mlxsw_sp->master_bridge.dev = NULL;
3624 /* It's possible upper VLAN devices are still holding
3625 * references to underlying FIDs. Drop the reference
3626 * and release the resources if it was the last one.
3627 * If it wasn't, then something bad happened.
3629 mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
3633 static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
3634 struct net_device *br_dev)
3636 struct net_device *dev = mlxsw_sp_port->dev;
3639 /* When a port is not bridged, untagged packets are tagged with
3640 * PVID=VID=1, thereby creating an implicit VLAN interface in
3641 * the device. Remove it and let the bridge code take care of its own VLANs. */
3644 err = mlxsw_sp_port_kill_vid(dev, 0, 1);
3648 mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);
3650 mlxsw_sp_port->learning = 1;
3651 mlxsw_sp_port->learning_sync = 1;
3652 mlxsw_sp_port->uc_flood = 1;
3653 mlxsw_sp_port->mc_flood = 1;
3654 mlxsw_sp_port->mc_router = 0;
3655 mlxsw_sp_port->mc_disabled = 1;
3656 mlxsw_sp_port->bridged = 1;
3661 static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3663 struct net_device *dev = mlxsw_sp_port->dev;
3665 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
3667 mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);
3669 mlxsw_sp_port->learning = 0;
3670 mlxsw_sp_port->learning_sync = 0;
3671 mlxsw_sp_port->uc_flood = 0;
3672 mlxsw_sp_port->mc_flood = 0;
3673 mlxsw_sp_port->mc_router = 0;
3674 mlxsw_sp_port->bridged = 0;
3676 /* Add an implicit VLAN interface in the device, so that untagged
3677 * packets will be classified to the default vFID. */
3679 mlxsw_sp_port_add_vid(dev, 0, 1);
3682 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3684 char sldr_pl[MLXSW_REG_SLDR_LEN];
3686 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3687 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3690 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3692 char sldr_pl[MLXSW_REG_SLDR_LEN];
3694 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3695 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3698 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3699 u16 lag_id, u8 port_index)
3701 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3702 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3704 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3705 lag_id, port_index);
3706 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3709 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3712 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3713 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3715 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3717 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3720 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3723 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3724 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3726 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3728 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3731 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3734 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3735 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3737 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3739 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
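/* Look up the LAG ID already associated with the upper device, or
 * reserve the first free one if the device is not known yet.
 */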
3742 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3743 struct net_device *lag_dev,
3746 struct mlxsw_sp_upper *lag;
3747 int free_lag_id = -1;
3751 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
3752 for (i = 0; i < max_lag; i++) {
3753 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
3754 if (lag->ref_count) {
3755 if (lag->dev == lag_dev) {
3759 } else if (free_lag_id < 0) {
3763 if (free_lag_id < 0)
3765 *p_lag_id = free_lag_id;
3770 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
3771 struct net_device *lag_dev,
3772 struct netdev_lag_upper_info *lag_upper_info)
3776 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
3778 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
3783 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
3784 u16 lag_id, u8 *p_port_index)
3786 u64 max_lag_members;
3789 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
3791 for (i = 0; i < max_lag_members; i++) {
3792 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
3801 mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3802 struct net_device *lag_dev, u16 lag_id)
3804 struct mlxsw_sp_port *mlxsw_sp_vport;
3805 struct mlxsw_sp_fid *f;
3807 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
3808 if (WARN_ON(!mlxsw_sp_vport))
3811 /* If vPort is assigned a RIF, then leave it since it's no longer valid. */
3814 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3816 f->leave(mlxsw_sp_vport);
3818 mlxsw_sp_vport->lag_id = lag_id;
3819 mlxsw_sp_vport->lagged = 1;
3820 mlxsw_sp_vport->dev = lag_dev;
3824 mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
3826 struct mlxsw_sp_port *mlxsw_sp_vport;
3827 struct mlxsw_sp_fid *f;
3829 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
3830 if (WARN_ON(!mlxsw_sp_vport))
3833 f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
3835 f->leave(mlxsw_sp_vport);
3837 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
3838 mlxsw_sp_vport->lagged = 0;
3841 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
3842 struct net_device *lag_dev)
3844 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3845 struct mlxsw_sp_upper *lag;
3850 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
3853 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3854 if (!lag->ref_count) {
3855 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
3861 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
3864 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
3866 goto err_col_port_add;
3867 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
3869 goto err_col_port_enable;
3871 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
3872 mlxsw_sp_port->local_port);
3873 mlxsw_sp_port->lag_id = lag_id;
3874 mlxsw_sp_port->lagged = 1;
3877 mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_dev, lag_id);
3881 err_col_port_enable:
3882 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
3884 if (!lag->ref_count)
3885 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
3889 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
3890 struct net_device *lag_dev)
3892 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3893 u16 lag_id = mlxsw_sp_port->lag_id;
3894 struct mlxsw_sp_upper *lag;
3896 if (!mlxsw_sp_port->lagged)
3898 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
3899 WARN_ON(lag->ref_count == 0);
3901 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
3902 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
3904 if (mlxsw_sp_port->bridged) {
3905 mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
3906 mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
3909 if (lag->ref_count == 1)
3910 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
3912 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
3913 mlxsw_sp_port->local_port);
3914 mlxsw_sp_port->lagged = 0;
3917 mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
3920 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3923 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3924 char sldr_pl[MLXSW_REG_SLDR_LEN];
3926 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
3927 mlxsw_sp_port->local_port);
3928 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3931 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3934 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3935 char sldr_pl[MLXSW_REG_SLDR_LEN];
3937 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
3938 mlxsw_sp_port->local_port);
3939 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3942 static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
3943 bool lag_tx_enabled)
3946 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
3947 mlxsw_sp_port->lag_id);
3949 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
3950 mlxsw_sp_port->lag_id);
3953 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
3954 struct netdev_lag_lower_state_info *info)
3956 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
3959 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
3960 struct net_device *vlan_dev)
3962 struct mlxsw_sp_port *mlxsw_sp_vport;
3963 u16 vid = vlan_dev_vlan_id(vlan_dev);
3965 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3966 if (WARN_ON(!mlxsw_sp_vport))
3969 mlxsw_sp_vport->dev = vlan_dev;
3974 static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
3975 struct net_device *vlan_dev)
3977 struct mlxsw_sp_port *mlxsw_sp_vport;
3978 u16 vid = vlan_dev_vlan_id(vlan_dev);
3980 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3981 if (WARN_ON(!mlxsw_sp_vport))
3984 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
3987 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
3988 unsigned long event, void *ptr)
3990 struct netdev_notifier_changeupper_info *info;
3991 struct mlxsw_sp_port *mlxsw_sp_port;
3992 struct net_device *upper_dev;
3993 struct mlxsw_sp *mlxsw_sp;
3996 mlxsw_sp_port = netdev_priv(dev);
3997 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4001 case NETDEV_PRECHANGEUPPER:
4002 upper_dev = info->upper_dev;
4003 if (!is_vlan_dev(upper_dev) &&
4004 !netif_is_lag_master(upper_dev) &&
4005 !netif_is_bridge_master(upper_dev) &&
4006 !netif_is_l3_master(upper_dev))
4010 /* A HW limitation forbids putting ports in multiple bridges. */
4011 if (netif_is_bridge_master(upper_dev) &&
4012 !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
4014 if (netif_is_lag_master(upper_dev) &&
4015 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
4018 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
4020 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
4021 !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
4024 case NETDEV_CHANGEUPPER:
4025 upper_dev = info->upper_dev;
4026 if (is_vlan_dev(upper_dev)) {
4028 err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
4031 mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
4033 } else if (netif_is_bridge_master(upper_dev)) {
4035 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4038 mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
4039 } else if (netif_is_lag_master(upper_dev)) {
4041 err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
4044 mlxsw_sp_port_lag_leave(mlxsw_sp_port,
4046 } else if (netif_is_l3_master(upper_dev)) {
4048 err = mlxsw_sp_port_vrf_join(mlxsw_sp_port);
4050 mlxsw_sp_port_vrf_leave(mlxsw_sp_port);
4061 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4062 unsigned long event, void *ptr)
4064 struct netdev_notifier_changelowerstate_info *info;
4065 struct mlxsw_sp_port *mlxsw_sp_port;
4068 mlxsw_sp_port = netdev_priv(dev);
4072 case NETDEV_CHANGELOWERSTATE:
4073 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4074 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4075 info->lower_state_info);
4077 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4085 static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
4086 unsigned long event, void *ptr)
4089 case NETDEV_PRECHANGEUPPER:
4090 case NETDEV_CHANGEUPPER:
4091 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
4092 case NETDEV_CHANGELOWERSTATE:
4093 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
4099 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4100 unsigned long event, void *ptr)
4102 struct net_device *dev;
4103 struct list_head *iter;
4106 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4107 if (mlxsw_sp_port_dev_check(dev)) {
4108 ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
4117 static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
4118 struct net_device *vlan_dev)
4120 u16 fid = vlan_dev_vlan_id(vlan_dev);
4121 struct mlxsw_sp_fid *f;
4123 f = mlxsw_sp_fid_find(mlxsw_sp, fid);
4125 f = mlxsw_sp_fid_create(mlxsw_sp, fid);
4135 static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
4136 struct net_device *vlan_dev)
4138 u16 fid = vlan_dev_vlan_id(vlan_dev);
4139 struct mlxsw_sp_fid *f;
4141 f = mlxsw_sp_fid_find(mlxsw_sp, fid);
4143 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
4144 if (f && --f->ref_count == 0)
4145 mlxsw_sp_fid_destroy(mlxsw_sp, f);
4148 static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
4149 unsigned long event, void *ptr)
4151 struct netdev_notifier_changeupper_info *info;
4152 struct net_device *upper_dev;
4153 struct mlxsw_sp *mlxsw_sp;
4156 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
4163 case NETDEV_PRECHANGEUPPER:
4164 upper_dev = info->upper_dev;
4165 if (!is_vlan_dev(upper_dev) && !netif_is_l3_master(upper_dev))
4167 if (is_vlan_dev(upper_dev) &&
4168 br_dev != mlxsw_sp->master_bridge.dev)
4171 case NETDEV_CHANGEUPPER:
4172 upper_dev = info->upper_dev;
4173 if (is_vlan_dev(upper_dev)) {
4175 err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
4178 mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp,
4180 } else if (netif_is_l3_master(upper_dev)) {
4182 err = mlxsw_sp_bridge_vrf_join(mlxsw_sp,
4185 mlxsw_sp_bridge_vrf_leave(mlxsw_sp, br_dev);
4196 static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
4198 return find_first_zero_bit(mlxsw_sp->vfids.mapped,
4202 static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
4204 char sfmr_pl[MLXSW_REG_SFMR_LEN];
4206 mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
4207 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
4210 static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
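/* vFIDs back VLAN-unaware bridges: allocate the first free vFID, create
 * the corresponding FID in the device (SFMR) and track it on the
 * per-switch vfids list and bitmap.
 */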
4212 static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
4213 struct net_device *br_dev)
4215 struct device *dev = mlxsw_sp->bus_info->dev;
4216 struct mlxsw_sp_fid *f;
4220 vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
4221 if (vfid == MLXSW_SP_VFID_MAX) {
4222 dev_err(dev, "No available vFIDs\n");
4223 return ERR_PTR(-ERANGE);
4226 fid = mlxsw_sp_vfid_to_fid(vfid);
4227 err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
4229 dev_err(dev, "Failed to create FID=%d\n", fid);
4230 return ERR_PTR(err);
4233 f = kzalloc(sizeof(*f), GFP_KERNEL);
4235 goto err_allocate_vfid;
4237 f->leave = mlxsw_sp_vport_vfid_leave;
4241 list_add(&f->list, &mlxsw_sp->vfids.list);
4242 set_bit(vfid, mlxsw_sp->vfids.mapped);
4247 mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
4248 return ERR_PTR(-ENOMEM);
4251 static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
4252 struct mlxsw_sp_fid *f)
4254 u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
4257 clear_bit(vfid, mlxsw_sp->vfids.mapped);
4261 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);
4265 mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
4268 static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
4271 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
4272 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
4274 return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
4278 static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
4279 struct net_device *br_dev)
4281 struct mlxsw_sp_fid *f;
4284 f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
4286 f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
4291 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
4293 goto err_vport_flood_set;
4295 err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
4297 goto err_vport_fid_map;
4299 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
4302 netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);
4307 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
4308 err_vport_flood_set:
4310 mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
4314 static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
4316 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
4318 netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
4320 mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);
4322 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
4324 mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);
4326 mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
4327 if (--f->ref_count == 0)
4328 mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
4331 static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
4332 struct net_device *br_dev)
4334 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
4335 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
4336 struct net_device *dev = mlxsw_sp_vport->dev;
4339 if (f && !WARN_ON(!f->leave))
4340 f->leave(mlxsw_sp_vport);
4342 err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
4344 netdev_err(dev, "Failed to join vFID\n");
4348 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
4350 netdev_err(dev, "Failed to enable learning\n");
4351 goto err_port_vid_learning_set;
4354 mlxsw_sp_vport->learning = 1;
4355 mlxsw_sp_vport->learning_sync = 1;
4356 mlxsw_sp_vport->uc_flood = 1;
4357 mlxsw_sp_vport->mc_flood = 1;
4358 mlxsw_sp_vport->mc_router = 0;
4359 mlxsw_sp_vport->mc_disabled = 1;
4360 mlxsw_sp_vport->bridged = 1;
4364 err_port_vid_learning_set:
4365 mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
4369 static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
4371 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
4373 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
4375 mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
4377 mlxsw_sp_vport->learning = 0;
4378 mlxsw_sp_vport->learning_sync = 0;
4379 mlxsw_sp_vport->uc_flood = 0;
4380 mlxsw_sp_vport->mc_flood = 0;
4381 mlxsw_sp_vport->mc_router = 0;
4382 mlxsw_sp_vport->bridged = 0;
4386 mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
4387 const struct net_device *br_dev)
4389 struct mlxsw_sp_port *mlxsw_sp_vport;
4391 list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
4393 struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);
4395 if (dev && dev == br_dev)
4402 static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
4403 unsigned long event, void *ptr,
4406 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
4407 struct netdev_notifier_changeupper_info *info = ptr;
4408 struct mlxsw_sp_port *mlxsw_sp_vport;
4409 struct net_device *upper_dev;
4412 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
4413 if (!mlxsw_sp_vport)
4417 case NETDEV_PRECHANGEUPPER:
4418 upper_dev = info->upper_dev;
4419 if (!netif_is_bridge_master(upper_dev) &&
4420 !netif_is_l3_master(upper_dev))
4424 /* We can't have multiple VLAN interfaces configured on
4425 * the same port that are members of the same bridge. */
4427 if (netif_is_bridge_master(upper_dev) &&
4428 !mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
4432 case NETDEV_CHANGEUPPER:
4433 upper_dev = info->upper_dev;
4434 if (netif_is_bridge_master(upper_dev)) {
4436 err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
4439 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
4440 } else if (netif_is_l3_master(upper_dev)) {
4442 err = mlxsw_sp_vport_vrf_join(mlxsw_sp_vport);
4444 mlxsw_sp_vport_vrf_leave(mlxsw_sp_vport);
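
/* Propagate the event to the vPort of every mlxsw port member of the LAG
 * device.
 */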
static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
					      unsigned long event, void *ptr,
					      u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
							     vid);
			if (ret)
				return ret;
		}
	}

	return 0;
}
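
/* Handle events for a VLAN device configured on top of the VLAN-aware
 * bridge, e.g. its enslavement to an L3 master (VRF) device.
 */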
static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
						unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
	if (!mlxsw_sp)
		return 0;

	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		/* VLAN devices are only allowed on top of the
		 * VLAN-aware bridge.
		 */
		if (WARN_ON(vlan_dev_real_dev(vlan_dev) !=
			    mlxsw_sp->master_bridge.dev))
			return -EINVAL;
		if (!netif_is_l3_master(info->upper_dev))
			return -EINVAL;
		break;
	case NETDEV_CHANGEUPPER:
		if (netif_is_l3_master(info->upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_bridge_vrf_join(mlxsw_sp,
							       vlan_dev);
			else
				mlxsw_sp_bridge_vrf_leave(mlxsw_sp, vlan_dev);
		} else {
			err = -EINVAL;
			WARN_ON(1);
		}
		break;
	}

	return err;
}
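
/* Dispatch events for a VLAN device according to the type of its real
 * device: physical port, LAG device or the VLAN-aware bridge.
 */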
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
					 unsigned long event, void *ptr)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
						      vid);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
							  vid);
	else if (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, event,
							    ptr);

	return 0;
}
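
/* Top-level netdevice notifier: address and MTU changes are handled by the
 * router code, other events are dispatched by netdevice type.
 */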
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int err = 0;

	if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
		err = mlxsw_sp_netdevice_router_port_event(dev);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);

	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
	.notifier_call = mlxsw_sp_inetaddr_event,
	.priority = 10,	/* Must be called before FIB notifier block */
};

static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
	.notifier_call = mlxsw_sp_router_netevent_event,
};

static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp_pci_driver = {
	.name = mlxsw_sp_driver_name,
	.id_table = mlxsw_sp_pci_id_table,
};
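
/* Register the netdevice, inetaddr and netevent notifiers followed by the
 * core and PCI drivers; unwind in reverse order on failure.
 */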
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	register_netevent_notifier(&mlxsw_sp_router_netevent_nb);

	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
	if (err)
		goto err_pci_driver_register;

	return 0;

err_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
err_core_driver_register:
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
	unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);