goto err_alloc_stats;
	}

-	if (mlxsw_driver->profile->used_max_lag &&
- mlxsw_driver->profile->used_max_port_per_lag) {
- alloc_size = sizeof(u8) * mlxsw_driver->profile->max_lag *
- mlxsw_driver->profile->max_port_per_lag;
+ err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
+ &mlxsw_core->resources);
+ if (err)
+ goto err_bus_init;
+
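+	/* The bus init above queries the device's LAG limits from firmware;
+	 * only then are resources.max_lag and resources.max_ports_in_lag
+	 * valid, which is why the mapping allocation now follows it.
+	 */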
+ if (mlxsw_core->resources.max_lag_valid &&
+ mlxsw_core->resources.max_ports_in_lag_valid) {
+ alloc_size = sizeof(u8) * mlxsw_core->resources.max_lag *
+ mlxsw_core->resources.max_ports_in_lag;
mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

-	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
- &mlxsw_core->resources);
- if (err)
- goto err_bus_init;
-
err = mlxsw_emad_init(mlxsw_core);
if (err)
goto err_emad_init;
err_devlink_register:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
- mlxsw_bus->fini(bus_priv);
-err_bus_init:
kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
+ mlxsw_bus->fini(bus_priv);
+err_bus_init:
free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
devlink_free(devlink);
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 port_index)
{
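+	/* lag.mapping is a flat u8 array laid out row-major: one row of
+	 * max_ports_in_lag entries per LAG, so e.g. with 16 ports per LAG,
+	 * (lag_id 2, port_index 3) maps to index 2 * 16 + 3 = 35.
+	 */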
- return mlxsw_core->driver->profile->max_port_per_lag * lag_id +
+ return mlxsw_core->resources.max_ports_in_lag * lag_id +
port_index;
}
{
	int i;

-	for (i = 0; i < mlxsw_core->driver->profile->max_port_per_lag; i++) {
+ for (i = 0; i < mlxsw_core->resources.max_ports_in_lag; i++) {
int index = mlxsw_core_lag_mapping_index(mlxsw_core,
lag_id, i);
mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
mbox, profile->max_vepa_channels);
}
- if (profile->used_max_lag) {
- mlxsw_cmd_mbox_config_profile_set_max_lag_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_max_lag_set(
- mbox, profile->max_lag);
- }
- if (profile->used_max_port_per_lag) {
- mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(
- mbox, profile->max_port_per_lag);
- }
if (profile->used_max_mid) {
mlxsw_cmd_mbox_config_profile_set_max_mid_set(
mbox, 1);
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
char slcr_pl[MLXSW_REG_SLCR_LEN];
+	int err;

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
MLXSW_REG_SLCR_LAG_HASH_DMAC |
MLXSW_REG_SLCR_LAG_HASH_SPORT |
MLXSW_REG_SLCR_LAG_HASH_DPORT |
MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
+ if (err)
+ return err;
+
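+	/* The LAG limits are reported by firmware during bus init; treat
+	 * missing limits as a device error.
+	 */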
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ if (!(resources->max_lag_valid && resources->max_ports_in_lag_valid))
+ return -EIO;
+
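+	/* One mlxsw_sp_upper (upper device plus reference count) per
+	 * possible LAG, sized by the firmware-reported maximum.
+	 */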
+ mlxsw_sp->lags = kcalloc(resources->max_lag,
+ sizeof(struct mlxsw_sp_upper),
+ GFP_KERNEL);
+ if (!mlxsw_sp->lags)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->lags);
}
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
err_router_init:
mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
+ mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_router_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
+ mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
.used_max_vepa_channels = 1,
.max_vepa_channels = 0,
- .used_max_lag = 1,
- .max_lag = MLXSW_SP_LAG_MAX,
- .used_max_port_per_lag = 1,
- .max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX,
.used_max_mid = 1,
.max_mid = MLXSW_SP_MID_MAX,
.used_max_pgt = 1,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u16 lag_id = mlxsw_sp_port->lag_id;
+ struct mlxsw_resources *resources;
	int i, count = 0;

	if (!mlxsw_sp_port->lagged)
		return true;

-	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_ports_in_lag; i++) {
		struct mlxsw_sp_port *lag_port;

		lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
struct net_device *lag_dev,
u16 *p_lag_id)
{
+ struct mlxsw_resources *resources;
struct mlxsw_sp_upper *lag;
int free_lag_id = -1;
	int i;

-	for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
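+	/* Walk all possible LAGs: reuse the one already bound to lag_dev,
+	 * otherwise fall back to the first free entry (free_lag_id).
+	 */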
+ for (i = 0; i < resources->max_lag; i++) {
lag = mlxsw_sp_lag_get(mlxsw_sp, i);
if (lag->ref_count) {
if (lag->dev == lag_dev) {
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
u16 lag_id, u8 *p_port_index)
{
+ struct mlxsw_resources *resources;
	int i;

-	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
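+	/* Pick the first free port slot within this LAG. */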
+ for (i = 0; i < resources->max_ports_in_lag; i++) {
if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
*p_port_index = i;
return 0;