/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
46 struct mlx5_flow_rule *
47 mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
48 struct mlx5_flow_spec *spec,
49 struct mlx5_esw_flow_attr *attr)
51 struct mlx5_flow_destination dest = { 0 };
52 struct mlx5_fc *counter = NULL;
53 struct mlx5_flow_rule *rule;
57 if (esw->mode != SRIOV_OFFLOADS)
58 return ERR_PTR(-EOPNOTSUPP);
60 action = attr->action;
62 if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
63 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
64 dest.vport_num = attr->out_rep->vport;
65 action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
66 } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
67 counter = mlx5_fc_create(esw->dev, true);
69 return ERR_CAST(counter);
70 dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
71 dest.counter = counter;
74 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
75 MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
77 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
78 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
80 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
81 MLX5_MATCH_MISC_PARAMETERS;
83 rule = mlx5_add_flow_rule((struct mlx5_flow_table *)esw->fdb_table.fdb,
84 spec, action, 0, &dest);
87 mlx5_fc_destroy(esw->dev, counter);
92 static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
94 struct mlx5_eswitch_rep *rep;
95 int vf_vport, err = 0;
97 esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
98 for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
99 rep = &esw->offloads.vport_reps[vf_vport];
103 err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
112 static struct mlx5_eswitch_rep *
113 esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
115 struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
117 in_rep = attr->in_rep;
118 out_rep = attr->out_rep;
130 static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
131 bool push, bool pop, bool fwd)
133 struct mlx5_eswitch_rep *in_rep, *out_rep;
135 if ((push || pop) && !fwd)
138 in_rep = attr->in_rep;
139 out_rep = attr->out_rep;
141 if (push && in_rep->vport == FDB_UPLINK_VPORT)
144 if (pop && out_rep->vport == FDB_UPLINK_VPORT)
147 /* vport has vlan push configured, can't offload VF --> wire rules w.o it */
148 if (!push && !pop && fwd)
149 if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
152 /* protects against (1) setting rules with different vlans to push and
153 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
155 if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
164 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
165 struct mlx5_esw_flow_attr *attr)
167 struct offloads_fdb *offloads = &esw->fdb_table.offloads;
168 struct mlx5_eswitch_rep *vport = NULL;
172 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
173 pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
174 fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
176 err = esw_add_vlan_action_check(attr, push, pop, fwd);
180 attr->vlan_handled = false;
182 vport = esw_vlan_action_get_vport(attr, push, pop);
184 if (!push && !pop && fwd) {
185 /* tracks VF --> wire rules without vlan push action */
186 if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
187 vport->vlan_refcount++;
188 attr->vlan_handled = true;
197 if (!(offloads->vlan_push_pop_refcount)) {
198 /* it's the 1st vlan rule, apply global vlan pop policy */
199 err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
203 offloads->vlan_push_pop_refcount++;
206 if (vport->vlan_refcount)
209 err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
210 SET_VLAN_INSERT | SET_VLAN_STRIP);
213 vport->vlan = attr->vlan;
215 vport->vlan_refcount++;
219 attr->vlan_handled = true;
223 int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
224 struct mlx5_esw_flow_attr *attr)
226 struct offloads_fdb *offloads = &esw->fdb_table.offloads;
227 struct mlx5_eswitch_rep *vport = NULL;
231 if (!attr->vlan_handled)
234 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
235 pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
236 fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
238 vport = esw_vlan_action_get_vport(attr, push, pop);
240 if (!push && !pop && fwd) {
241 /* tracks VF --> wire rules without vlan push action */
242 if (attr->out_rep->vport == FDB_UPLINK_VPORT)
243 vport->vlan_refcount--;
249 vport->vlan_refcount--;
250 if (vport->vlan_refcount)
251 goto skip_unset_push;
254 err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
255 0, 0, SET_VLAN_STRIP);
261 offloads->vlan_push_pop_refcount--;
262 if (offloads->vlan_push_pop_refcount)
265 /* no more vlan rules, stop global vlan pop policy */
266 err = esw_set_global_vlan_pop(esw, 0);
272 static struct mlx5_flow_rule *
273 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
275 struct mlx5_flow_destination dest;
276 struct mlx5_flow_rule *flow_rule;
277 struct mlx5_flow_spec *spec;
280 spec = mlx5_vzalloc(sizeof(*spec));
282 esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
283 flow_rule = ERR_PTR(-ENOMEM);
287 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
288 MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
289 MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */
291 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
292 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
293 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
295 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
296 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
297 dest.vport_num = vport;
299 flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
300 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
302 if (IS_ERR(flow_rule))
303 esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
309 void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
310 struct mlx5_eswitch_rep *rep)
312 struct mlx5_esw_sq *esw_sq, *tmp;
314 if (esw->mode != SRIOV_OFFLOADS)
317 list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
318 mlx5_del_flow_rule(esw_sq->send_to_vport_rule);
319 list_del(&esw_sq->list);
324 int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
325 struct mlx5_eswitch_rep *rep,
326 u16 *sqns_array, int sqns_num)
328 struct mlx5_flow_rule *flow_rule;
329 struct mlx5_esw_sq *esw_sq;
333 if (esw->mode != SRIOV_OFFLOADS)
336 for (i = 0; i < sqns_num; i++) {
337 esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
343 /* Add re-inject rule to the PF/representor sqs */
344 flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
347 if (IS_ERR(flow_rule)) {
348 err = PTR_ERR(flow_rule);
352 esw_sq->send_to_vport_rule = flow_rule;
353 list_add(&esw_sq->list, &rep->vport_sqs_list);
358 mlx5_eswitch_sqs2vport_stop(esw, rep);
362 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
364 struct mlx5_flow_destination dest;
365 struct mlx5_flow_rule *flow_rule = NULL;
366 struct mlx5_flow_spec *spec;
369 spec = mlx5_vzalloc(sizeof(*spec));
371 esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
376 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
379 flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
380 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
382 if (IS_ERR(flow_rule)) {
383 err = PTR_ERR(flow_rule);
384 esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
388 esw->fdb_table.offloads.miss_rule = flow_rule;
/* Slow-path table sizing: room for up to MAX_PF_SQ PF/representor SQ rules
 * plus one miss rule; fast-path table is auto-grouped with 8K entries.
 */
#define MAX_PF_SQ 256

#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */
#define ESW_OFFLOADS_NUM_GROUPS  4
398 static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
400 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
401 struct mlx5_core_dev *dev = esw->dev;
402 struct mlx5_flow_namespace *root_ns;
403 struct mlx5_flow_table *fdb = NULL;
404 struct mlx5_flow_group *g;
406 void *match_criteria;
407 int table_size, ix, err = 0;
409 flow_group_in = mlx5_vzalloc(inlen);
413 root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
415 esw_warn(dev, "Failed to get FDB flow namespace\n");
419 esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n",
420 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
422 fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
423 ESW_OFFLOADS_NUM_ENTRIES,
424 ESW_OFFLOADS_NUM_GROUPS, 0);
427 esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
430 esw->fdb_table.fdb = fdb;
432 table_size = nvports + MAX_PF_SQ + 1;
433 fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0);
436 esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
439 esw->fdb_table.offloads.fdb = fdb;
441 /* create send-to-vport group */
442 memset(flow_group_in, 0, inlen);
443 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
444 MLX5_MATCH_MISC_PARAMETERS);
446 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
448 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
449 MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
451 ix = nvports + MAX_PF_SQ;
452 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
453 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
455 g = mlx5_create_flow_group(fdb, flow_group_in);
458 esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
461 esw->fdb_table.offloads.send_to_vport_grp = g;
463 /* create miss group */
464 memset(flow_group_in, 0, inlen);
465 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);
467 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
468 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);
470 g = mlx5_create_flow_group(fdb, flow_group_in);
473 esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
476 esw->fdb_table.offloads.miss_grp = g;
478 err = esw_add_fdb_miss_rule(esw);
485 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
487 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
489 mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
491 mlx5_destroy_flow_table(esw->fdb_table.fdb);
494 kvfree(flow_group_in);
498 static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
500 if (!esw->fdb_table.fdb)
503 esw_debug(esw->dev, "Destroy offloads FDB Table\n");
504 mlx5_del_flow_rule(esw->fdb_table.offloads.miss_rule);
505 mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
506 mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
508 mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
509 mlx5_destroy_flow_table(esw->fdb_table.fdb);
512 static int esw_create_offloads_table(struct mlx5_eswitch *esw)
514 struct mlx5_flow_namespace *ns;
515 struct mlx5_flow_table *ft_offloads;
516 struct mlx5_core_dev *dev = esw->dev;
519 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
521 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
525 ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0);
526 if (IS_ERR(ft_offloads)) {
527 err = PTR_ERR(ft_offloads);
528 esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
532 esw->offloads.ft_offloads = ft_offloads;
536 static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
538 struct mlx5_esw_offload *offloads = &esw->offloads;
540 mlx5_destroy_flow_table(offloads->ft_offloads);
543 static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
545 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
546 struct mlx5_flow_group *g;
547 struct mlx5_priv *priv = &esw->dev->priv;
549 void *match_criteria, *misc;
551 int nvports = priv->sriov.num_vfs + 2;
553 flow_group_in = mlx5_vzalloc(inlen);
557 /* create vport rx group */
558 memset(flow_group_in, 0, inlen);
559 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
560 MLX5_MATCH_MISC_PARAMETERS);
562 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
563 misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
564 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
566 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
567 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
569 g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
573 mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
577 esw->offloads.vport_rx_group = g;
579 kfree(flow_group_in);
583 static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
585 mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
588 struct mlx5_flow_rule *
589 mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
591 struct mlx5_flow_destination dest;
592 struct mlx5_flow_rule *flow_rule;
593 struct mlx5_flow_spec *spec;
596 spec = mlx5_vzalloc(sizeof(*spec));
598 esw_warn(esw->dev, "Failed to alloc match parameters\n");
599 flow_rule = ERR_PTR(-ENOMEM);
603 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
604 MLX5_SET(fte_match_set_misc, misc, source_port, vport);
606 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
607 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
609 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
610 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
613 flow_rule = mlx5_add_flow_rule(esw->offloads.ft_offloads, spec,
614 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
616 if (IS_ERR(flow_rule)) {
617 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
626 static int esw_offloads_start(struct mlx5_eswitch *esw)
628 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
630 if (esw->mode != SRIOV_LEGACY) {
631 esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
635 mlx5_eswitch_disable_sriov(esw);
636 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
638 esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
639 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
641 esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
646 int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
648 struct mlx5_eswitch_rep *rep;
652 err = esw_create_offloads_fdb_table(esw, nvports);
656 err = esw_create_offloads_table(esw);
660 err = esw_create_vport_rx_group(esw);
664 for (vport = 0; vport < nvports; vport++) {
665 rep = &esw->offloads.vport_reps[vport];
669 err = rep->load(esw, rep);
676 for (vport--; vport >= 0; vport--) {
677 rep = &esw->offloads.vport_reps[vport];
680 rep->unload(esw, rep);
682 esw_destroy_vport_rx_group(esw);
685 esw_destroy_offloads_table(esw);
688 esw_destroy_offloads_fdb_table(esw);
692 static int esw_offloads_stop(struct mlx5_eswitch *esw)
694 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
696 mlx5_eswitch_disable_sriov(esw);
697 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
699 esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
700 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
702 esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
708 void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
710 struct mlx5_eswitch_rep *rep;
713 for (vport = 0; vport < nvports; vport++) {
714 rep = &esw->offloads.vport_reps[vport];
717 rep->unload(esw, rep);
720 esw_destroy_vport_rx_group(esw);
721 esw_destroy_offloads_table(esw);
722 esw_destroy_offloads_fdb_table(esw);
725 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
728 case DEVLINK_ESWITCH_MODE_LEGACY:
729 *mlx5_mode = SRIOV_LEGACY;
731 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
732 *mlx5_mode = SRIOV_OFFLOADS;
741 static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
745 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
748 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
757 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
759 struct mlx5_core_dev *dev;
760 u16 cur_mlx5_mode, mlx5_mode = 0;
762 dev = devlink_priv(devlink);
764 if (!MLX5_CAP_GEN(dev, vport_group_manager))
767 cur_mlx5_mode = dev->priv.eswitch->mode;
769 if (cur_mlx5_mode == SRIOV_NONE)
772 if (esw_mode_from_devlink(mode, &mlx5_mode))
775 if (cur_mlx5_mode == mlx5_mode)
778 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
779 return esw_offloads_start(dev->priv.eswitch);
780 else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
781 return esw_offloads_stop(dev->priv.eswitch);
786 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
788 struct mlx5_core_dev *dev;
790 dev = devlink_priv(devlink);
792 if (!MLX5_CAP_GEN(dev, vport_group_manager))
795 if (dev->priv.eswitch->mode == SRIOV_NONE)
798 return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
801 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
803 struct mlx5_eswitch_rep *__rep)
805 struct mlx5_esw_offload *offloads = &esw->offloads;
806 struct mlx5_eswitch_rep *rep;
808 rep = &offloads->vport_reps[vport_index];
810 memset(rep, 0, sizeof(*rep));
812 rep->load = __rep->load;
813 rep->unload = __rep->unload;
814 rep->vport = __rep->vport;
815 rep->priv_data = __rep->priv_data;
816 ether_addr_copy(rep->hw_id, __rep->hw_id);
818 INIT_LIST_HEAD(&rep->vport_sqs_list);
822 void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
825 struct mlx5_esw_offload *offloads = &esw->offloads;
826 struct mlx5_eswitch_rep *rep;
828 rep = &offloads->vport_reps[vport_index];
830 if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
831 rep->unload(esw, rep);