/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};
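/*
 * This file implements the SRIOV offloads (switchdev) mode of the mlx5
 * e-switch: offloaded FDB flow rules, emulated per-flow vlan push/pop,
 * send-to-vport (SQ re-inject) rules, the slow path miss rule, and the
 * devlink callbacks for moving between legacy and offloads modes.
 */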
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i = 0;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	/* per flow vlan pop/push is emulated, don't set that into the firmware */
	flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
					   MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport_num = attr->out_rep->vport;
		i++;
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter = counter;
		i++;
	}

	/* match on the e-switch source port of the packet */
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
				      MLX5_MATCH_MISC_PARAMETERS;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		flow_act.encap_id = attr->encap->encap_id;

	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
				   spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		mlx5_fc_destroy(esw->dev, counter);
	else
		esw->offloads.num_flows++;

	return rule;
}
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);
	mlx5_del_flow_rules(rule);
	mlx5_fc_destroy(esw->dev, counter);
	esw->offloads.num_flows--;
}
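/*
 * The FDB hardware tables have no per-flow vlan push/pop action, so the
 * driver emulates it through the per-vport vlan insert/strip configuration
 * (__mlx5_eswitch_set_vport_vlan) and reference counts the users below.
 */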
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}
static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep  = attr->in_rep;
	out_rep = attr->out_rep;

	/* push is configured on the ingress vport, pop on the egress one */
	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep  = attr->in_rep;
	out_rep = attr->out_rep;

	if (push && in_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}
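/*
 * mlx5_eswitch_add_vlan_action() / mlx5_eswitch_del_vlan_action() keep two
 * reference counts: a per-vport vlan_refcount for the emulated push action,
 * and a global vlan_push_pop_refcount that gates the "strip vlan on all VF
 * vports" policy applied while any vlan rule is offloaded.
 */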
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;
	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}
		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan;
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;
		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}
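/*
 * Send-to-vport rules match packets that the PF (vport 0) re-injects from a
 * representor send queue (identified by its source SQN) and steer them to
 * the vport the representor stands for, bypassing normal FDB matching.
 */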
static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5_esw_sq *esw_sq, *tmp;

	if (esw->mode != SRIOV_OFFLOADS)
		return;

	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
		mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
		list_del(&esw_sq->list);
		kfree(esw_sq);
	}
}
int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u16 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_esw_sq *esw_sq;
	int err;
	int i;

	if (esw->mode != SRIOV_OFFLOADS)
		return 0;

	for (i = 0; i < sqns_num; i++) {
		esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
		if (!esw_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								rep->vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(esw_sq);
			goto out_err;
		}
		esw_sq->send_to_vport_rule = flow_rule;
		list_add(&esw_sq->list, &rep->vport_sqs_list);
	}
	return 0;

out_err:
	mlx5_eswitch_sqs2vport_stop(esw, rep);
	return err;
}
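/*
 * The miss rule is an empty-match rule at the end of the slow path FDB:
 * packets that hit no offloaded rule are forwarded to vport 0, where the
 * PF and the representor netdevices can pick them up.
 */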
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		err = -ENOMEM;
		goto out;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule = flow_rule;
out:
	kvfree(spec);
	return err;
}
#define MAX_PF_SQ 256
#define ESW_OFFLOADS_NUM_GROUPS  4
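/*
 * The offloads FDB is built from two tables: an auto-grouped fast path
 * table for offloaded flows, sized as min(2^log_max_ft_size,
 * max_flow_counter * ESW_OFFLOADS_NUM_GROUPS), and a slow path table
 * holding the send-to-vport group (nvports + MAX_PF_SQ entries) followed
 * by a two-entry miss group.
 */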
static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int table_size, ix, esw_size, err = 0;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	void *match_criteria;
	u32 flags = 0;

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  MLX5_CAP_GEN(dev, max_flow_counter), ESW_OFFLOADS_NUM_GROUPS);

	esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS,
			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 0,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
		goto fast_fdb_err;
	}
	esw->fdb_table.fdb = fdb;

	table_size = nvports + MAX_PF_SQ + 1;
	fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0, 0);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.fdb = fdb;

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
slow_fdb_err:
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
fast_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}
static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Table\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
}
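/*
 * The offloads table lives in the NIC RX offloads namespace; it holds one
 * rule per vport (see mlx5_eswitch_create_vport_rx_rule() below) steering
 * packets that arrived from that vport to the representor's TIR.
 */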
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_core_dev *dev = esw->dev;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	/* one entry per VF vport plus the PF and uplink vports */
	ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}
static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}
static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
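/*
 * A vport rx rule matches the e-switch source port of an incoming packet
 * and forwards it to the given TIR, so traffic that missed in the FDB ends
 * up on the RX queues of the right representor netdevice.
 */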
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}
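/*
 * Switching between legacy and offloads (switchdev) modes is driven from
 * devlink: the e-switch is torn down and re-enabled in the target mode.
 * With the iproute2 devlink tool this looks like, e.g.:
 *
 *   devlink dev eswitch set pci/0000:06:00.0 mode switchdev
 *   devlink dev eswitch set pci/0000:06:00.0 mode legacy
 *
 * (the PCI address above is only an example)
 */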
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			esw_warn(esw->dev, "Inline mode is different between vports\n");
		}
	}
	return err;
}
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	/* disable PF RoCE so missed packets don't go through RoCE steering */
	mlx5_dev_list_lock();
	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	err = esw_create_offloads_fdb_table(esw, nvports);
	if (err)
		goto create_fdb_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;

		err = rep->load(esw, rep);
		if (err)
			goto err_reps;
	}
	return 0;

err_reps:
	for (vport--; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_table(esw);

create_fdb_err:
	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}
static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
	}

	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}
void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}

	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_table(esw);
}
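/* Helpers translating between the devlink eswitch/inline-mode enums and the
 * driver's internal SRIOV_* and MLX5_INLINE_MODE_* values.
 */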
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
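/*
 * The inline mode controls how many packet headers a VF must copy into the
 * send WQE so the e-switch can steer the packet. It is configured through
 * devlink as well, e.g.:
 *
 *   devlink dev eswitch set pci/0000:06:00.0 inline-mode transport
 *
 * (the PCI address above is only an example)
 */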
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct mlx5_core_dev *dev;
	u16 cur_mlx5_mode, mlx5_mode = 0;

	dev = devlink_priv(devlink);

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (cur_mlx5_mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch);

	return -EINVAL;
}
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev;

	dev = devlink_priv(devlink);

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int num_vports = esw->enabled_vports;
	int err, vport;
	u8 mlx5_mode;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		esw_warn(dev, "Inline mode can't be set\n");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set inline mode when flows are configured\n");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < num_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			esw_warn(dev, "Failed to set min inline on vport %d\n",
				 vport);
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	/* all vports must agree on the inline mode for it to be reported */
	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}
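/*
 * Representors are registered with their load/unload callbacks and marked
 * valid; valid reps are loaded in esw_offloads_init() and unloaded in
 * esw_offloads_cleanup() or when explicitly unregistered below. Index 0
 * is the uplink representor.
 */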
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep *__rep)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	memset(rep, 0, sizeof(*rep));

	rep->load   = __rep->load;
	rep->unload = __rep->unload;
	rep->vport  = __rep->vport;
	rep->netdev = __rep->netdev;
	ether_addr_copy(rep->hw_id, __rep->hw_id);

	INIT_LIST_HEAD(&rep->vport_sqs_list);
	rep->valid = true;
}
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->unload(esw, rep);

	rep->valid = false;
}
struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
{
#define UPLINK_REP_INDEX 0
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
	return rep->netdev;
}