/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/mlx5/driver.h>

#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"

#define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
					 sizeof(struct init_tree_node))

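/*
 * Illustrative expansion: the compound literal lets the macro count its
 * variadic arguments at compile time, e.g. (node_a..node_c are hypothetical
 * initializers, used only for illustration)
 *
 *	INIT_TREE_NODE_ARRAY_SIZE(node_a, node_b, node_c)
 *	== sizeof((struct init_tree_node[]){node_a, node_b, node_c}) /
 *	   sizeof(struct init_tree_node)
 *	== 3
 */
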
#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
		 ...) {.type = FS_TYPE_PRIO,\
	.min_ft_level = min_level_val,\
	.num_levels = num_levels_val,\
	.num_leaf_prios = num_prios_val,\
	.caps = caps_val,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
		 __VA_ARGS__)

#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
				   sizeof(long))

#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))

#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
			       .caps = (long[]) {__VA_ARGS__} }

#define FS_CHAINING_CAPS  FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
					   FS_CAP(flow_table_properties_nic_receive.modify_root), \
					   FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
					   FS_CAP(flow_table_properties_nic_receive.flow_table_modify))

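/*
 * Expansion sketch: FS_CHAINING_CAPS becomes a struct node_caps initializer
 * with .arr_sz = 4 and .caps holding the __mlx5_bit_off() offsets of the
 * four flow_table_properties_nic_receive bits above; has_required_caps()
 * later tests each offset against the device's raw capability page.
 */
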
#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1

#define BY_PASS_PRIO_NUM_LEVELS 1
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
			   LEFTOVERS_NUM_PRIOS)

#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Vlan, mac, ttc, aRFS */
#define KERNEL_NIC_PRIO_NUM_LEVELS 4
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)

#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)

#define OFFLOADS_MAX_FT 1
#define OFFLOADS_NUM_PRIOS 1
#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)

#define LAG_PRIO_NUM_LEVELS 1
#define LAG_NUM_PRIOS 1
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)

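/*
 * Worked example of the minimum-level arithmetic above (a sketch;
 * MLX5_BY_PASS_NUM_PRIOS comes from include/linux/mlx5/fs.h):
 *
 *	KERNEL_MIN_LEVEL   = KERNEL_NIC_PRIO_NUM_LEVELS + 1        = 5
 *	ETHTOOL_MIN_LEVEL  = KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS  = 16
 *	BY_PASS_MIN_LEVEL  = ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS
 *			     + LEFTOVERS_NUM_PRIOS
 *	ANCHOR_MIN_LEVEL   = BY_PASS_MIN_LEVEL + 1
 *	OFFLOADS_MIN_LEVEL = ANCHOR_MIN_LEVEL + 1
 *	LAG_MIN_LEVEL      = OFFLOADS_MIN_LEVEL + 1
 *
 * i.e. each namespace must sit deep enough in the device's flow table
 * level space to leave room for every table that can chain before it.
 */
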
struct node_caps {
	size_t	arr_sz;
	long	*caps;
};

static struct init_tree_node {
	enum fs_node_type	type;
	struct init_tree_node *children;
	int ar_size;
	struct node_caps caps;
	int min_ft_level;
	int num_leaf_prios;
	int prio;
	int num_levels;
} root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 7,
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, LAG_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
						  LAG_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
			 ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
		ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
						  ETHTOOL_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
			 ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
				ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
						  KERNEL_NIC_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
		ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
			 ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
	}
};

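/*
 * The children above chain in this order inside the NIC RX root namespace:
 * bypass, LAG, offloads, ethtool, kernel, leftovers and finally the anchor
 * priority. set_prio_attrs() later converts the per-prio num_levels into
 * absolute start levels.
 */
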
enum fs_i_mutex_lock_class {
	FS_MUTEX_GRANDPARENT,
	FS_MUTEX_PARENT,
	FS_MUTEX_CHILD
};

static void del_rule(struct fs_node *node);
static void del_flow_table(struct fs_node *node);
static void del_flow_group(struct fs_node *node);
static void del_fte(struct fs_node *node);
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2);
static struct mlx5_flow_rule *
find_flow_rule(struct fs_fte *fte,
	       struct mlx5_flow_destination *dest);

static void tree_init_node(struct fs_node *node,
			   unsigned int refcount,
			   void (*remove_func)(struct fs_node *))
{
	atomic_set(&node->refcount, refcount);
	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->children);
	mutex_init(&node->lock);
	node->remove_func = remove_func;
}

static void tree_add_node(struct fs_node *node, struct fs_node *parent)
{
	if (parent)
		atomic_inc(&parent->refcount);
	node->parent = parent;

	/* Parent is the root */
	if (!parent)
		node->root = node;
	else
		node->root = parent->root;
}

static void tree_get_node(struct fs_node *node)
{
	atomic_inc(&node->refcount);
}

static void nested_lock_ref_node(struct fs_node *node,
				 enum fs_i_mutex_lock_class class)
{
	if (node) {
		mutex_lock_nested(&node->lock, class);
		atomic_inc(&node->refcount);
	}
}

static void lock_ref_node(struct fs_node *node)
{
	if (node) {
		mutex_lock(&node->lock);
		atomic_inc(&node->refcount);
	}
}

static void unlock_ref_node(struct fs_node *node)
{
	if (node) {
		atomic_dec(&node->refcount);
		mutex_unlock(&node->lock);
	}
}

static void tree_put_node(struct fs_node *node)
{
	struct fs_node *parent_node = node->parent;

	lock_ref_node(parent_node);
	if (atomic_dec_and_test(&node->refcount)) {
		if (parent_node)
			list_del_init(&node->list);
		if (node->remove_func)
			node->remove_func(node);
		kfree(node);
		node = NULL;
	}
	unlock_ref_node(parent_node);
	if (!node && parent_node)
		tree_put_node(parent_node);
}

static int tree_remove_node(struct fs_node *node)
{
	if (atomic_read(&node->refcount) > 1) {
		atomic_dec(&node->refcount);
		return -EEXIST;
	}
	tree_put_node(node);
	return 0;
}

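/*
 * Node lifetime sketch: a node starts with the refcount handed to
 * tree_init_node(); tree_get_node()/tree_put_node() take and drop further
 * references, and the remove_func callback (del_flow_table, del_flow_group,
 * del_fte or del_rule) runs when the count reaches zero. tree_remove_node()
 * is the "delete now" variant: it returns -EEXIST while someone else still
 * holds a reference.
 */
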
static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
				 unsigned int prio)
{
	struct fs_prio *iter_prio;

	fs_for_each_prio(iter_prio, ns) {
		if (iter_prio->prio == prio)
			return iter_prio;
	}

	return NULL;
}

static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size)
{
	unsigned int i;

	for (i = 0; i < size; i++, mask++, val1++, val2++)
		if ((*((u8 *)val1) & (*(u8 *)mask)) !=
		    ((*(u8 *)val2) & (*(u8 *)mask)))
			return false;

	return true;
}

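/*
 * Semantics sketch (illustrative values): masked_memcmp() reports whether
 * val1 and val2 agree on every bit set in mask, e.g. per byte
 *
 *	mask = 0xf0, val1 = 0x12, val2 = 0x1f  ->  true  (high nibbles equal)
 *	mask = 0xff, val1 = 0x12, val2 = 0x1f  ->  false
 */
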
static bool compare_match_value(struct mlx5_flow_group_mask *mask,
				void *fte_param1, void *fte_param2)
{
	if (mask->match_criteria_enable &
	    1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) {
		void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
						fte_param1, outer_headers);
		void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
						fte_param2, outer_headers);
		void *fte_mask = MLX5_ADDR_OF(fte_match_param,
					      mask->match_criteria, outer_headers);

		if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
				   MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
			return false;
	}

	if (mask->match_criteria_enable &
	    1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) {
		void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
						fte_param1, misc_parameters);
		void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
						fte_param2, misc_parameters);
		void *fte_mask = MLX5_ADDR_OF(fte_match_param,
					      mask->match_criteria, misc_parameters);

		if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
				   MLX5_ST_SZ_BYTES(fte_match_set_misc)))
			return false;
	}

	if (mask->match_criteria_enable &
	    1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) {
		void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
						fte_param1, inner_headers);
		void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
						fte_param2, inner_headers);
		void *fte_mask = MLX5_ADDR_OF(fte_match_param,
					      mask->match_criteria, inner_headers);

		if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
				   MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
			return false;
	}

	return true;
}

static bool compare_match_criteria(u8 match_criteria_enable1,
				   u8 match_criteria_enable2,
				   void *mask1, void *mask2)
{
	return match_criteria_enable1 == match_criteria_enable2 &&
		!memcmp(mask1, mask2, MLX5_ST_SZ_BYTES(fte_match_param));
}

static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
{
	struct fs_node *root;
	struct mlx5_flow_namespace *ns;

	root = node->root;

	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
		pr_warn("mlx5: flow steering node is not in tree or is corrupted\n");
		return NULL;
	}

	ns = container_of(root, struct mlx5_flow_namespace, node);
	return container_of(ns, struct mlx5_flow_root_namespace, ns);
}

static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev;
	return NULL;
}

static void del_flow_table(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;
	struct fs_prio *prio;
	int err;

	fs_get_obj(ft, node);
	dev = get_dev(&ft->node);

	err = mlx5_cmd_destroy_flow_table(dev, ft);
	if (err)
		mlx5_core_warn(dev, "flow steering can't destroy ft\n");
	fs_get_obj(prio, ft->node.parent);
	prio->num_ft--;
}

static void del_rule(struct fs_node *node)
{
	struct mlx5_flow_rule *rule;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	u32 *match_value;
	int modify_mask;
	struct mlx5_core_dev *dev = get_dev(node);
	int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
	int err;
	bool update_fte = false;

	match_value = mlx5_vzalloc(match_len);
	if (!match_value) {
		mlx5_core_warn(dev, "failed to allocate inbox\n");
		return;
	}

	fs_get_obj(rule, node);
	fs_get_obj(fte, rule->node.parent);
	fs_get_obj(fg, fte->node.parent);
	memcpy(match_value, fte->val, sizeof(fte->val));
	fs_get_obj(ft, fg->node.parent);
	list_del(&rule->node.list);
	if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		mutex_lock(&rule->dest_attr.ft->lock);
		list_del(&rule->next_ft);
		mutex_unlock(&rule->dest_attr.ft->lock);
	}

	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
	    --fte->dests_size) {
		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
		fte->action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
		update_fte = true;
		goto out;
	}

	if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
	    --fte->dests_size) {
		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
		update_fte = true;
	}
out:
	if (update_fte && fte->dests_size) {
		err = mlx5_cmd_update_fte(dev, ft, fg->id, modify_mask, fte);
		if (err)
			mlx5_core_warn(dev,
				       "%s can't del rule fg id=%d fte_index=%d\n",
				       __func__, fg->id, fte->index);
	}
	kvfree(match_value);
}

static void del_fte(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	dev = get_dev(&ft->node);
	err = mlx5_cmd_delete_fte(dev, ft,
				  fte->index);
	if (err)
		mlx5_core_warn(dev,
			       "flow steering can't delete fte in index %d of flow group id %d\n",
			       fte->index, fg->id);

	fte->status = 0;
	fg->num_ftes--;
}

static void del_flow_group(struct fs_node *node)
{
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&ft->node);

	if (ft->autogroup.active)
		ft->autogroup.num_groups--;

	if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
			       fg->id, ft->id);
}

static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act,
				u32 *match_value,
				unsigned int index)
{
	struct fs_fte *fte;

	fte = kzalloc(sizeof(*fte), GFP_KERNEL);
	if (!fte)
		return ERR_PTR(-ENOMEM);

	memcpy(fte->val, match_value, sizeof(fte->val));
	fte->node.type = FS_TYPE_FLOW_ENTRY;
	fte->flow_tag = flow_act->flow_tag;
	fte->index = index;
	fte->action = flow_act->action;
	fte->encap_id = flow_act->encap_id;
	fte->modify_id = flow_act->modify_id;

	return fte;
}

static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
{
	struct mlx5_flow_group *fg;
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    create_fg_in, match_criteria);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    create_fg_in,
					    match_criteria_enable);
	fg = kzalloc(sizeof(*fg), GFP_KERNEL);
	if (!fg)
		return ERR_PTR(-ENOMEM);

	fg->mask.match_criteria_enable = match_criteria_enable;
	memcpy(&fg->mask.match_criteria, match_criteria,
	       sizeof(fg->mask.match_criteria));
	fg->node.type = FS_TYPE_FLOW_GROUP;
	fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in,
				   start_flow_index);
	fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in,
				end_flow_index) - fg->start_index + 1;
	return fg;
}

static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
						enum fs_flow_table_type table_type,
						enum fs_flow_table_op_mod op_mod,
						u32 flags)
{
	struct mlx5_flow_table *ft;

	ft = kzalloc(sizeof(*ft), GFP_KERNEL);
	if (!ft)
		return NULL;

	ft->level = level;
	ft->node.type = FS_TYPE_FLOW_TABLE;
	ft->op_mod = op_mod;
	ft->type = table_type;
	ft->vport = vport;
	ft->max_fte = max_fte;
	ft->flags = flags;
	INIT_LIST_HEAD(&ft->fwd_rules);
	mutex_init(&ft->lock);

	return ft;
}

/* If reverse is false, then we search for the first flow table in the
 * root sub-tree from start (closest from right), else we search for the
 * last flow table in the root sub-tree till start (closest from left).
 */
static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
							 struct list_head *start,
							 bool reverse)
{
#define list_advance_entry(pos, reverse)		\
	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))

#define list_for_each_advance_continue(pos, head, reverse)	\
	for (pos = list_advance_entry(pos, reverse);		\
	     &pos->list != (head);				\
	     pos = list_advance_entry(pos, reverse))

	struct fs_node *iter = list_entry(start, struct fs_node, list);
	struct mlx5_flow_table *ft = NULL;

	if (!root)
		return NULL;

	list_for_each_advance_continue(iter, &root->children, reverse) {
		if (iter->type == FS_TYPE_FLOW_TABLE) {
			fs_get_obj(ft, iter);
			return ft;
		}
		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
		if (ft)
			return ft;
	}

	return ft;
}

/* If reverse is false then return the first flow table in the next priority
 * of prio in the tree, else return the last flow table in the previous
 * priority of prio in the tree.
 */
static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
{
	struct mlx5_flow_table *ft = NULL;
	struct fs_node *curr_node;
	struct fs_node *parent;

	parent = prio->node.parent;
	curr_node = &prio->node;
	while (!ft && parent) {
		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
		curr_node = parent;
		parent = curr_node->parent;
	}
	return ft;
}

/* Assumes the whole tree is locked by the chain lock mutex */
static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, false);
}

/* Assumes the whole tree is locked by the chain lock mutex */
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, true);
}

static int connect_fts_in_prio(struct mlx5_core_dev *dev,
			       struct fs_prio *prio,
			       struct mlx5_flow_table *ft)
{
	struct mlx5_flow_table *iter;
	int i = 0;
	int err;

	fs_for_each_ft(iter, prio) {
		i++;
		err = mlx5_cmd_modify_flow_table(dev,
						 iter,
						 ft);
		if (err) {
			mlx5_core_warn(dev, "Failed to modify flow table %d\n",
				       iter->id);
			/* The driver is out of sync with the FW */
			if (i > 1)
				WARN_ON(true);
			return err;
		}
	}
	return 0;
}

/* Connect flow tables from the previous priority of prio to ft */
static int connect_prev_fts(struct mlx5_core_dev *dev,
			    struct mlx5_flow_table *ft,
			    struct fs_prio *prio)
{
	struct mlx5_flow_table *prev_ft;

	prev_ft = find_prev_chained_ft(prio);
	if (prev_ft) {
		struct fs_prio *prev_prio;

		fs_get_obj(prev_prio, prev_ft->node.parent);
		return connect_fts_in_prio(dev, prev_prio, ft);
	}
	return 0;
}

static int update_root_ft_create(struct mlx5_flow_table *ft,
				 struct fs_prio *prio)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	int min_level = INT_MAX;
	int err;

	if (root->root_ft)
		min_level = root->root_ft->level;

	if (ft->level >= min_level)
		return 0;

	err = mlx5_cmd_update_root_ft(root->dev, ft, root->underlay_qpn);
	if (err)
		mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
			       ft->id);
	else
		root->root_ft = ft;

	return err;
}

static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int err = 0;

	fs_get_obj(fte, rule->node.parent);
	if (!(fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return -EINVAL;
	lock_ref_node(&fte->node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	memcpy(&rule->dest_attr, dest, sizeof(*dest));
	err = mlx5_cmd_update_fte(get_dev(&ft->node),
				  ft, fg->id, modify_mask, fte);
	unlock_ref_node(&fte->node);

	return err;
}

int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
				 struct mlx5_flow_destination *new_dest,
				 struct mlx5_flow_destination *old_dest)
{
	int i;

	if (!old_dest) {
		if (handle->num_rules != 1)
			return -EINVAL;
		return _mlx5_modify_rule_destination(handle->rule[0],
						     new_dest);
	}

	for (i = 0; i < handle->num_rules; i++) {
		if (mlx5_flow_dests_cmp(old_dest, &handle->rule[i]->dest_attr))
			return _mlx5_modify_rule_destination(handle->rule[i],
							     new_dest);
	}

	return -EINVAL;
}

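/*
 * Usage sketch (illustrative; "handle", "old" and "tirn" are the caller's):
 * repoint one destination of an existing rule at a different TIR:
 *
 *	struct mlx5_flow_destination new = {
 *		.type    = MLX5_FLOW_DESTINATION_TYPE_TIR,
 *		.tir_num = tirn,
 *	};
 *
 *	err = mlx5_modify_rule_destination(handle, &new, &old);
 */
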
/* Modify/set FWD rules that point to old_next_ft to point to new_next_ft */
static int connect_fwd_rules(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *new_next_ft,
			     struct mlx5_flow_table *old_next_ft)
{
	struct mlx5_flow_destination dest;
	struct mlx5_flow_rule *iter;
	int err = 0;

	/* new_next_ft and old_next_ft could be NULL only
	 * when we create/destroy the anchor flow table.
	 */
	if (!new_next_ft || !old_next_ft)
		return 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = new_next_ft;

	mutex_lock(&old_next_ft->lock);
	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
	mutex_unlock(&old_next_ft->lock);
	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
		err = _mlx5_modify_rule_destination(iter, &dest);
		if (err)
			pr_err("mlx5_core: failed to modify rule to point to flow table %d\n",
			       new_next_ft->id);
	}
	return 0;
}

static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
			      struct fs_prio *prio)
{
	struct mlx5_flow_table *next_ft;
	int err = 0;

	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */

	if (list_empty(&prio->node.children)) {
		err = connect_prev_fts(dev, ft, prio);
		if (err)
			return err;

		next_ft = find_next_chained_ft(prio);
		err = connect_fwd_rules(dev, ft, next_ft);
		if (err)
			return err;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.modify_root))
		err = update_root_ft_create(ft, prio);
	return err;
}

static void list_add_flow_table(struct mlx5_flow_table *ft,
				struct fs_prio *prio)
{
	struct list_head *prev = &prio->node.children;
	struct mlx5_flow_table *iter;

	fs_for_each_ft(iter, prio) {
		if (iter->level > ft->level)
			break;
		prev = &iter->node.list;
	}
	list_add(&ft->node.list, prev);
}

static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
							struct mlx5_flow_table_attr *ft_attr,
							enum fs_flow_table_op_mod op_mod,
							u16 vport)
{
	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
	struct mlx5_flow_table *next_ft = NULL;
	struct fs_prio *fs_prio = NULL;
	struct mlx5_flow_table *ft;
	int log_table_sz;
	int err;

	if (!root) {
		pr_err("mlx5: flow steering failed to find root of namespace\n");
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&root->chain_lock);
	fs_prio = find_prio(ns, ft_attr->prio);
	if (!fs_prio) {
		err = -EINVAL;
		goto unlock_root;
	}
	if (ft_attr->level >= fs_prio->num_levels) {
		err = -ENOSPC;
		goto unlock_root;
	}
	/* The level is related to the
	 * priority level range.
	 */
	ft_attr->level += fs_prio->start_level;
	ft = alloc_flow_table(ft_attr->level,
			      vport,
			      ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
			      root->table_type,
			      op_mod, ft_attr->flags);
	if (!ft) {
		err = -ENOMEM;
		goto unlock_root;
	}

	tree_init_node(&ft->node, 1, del_flow_table);
	log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
	next_ft = find_next_chained_ft(fs_prio);
	err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type,
					 ft->level, log_table_sz, next_ft, &ft->id,
					 ft->flags);
	if (err)
		goto free_ft;

	err = connect_flow_table(root->dev, ft, fs_prio);
	if (err)
		goto destroy_ft;
	lock_ref_node(&fs_prio->node);
	tree_add_node(&ft->node, &fs_prio->node);
	list_add_flow_table(ft, fs_prio);
	fs_prio->num_ft++;
	unlock_ref_node(&fs_prio->node);
	mutex_unlock(&root->chain_lock);
	return ft;
destroy_ft:
	mlx5_cmd_destroy_flow_table(root->dev, ft);
free_ft:
	kfree(ft);
unlock_root:
	mutex_unlock(&root->chain_lock);
	return ERR_PTR(err);
}

struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       struct mlx5_flow_table_attr *ft_attr)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}

struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
						     int prio, int max_fte,
						     u32 level, u16 vport)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.max_fte = max_fte;
	ft_attr.level = level;
	ft_attr.prio = prio;

	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport);
}

struct mlx5_flow_table*
mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
				 int prio, u32 level)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.level = level;
	ft_attr.prio = prio;
	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);

struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
				    int prio,
				    int num_flow_table_entries,
				    int max_num_groups,
				    u32 level,
				    u32 flags)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	if (max_num_groups > num_flow_table_entries)
		return ERR_PTR(-EINVAL);

	ft_attr.max_fte = num_flow_table_entries;
	ft_attr.prio = prio;
	ft_attr.level = level;
	ft_attr.flags = flags;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return ft;

	ft->autogroup.active = true;
	ft->autogroup.required_groups = max_num_groups;

	return ft;
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);

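/*
 * Usage sketch (illustrative values only): carve out a 4K-entry table at
 * level 0 of priority 0 with room for up to four autogrouped match-criteria
 * groups:
 *
 *	ft = mlx5_create_auto_grouped_flow_table(ns, 0, 4096, 4, 0, 0);
 *	if (IS_ERR(ft))
 *		return PTR_ERR(ft);
 *
 * The groups themselves are then created on demand by create_autogroup()
 * as rules with new match criteria arrive via mlx5_add_flow_rules().
 */
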
/* Flow table should be locked */
static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table *ft,
							u32 *fg_in,
							struct list_head *prev_fg,
							bool is_auto_fg)
{
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	int err;

	if (!dev)
		return ERR_PTR(-ENODEV);

	fg = alloc_flow_group(fg_in);
	if (IS_ERR(fg))
		return fg;

	err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
	if (err) {
		kfree(fg);
		return ERR_PTR(err);
	}

	if (ft->autogroup.active)
		ft->autogroup.num_groups++;
	/* Add node to tree */
	tree_init_node(&fg->node, !is_auto_fg, del_flow_group);
	tree_add_node(&fg->node, &ft->node);
	/* Add node to group list */
	list_add(&fg->node.list, prev_fg);

	return fg;
}

struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
					       u32 *fg_in)
{
	struct mlx5_flow_group *fg;

	if (ft->autogroup.active)
		return ERR_PTR(-EPERM);

	lock_ref_node(&ft->node);
	fg = create_flow_group_common(ft, fg_in, ft->node.children.prev, false);
	unlock_ref_node(&ft->node);

	return fg;
}

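/*
 * Usage sketch (illustrative): callers build the firmware command layout
 * with the MLX5_SET helpers before calling mlx5_create_flow_group(), e.g.
 * a 16-entry group matching on outer headers:
 *
 *	u32 *in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in));
 *
 *	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
 *		 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS);
 *	MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
 *	MLX5_SET(create_flow_group_in, in, end_flow_index, 15);
 *	fg = mlx5_create_flow_group(ft, in);
 */
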
static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	INIT_LIST_HEAD(&rule->next_ft);
	rule->node.type = FS_TYPE_FLOW_DEST;
	if (dest)
		memcpy(&rule->dest_attr, dest, sizeof(*dest));

	return rule;
}

static struct mlx5_flow_handle *alloc_handle(int num_rules)
{
	struct mlx5_flow_handle *handle;

	handle = kzalloc(sizeof(*handle) + sizeof(handle->rule[0]) *
			 num_rules, GFP_KERNEL);
	if (!handle)
		return NULL;

	handle->num_rules = num_rules;

	return handle;
}

static void destroy_flow_handle(struct fs_fte *fte,
				struct mlx5_flow_handle *handle,
				struct mlx5_flow_destination *dest,
				int i)
{
	for (; --i >= 0;) {
		if (atomic_dec_and_test(&handle->rule[i]->node.refcount)) {
			fte->dests_size--;
			list_del(&handle->rule[i]->node.list);
			kfree(handle->rule[i]);
		}
	}
	kfree(handle);
}

static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
		   struct mlx5_flow_destination *dest,
		   int dest_num,
		   int *modify_mask,
		   bool *new_rule)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_rule *rule = NULL;
	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int type;
	int i = 0;

	handle = alloc_handle((dest_num) ? dest_num : 1);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	do {
		if (dest) {
			rule = find_flow_rule(fte, dest + i);
			if (rule) {
				atomic_inc(&rule->node.refcount);
				goto rule_found;
			}
		}

		*new_rule = true;
		rule = alloc_rule(dest + i);
		if (!rule)
			goto free_rules;

		/* Add dest to the dests list - flow tables must be at the
		 * end of the list so that forward-to-next-prio rules come last.
		 */
		tree_init_node(&rule->node, 1, del_rule);
		if (dest &&
		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			list_add(&rule->node.list, &fte->node.children);
		else
			list_add_tail(&rule->node.list, &fte->node.children);
		if (dest) {
			fte->dests_size++;

			type = dest[i].type ==
				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
			*modify_mask |= type ? count : dst;
		}
rule_found:
		handle->rule[i] = rule;
	} while (++i < dest_num);

	return handle;

free_rules:
	destroy_flow_handle(fte, handle, dest, i);
	return ERR_PTR(-ENOMEM);
}

/* fte should not be deleted while calling this function */
static struct mlx5_flow_handle *
add_rule_fte(struct fs_fte *fte,
	     struct mlx5_flow_group *fg,
	     struct mlx5_flow_destination *dest,
	     int dest_num,
	     bool update_action)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_table *ft;
	int modify_mask = 0;
	int err;
	bool new_rule = false;

	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
				    &new_rule);
	if (IS_ERR(handle) || !new_rule)
		goto out;

	if (update_action)
		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);

	fs_get_obj(ft, fg->node.parent);
	if (!(fte->status & FS_FTE_STATUS_EXISTING))
		err = mlx5_cmd_create_fte(get_dev(&ft->node),
					  ft, fg->id, fte);
	else
		err = mlx5_cmd_update_fte(get_dev(&ft->node),
					  ft, fg->id, modify_mask, fte);
	if (err)
		goto free_handle;

	fte->status |= FS_FTE_STATUS_EXISTING;

out:
	return handle;

free_handle:
	destroy_flow_handle(fte, handle, dest, handle->num_rules);
	return ERR_PTR(err);
}

/* Assumes fg is locked */
static unsigned int get_free_fte_index(struct mlx5_flow_group *fg,
				       struct list_head **prev)
{
	struct fs_fte *fte;
	unsigned int start = fg->start_index;

	if (prev)
		*prev = &fg->node.children;

	/* assumes the list is sorted by index */
	fs_for_each_fte(fte, fg) {
		if (fte->index != start)
			return start;
		start++;
		if (prev)
			*prev = &fte->node.list;
	}

	return start;
}

/* prev is output, prev->next = new_fte */
static struct fs_fte *create_fte(struct mlx5_flow_group *fg,
				 u32 *match_value,
				 struct mlx5_flow_act *flow_act,
				 struct list_head **prev)
{
	int index;

	index = get_free_fte_index(fg, prev);

	return alloc_fte(flow_act, match_value, index);
}

static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
						u8 match_criteria_enable,
						u32 *match_criteria)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct list_head *prev = &ft->node.children;
	unsigned int candidate_index = 0;
	struct mlx5_flow_group *fg;
	void *match_criteria_addr;
	unsigned int group_size = 0;
	u32 *in;

	if (!ft->autogroup.active)
		return ERR_PTR(-ENOENT);

	in = mlx5_vzalloc(inlen);
	if (!in)
		return ERR_PTR(-ENOMEM);

	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
		/* We save place for flow groups in addition to max types */
		group_size = ft->max_fte / (ft->autogroup.required_groups + 1);

	/* ft->max_fte == ft->autogroup.max_types */
	if (group_size == 0)
		group_size = 1;

	/* sorted by start_index */
	fs_for_each_fg(fg, ft) {
		if (candidate_index + group_size > fg->start_index)
			candidate_index = fg->start_index + fg->max_ftes;
		else
			break;
		prev = &fg->node.list;
	}

	if (candidate_index + group_size > ft->max_fte) {
		fg = ERR_PTR(-ENOSPC);
		goto out;
	}

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 match_criteria_enable);
	MLX5_SET(create_flow_group_in, in, start_flow_index, candidate_index);
	MLX5_SET(create_flow_group_in, in, end_flow_index, candidate_index +
		 group_size - 1);
	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
					   in, match_criteria);
	memcpy(match_criteria_addr, match_criteria,
	       MLX5_ST_SZ_BYTES(fte_match_param));

	fg = create_flow_group_common(ft, in, prev, true);
out:
	kvfree(in);
	return fg;
}

static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2)
{
	if (d1->type == d2->type) {
		if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		     d1->vport_num == d2->vport_num) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		     d1->ft == d2->ft) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
		     d1->tir_num == d2->tir_num))
			return true;
	}

	return false;
}

static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
					     struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	list_for_each_entry(rule, &fte->node.children, node.list) {
		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
			return rule;
	}
	return NULL;
}

static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
					    u32 *match_value,
					    struct mlx5_flow_act *flow_act,
					    struct mlx5_flow_destination *dest,
					    int dest_num)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_table *ft;
	struct list_head *prev;
	struct fs_fte *fte;
	int i;

	nested_lock_ref_node(&fg->node, FS_MUTEX_PARENT);
	fs_for_each_fte(fte, fg) {
		nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
		if (compare_match_value(&fg->mask, match_value, &fte->val) &&
		    (flow_act->action & fte->action)) {
			int old_action = fte->action;

			if (fte->flow_tag != flow_act->flow_tag) {
				mlx5_core_warn(get_dev(&fte->node),
					       "FTE flow tag %u already exists with different flow tag %u\n",
					       fte->flow_tag,
					       flow_act->flow_tag);
				handle = ERR_PTR(-EEXIST);
				goto unlock_fte;
			}

			fte->action |= flow_act->action;
			handle = add_rule_fte(fte, fg, dest, dest_num,
					      old_action != flow_act->action);
			if (IS_ERR(handle)) {
				fte->action = old_action;
				goto unlock_fte;
			} else {
				goto add_rules;
			}
		}
		unlock_ref_node(&fte->node);
	}
	fs_get_obj(ft, fg->node.parent);
	if (fg->num_ftes >= fg->max_ftes) {
		handle = ERR_PTR(-ENOSPC);
		goto unlock_fg;
	}

	fte = create_fte(fg, match_value, flow_act, &prev);
	if (IS_ERR(fte)) {
		handle = (void *)fte;
		goto unlock_fg;
	}
	tree_init_node(&fte->node, 0, del_fte);
	nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
	handle = add_rule_fte(fte, fg, dest, dest_num, false);
	if (IS_ERR(handle)) {
		unlock_ref_node(&fte->node);
		kfree(fte);
		goto unlock_fg;
	}

	fg->num_ftes++;

	tree_add_node(&fte->node, &fg->node);
	list_add(&fte->node.list, prev);
add_rules:
	for (i = 0; i < handle->num_rules; i++) {
		if (atomic_read(&handle->rule[i]->node.refcount) == 1)
			tree_add_node(&handle->rule[i]->node, &fte->node);
	}
unlock_fte:
	unlock_ref_node(&fte->node);
unlock_fg:
	unlock_ref_node(&fg->node);
	return handle;
}

struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handle)
{
	struct mlx5_flow_rule *dst;
	struct fs_fte *fte;

	fs_get_obj(fte, handle->rule[0]->node.parent);

	fs_for_each_dst(dst, fte) {
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
			return dst->dest_attr.counter;
	}

	return NULL;
}

static bool counter_is_valid(struct mlx5_fc *counter, u32 action)
{
	if (!(action & MLX5_FLOW_CONTEXT_ACTION_COUNT))
		return !counter;

	if (!counter)
		return false;

	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
}

static bool dest_is_valid(struct mlx5_flow_destination *dest,
			  u32 action,
			  struct mlx5_flow_table *ft)
{
	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
		return counter_is_valid(dest->counter, action);

	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return true;

	if (!dest || ((dest->type ==
	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
	     (dest->ft->level <= ft->level)))
		return false;
	return true;
}

static struct mlx5_flow_handle *
_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		     struct mlx5_flow_spec *spec,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_flow_destination *dest,
		     int dest_num)
{
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	int i;

	for (i = 0; i < dest_num; i++) {
		if (!dest_is_valid(&dest[i], flow_act->action, ft))
			return ERR_PTR(-EINVAL);
	}

	nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
	fs_for_each_fg(g, ft)
		if (compare_match_criteria(g->mask.match_criteria_enable,
					   spec->match_criteria_enable,
					   g->mask.match_criteria,
					   spec->match_criteria)) {
			rule = add_rule_fg(g, spec->match_value,
					   flow_act, dest, dest_num);
			if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC)
				goto unlock;
		}

	g = create_autogroup(ft, spec->match_criteria_enable,
			     spec->match_criteria);
	if (IS_ERR(g)) {
		rule = (void *)g;
		goto unlock;
	}

	rule = add_rule_fg(g, spec->match_value, flow_act, dest, dest_num);
	if (IS_ERR(rule)) {
		/* Remove assumes refcount > 0 and autogroup creates a group
		 * with a refcount = 0.
		 */
		unlock_ref_node(&ft->node);
		tree_get_node(&g->node);
		tree_remove_node(&g->node);
		return rule;
	}
unlock:
	unlock_ref_node(&ft->node);
	return rule;
}

static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
{
	return ((ft->type == FS_FT_NIC_RX) &&
		(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
}

struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_act *flow_act,
		    struct mlx5_flow_destination *dest,
		    int dest_num)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_flow_destination gen_dest;
	struct mlx5_flow_table *next_ft = NULL;
	struct mlx5_flow_handle *handle = NULL;
	u32 sw_action = flow_act->action;
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);
	if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		if (!fwd_next_prio_supported(ft))
			return ERR_PTR(-EOPNOTSUPP);
		if (dest)
			return ERR_PTR(-EINVAL);
		mutex_lock(&root->chain_lock);
		next_ft = find_next_chained_ft(prio);
		if (next_ft) {
			gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			gen_dest.ft = next_ft;
			dest = &gen_dest;
			dest_num = 1;
			flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		} else {
			mutex_unlock(&root->chain_lock);
			return ERR_PTR(-EOPNOTSUPP);
		}
	}

	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, dest_num);

	if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		if (!IS_ERR_OR_NULL(handle) &&
		    (list_empty(&handle->rule[0]->next_ft))) {
			mutex_lock(&next_ft->lock);
			list_add(&handle->rule[0]->next_ft,
				 &next_ft->fwd_rules);
			mutex_unlock(&next_ft->lock);
			handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
		}
		mutex_unlock(&root->chain_lock);
	}
	return handle;
}
EXPORT_SYMBOL(mlx5_add_flow_rules);

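/*
 * Usage sketch (illustrative; error handling elided, "tirn" is the caller's
 * TIR number): steer all TCP traffic in a table to a TIR:
 *
 *	struct mlx5_flow_act flow_act = {
 *		.action   = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 *		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
 *	};
 *	struct mlx5_flow_destination dest = {
 *		.type    = MLX5_FLOW_DESTINATION_TYPE_TIR,
 *		.tir_num = tirn,
 *	};
 *	struct mlx5_flow_spec *spec = mlx5_vzalloc(sizeof(*spec));
 *
 *	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 *	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 *			 outer_headers.ip_protocol);
 *	MLX5_SET(fte_match_param, spec->match_value,
 *		 outer_headers.ip_protocol, IPPROTO_TCP);
 *	handle = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 */
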
void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
{
	int i;

	for (i = handle->num_rules - 1; i >= 0; i--)
		tree_remove_node(&handle->rule[i]->node);
	kfree(handle);
}
EXPORT_SYMBOL(mlx5_del_flow_rules);

/* Assumes prio->node.children (the flow tables) is sorted by level */
static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
{
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);

	if (!list_is_last(&ft->node.list, &prio->node.children))
		return list_next_entry(ft, node.list);
	return find_next_chained_ft(prio);
}

static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_flow_table *new_root_ft = NULL;

	if (root->root_ft != ft)
		return 0;

	new_root_ft = find_next_ft(ft);
	if (new_root_ft) {
		int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft,
						  root->underlay_qpn);

		if (err) {
			mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
				       ft->id);
			return err;
		}
	}
	root->root_ft = new_root_ft;
	return 0;
}

/* Connect the flow table from the previous priority to
 * the next flow table.
 */
static int disconnect_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_table *next_ft;
	struct fs_prio *prio;
	int err = 0;

	err = update_root_ft_destroy(ft);
	if (err)
		return err;

	fs_get_obj(prio, ft->node.parent);
	if (!(list_first_entry(&prio->node.children,
			       struct mlx5_flow_table,
			       node.list) == ft))
		return 0;

	next_ft = find_next_chained_ft(prio);
	err = connect_fwd_rules(dev, next_ft, ft);
	if (err)
		return err;

	err = connect_prev_fts(dev, next_ft, prio);
	if (err)
		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
			       ft->id);
	return err;
}

int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int err = 0;

	mutex_lock(&root->chain_lock);
	err = disconnect_flow_table(ft);
	if (err) {
		mutex_unlock(&root->chain_lock);
		return err;
	}
	if (tree_remove_node(&ft->node))
		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
			       ft->id);
	mutex_unlock(&root->chain_lock);

	return err;
}
EXPORT_SYMBOL(mlx5_destroy_flow_table);

void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
{
	if (tree_remove_node(&fg->node))
		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
			       fg->id);
}

struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
						    enum mlx5_flow_namespace_type type)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	struct mlx5_flow_root_namespace *root_ns;
	int prio;
	struct fs_prio *fs_prio;
	struct mlx5_flow_namespace *ns;

	if (!steering)
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_BYPASS:
	case MLX5_FLOW_NAMESPACE_LAG:
	case MLX5_FLOW_NAMESPACE_OFFLOADS:
	case MLX5_FLOW_NAMESPACE_ETHTOOL:
	case MLX5_FLOW_NAMESPACE_KERNEL:
	case MLX5_FLOW_NAMESPACE_LEFTOVERS:
	case MLX5_FLOW_NAMESPACE_ANCHOR:
		prio = type;
		break;
	case MLX5_FLOW_NAMESPACE_FDB:
		if (steering->fdb_root_ns)
			return &steering->fdb_root_ns->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
		if (steering->esw_egress_root_ns)
			return &steering->esw_egress_root_ns->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		if (steering->esw_ingress_root_ns)
			return &steering->esw_ingress_root_ns->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
		if (steering->sniffer_rx_root_ns)
			return &steering->sniffer_rx_root_ns->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
		if (steering->sniffer_tx_root_ns)
			return &steering->sniffer_tx_root_ns->ns;
		else
			return NULL;
	default:
		return NULL;
	}

	root_ns = steering->root_ns;
	if (!root_ns)
		return NULL;

	fs_prio = find_prio(&root_ns->ns, prio);
	if (!fs_prio)
		return NULL;

	ns = list_first_entry(&fs_prio->node.children,
			      typeof(*ns),
			      node.list);

	return ns;
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);

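/*
 * Usage sketch (illustrative): most users resolve their namespace once at
 * init time and create tables under it:
 *
 *	struct mlx5_flow_namespace *ns =
 *		mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
 *	if (!ns)
 *		return -EOPNOTSUPP;
 */
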
static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
				      unsigned int prio, int num_levels)
{
	struct fs_prio *fs_prio;

	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
	if (!fs_prio)
		return ERR_PTR(-ENOMEM);

	fs_prio->node.type = FS_TYPE_PRIO;
	tree_init_node(&fs_prio->node, 1, NULL);
	tree_add_node(&fs_prio->node, &ns->node);
	fs_prio->num_levels = num_levels;
	fs_prio->prio = prio;
	list_add_tail(&fs_prio->node.list, &ns->node.children);

	return fs_prio;
}

static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace *ns)
{
	ns->node.type = FS_TYPE_NAMESPACE;

	return ns;
}

static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
{
	struct mlx5_flow_namespace *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return ERR_PTR(-ENOMEM);

	fs_init_namespace(ns);
	tree_init_node(&ns->node, 1, NULL);
	tree_add_node(&ns->node, &prio->node);
	list_add_tail(&ns->node.list, &prio->node.children);

	return ns;
}

static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
			     struct init_tree_node *prio_metadata)
{
	struct fs_prio *fs_prio;
	int i;

	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
	}
	return 0;
}

#define FLOW_TABLE_BIT_SZ 1
#define GET_FLOW_TABLE_CAP(dev, offset) \
	((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) +	\
			offset / 32)) >>					\
	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
{
	int i;

	for (i = 0; i < caps->arr_sz; i++) {
		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
			return false;
	}
	return true;
}

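/*
 * Offset semantics (sketch): FS_CAP() yields a bit offset into the
 * flow_table_nic_cap capability page, so e.g.
 *
 *	GET_FLOW_TABLE_CAP(dev,
 *		FS_CAP(flow_table_properties_nic_receive.flow_modify_en))
 *
 * selects the big-endian dword holding that bit (offset / 32), shifts it
 * down and masks out the single capability bit (offset & 0x1f within the
 * dword).
 */
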
static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
				    struct init_tree_node *init_node,
				    struct fs_node *fs_parent_node,
				    struct init_tree_node *init_parent_node,
				    int prio)
{
	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
					      flow_table_properties_nic_receive.
					      max_ft_level);
	struct mlx5_flow_namespace *fs_ns;
	struct fs_prio *fs_prio;
	struct fs_node *base;
	int i;
	int err;

	if (init_node->type == FS_TYPE_PRIO) {
		if ((init_node->min_ft_level > max_ft_level) ||
		    !has_required_caps(steering->dev, &init_node->caps))
			return 0;

		fs_get_obj(fs_ns, fs_parent_node);
		if (init_node->num_leaf_prios)
			return create_leaf_prios(fs_ns, prio, init_node);
		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
		base = &fs_prio->node;
	} else if (init_node->type == FS_TYPE_NAMESPACE) {
		fs_get_obj(fs_prio, fs_parent_node);
		fs_ns = fs_create_namespace(fs_prio);
		if (IS_ERR(fs_ns))
			return PTR_ERR(fs_ns);
		base = &fs_ns->node;
	} else {
		return -EINVAL;
	}
	prio = 0;
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       base, init_node, prio);
		if (err)
			return err;
		if (init_node->children[i].type == FS_TYPE_PRIO &&
		    init_node->children[i].num_leaf_prios) {
			prio += init_node->children[i].num_leaf_prios;
		}
	}

	return 0;
}

static int init_root_tree(struct mlx5_flow_steering *steering,
			  struct init_tree_node *init_node,
			  struct fs_node *fs_parent_node)
{
	int i;
	struct mlx5_flow_namespace *fs_ns;
	int err;

	fs_get_obj(fs_ns, fs_parent_node);
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       &fs_ns->node,
					       init_node, i);
		if (err)
			return err;
	}
	return 0;
}

static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering *steering,
						       enum fs_flow_table_type table_type)
{
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_namespace *ns;

	/* Create the root namespace */
	root_ns = mlx5_vzalloc(sizeof(*root_ns));
	if (!root_ns)
		return NULL;

	root_ns->dev = steering->dev;
	root_ns->table_type = table_type;

	ns = &root_ns->ns;
	fs_init_namespace(ns);
	mutex_init(&root_ns->chain_lock);
	tree_init_node(&ns->node, 1, NULL);
	tree_add_node(&ns->node, NULL);

	return root_ns;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);

static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
{
	struct fs_prio *prio;

	fs_for_each_prio(prio, ns) {
		/* This updates prio start_level and num_levels */
		set_prio_attrs_in_prio(prio, acc_level);
		acc_level += prio->num_levels;
	}
	return acc_level;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
{
	struct mlx5_flow_namespace *ns;
	int acc_level_ns = acc_level;

	prio->start_level = acc_level;
	fs_for_each_ns(ns, prio)
		/* This updates start_level and num_levels of ns's priority descendants */
		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
	if (!prio->num_levels)
		prio->num_levels = acc_level_ns - prio->start_level;
	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
}

static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
{
	struct mlx5_flow_namespace *ns = &root_ns->ns;
	struct fs_prio *prio;
	int start_level = 0;

	fs_for_each_prio(prio, ns) {
		set_prio_attrs_in_prio(prio, start_level);
		start_level += prio->num_levels;
	}
}

#define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1
#define ANCHOR_LEVEL 0
static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
	if (WARN_ON(!ns))
		return -EINVAL;

	ft_attr.max_fte = ANCHOR_SIZE;
	ft_attr.level = ANCHOR_LEVEL;
	ft_attr.prio = ANCHOR_PRIO;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
		return PTR_ERR(ft);
	}
	return 0;
}

static int init_root_ns(struct mlx5_flow_steering *steering)
{
	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
	if (!steering->root_ns)
		goto cleanup;

	if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
		goto cleanup;

	set_prio_attrs(steering->root_ns);

	if (create_anchor_flow_table(steering))
		goto cleanup;

	return 0;

cleanup:
	mlx5_cleanup_fs(steering->dev);
	return -ENOMEM;
}

static void clean_tree(struct fs_node *node)
{
	if (node) {
		struct fs_node *iter;
		struct fs_node *temp;

		list_for_each_entry_safe(iter, temp, &node->children, list)
			clean_tree(iter);
		tree_remove_node(node);
	}
}

static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
{
	if (!root_ns)
		return;

	clean_tree(&root_ns->ns.node);
}

void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	cleanup_root_ns(steering->root_ns);
	cleanup_root_ns(steering->esw_egress_root_ns);
	cleanup_root_ns(steering->esw_ingress_root_ns);
	cleanup_root_ns(steering->fdb_root_ns);
	cleanup_root_ns(steering->sniffer_rx_root_ns);
	cleanup_root_ns(steering->sniffer_tx_root_ns);
	mlx5_cleanup_fc_stats(dev);
	kfree(steering);
}

static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
	if (!steering->sniffer_tx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
	if (IS_ERR(prio)) {
		cleanup_root_ns(steering->sniffer_tx_root_ns);
		return PTR_ERR(prio);
	}
	return 0;
}

static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
	if (!steering->sniffer_rx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
	if (IS_ERR(prio)) {
		cleanup_root_ns(steering->sniffer_rx_root_ns);
		return PTR_ERR(prio);
	}
	return 0;
}

static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
	if (!steering->fdb_root_ns)
		return -ENOMEM;

	prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 1);
	if (IS_ERR(prio))
		goto out_err;

	prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
	if (IS_ERR(prio))
		goto out_err;

	set_prio_attrs(steering->fdb_root_ns);
	return 0;

out_err:
	cleanup_root_ns(steering->fdb_root_ns);
	steering->fdb_root_ns = NULL;
	return PTR_ERR(prio);
}

static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
	if (!steering->esw_ingress_root_ns)
		return -ENOMEM;

	/* create one prio */
	prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0,
			      MLX5_TOTAL_VPORTS(steering->dev));
	return PTR_ERR_OR_ZERO(prio);
}

static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
	if (!steering->esw_egress_root_ns)
		return -ENOMEM;

	/* create one prio */
	prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0,
			      MLX5_TOTAL_VPORTS(steering->dev));
	return PTR_ERR_OR_ZERO(prio);
}

int mlx5_init_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering;
	int err = 0;

	err = mlx5_init_fc_stats(dev);
	if (err)
		return err;

	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
	if (!steering)
		return -ENOMEM;
	steering->dev = dev;
	dev->priv.steering = steering;

	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	      MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
		err = init_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
			err = init_fdb_root_ns(steering);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
			err = init_egress_acl_root_ns(steering);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
			err = init_ingress_acl_root_ns(steering);
			if (err)
				goto err;
		}
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
		err = init_sniffer_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
		err = init_sniffer_tx_root_ns(steering);
		if (err)
			goto err;
	}

	return 0;
err:
	mlx5_cleanup_fs(dev);
	return err;
}

int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;

	root->underlay_qpn = underlay_qpn;
	return 0;
}
EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);

int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;

	root->underlay_qpn = 0;
	return 0;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);