/*
 * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
35 #include <linux/kernel.h>
36 #include <linux/types.h>
37 #include <linux/slab.h>
38 #include <linux/errno.h>
39 #include <linux/rhashtable.h>
40 #include <linux/list.h>
43 #include "core_acl_flex_actions.h"
45 enum mlxsw_afa_set_type {
46 MLXSW_AFA_SET_TYPE_NEXT,
47 MLXSW_AFA_SET_TYPE_GOTO,
51 * Type of the record at the end of the action set.
53 MLXSW_ITEM32(afa, set, type, 0xA0, 28, 4);
55 /* afa_set_next_action_set_ptr
56 * A pointer to the next action set in the KVD Centralized database.
58 MLXSW_ITEM32(afa, set, next_action_set_ptr, 0xA4, 0, 24);
61 * group - When set, the binding is of an ACL group. When cleared,
62 * the binding is of an ACL.
63 * Must be set to 1 for Spectrum.
65 MLXSW_ITEM32(afa, set, goto_g, 0xA4, 29, 1);
67 enum mlxsw_afa_set_goto_binding_cmd {
68 /* continue go the next binding point */
69 MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE,
70 /* jump to the next binding point no return */
71 MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP,
72 /* terminate the acl binding */
73 MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM = 4,
76 /* afa_set_goto_binding_cmd */
77 MLXSW_ITEM32(afa, set, goto_binding_cmd, 0xA4, 24, 3);
79 /* afa_set_goto_next_binding
80 * ACL/ACL group identifier. If the g bit is set, this field should hold
81 * the acl_group_id, else it should hold the acl_id.
83 MLXSW_ITEM32(afa, set, goto_next_binding, 0xA4, 0, 16);
85 /* afa_all_action_type
88 MLXSW_ITEM32(afa, all, action_type, 0x00, 24, 6);
91 unsigned int max_acts_per_set;
92 const struct mlxsw_afa_ops *ops;
94 struct rhashtable set_ht;
95 struct rhashtable fwd_entry_ht;
98 #define MLXSW_AFA_SET_LEN 0xA8
100 struct mlxsw_afa_set_ht_key {
101 char enc_actions[MLXSW_AFA_SET_LEN]; /* Encoded set */
105 /* Set structure holds one action set record. It contains up to three
106 * actions (depends on size of particular actions). The set is either
107 * put directly to a rule, or it is stored in KVD linear area.
108 * To prevent duplicate entries in KVD linear area, a hashtable is
109 * used to track sets that were previously inserted and may be shared.
112 struct mlxsw_afa_set {
113 struct rhash_head ht_node;
114 struct mlxsw_afa_set_ht_key ht_key;
116 bool shared; /* Inserted in hashtable (doesn't mean that
117 * kvdl_index is valid).
119 unsigned int ref_count;
120 struct mlxsw_afa_set *next; /* Pointer to the next set. */
121 struct mlxsw_afa_set *prev; /* Pointer to the previous set,
122 * note that set may have multiple
123 * sets from multiple blocks
124 * pointing at it. This is only
125 * usable until commit.
129 static const struct rhashtable_params mlxsw_afa_set_ht_params = {
130 .key_len = sizeof(struct mlxsw_afa_set_ht_key),
131 .key_offset = offsetof(struct mlxsw_afa_set, ht_key),
132 .head_offset = offsetof(struct mlxsw_afa_set, ht_node),
133 .automatic_shrinking = true,
136 struct mlxsw_afa_fwd_entry_ht_key {
140 struct mlxsw_afa_fwd_entry {
141 struct rhash_head ht_node;
142 struct mlxsw_afa_fwd_entry_ht_key ht_key;
144 unsigned int ref_count;
147 static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = {
148 .key_len = sizeof(struct mlxsw_afa_fwd_entry_ht_key),
149 .key_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_key),
150 .head_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_node),
151 .automatic_shrinking = true,
154 struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
155 const struct mlxsw_afa_ops *ops,
158 struct mlxsw_afa *mlxsw_afa;
161 mlxsw_afa = kzalloc(sizeof(*mlxsw_afa), GFP_KERNEL);
163 return ERR_PTR(-ENOMEM);
164 err = rhashtable_init(&mlxsw_afa->set_ht, &mlxsw_afa_set_ht_params);
166 goto err_set_rhashtable_init;
167 err = rhashtable_init(&mlxsw_afa->fwd_entry_ht,
168 &mlxsw_afa_fwd_entry_ht_params);
170 goto err_fwd_entry_rhashtable_init;
171 mlxsw_afa->max_acts_per_set = max_acts_per_set;
172 mlxsw_afa->ops = ops;
173 mlxsw_afa->ops_priv = ops_priv;
176 err_fwd_entry_rhashtable_init:
177 rhashtable_destroy(&mlxsw_afa->set_ht);
178 err_set_rhashtable_init:
182 EXPORT_SYMBOL(mlxsw_afa_create);
184 void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa)
186 rhashtable_destroy(&mlxsw_afa->fwd_entry_ht);
187 rhashtable_destroy(&mlxsw_afa->set_ht);
190 EXPORT_SYMBOL(mlxsw_afa_destroy);
192 static void mlxsw_afa_set_goto_set(struct mlxsw_afa_set *set,
193 enum mlxsw_afa_set_goto_binding_cmd cmd,
196 char *actions = set->ht_key.enc_actions;
198 mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_GOTO);
199 mlxsw_afa_set_goto_g_set(actions, true);
200 mlxsw_afa_set_goto_binding_cmd_set(actions, cmd);
201 mlxsw_afa_set_goto_next_binding_set(actions, group_id);
204 static void mlxsw_afa_set_next_set(struct mlxsw_afa_set *set,
205 u32 next_set_kvdl_index)
207 char *actions = set->ht_key.enc_actions;
209 mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_NEXT);
210 mlxsw_afa_set_next_action_set_ptr_set(actions, next_set_kvdl_index);
213 static struct mlxsw_afa_set *mlxsw_afa_set_create(bool is_first)
215 struct mlxsw_afa_set *set;
217 set = kzalloc(sizeof(*set), GFP_KERNEL);
220 /* Need to initialize the set to pass by default */
221 mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
222 set->ht_key.is_first = is_first;
static void mlxsw_afa_set_destroy(struct mlxsw_afa_set *set)
{
	kfree(set);
}
232 static int mlxsw_afa_set_share(struct mlxsw_afa *mlxsw_afa,
233 struct mlxsw_afa_set *set)
237 err = rhashtable_insert_fast(&mlxsw_afa->set_ht, &set->ht_node,
238 mlxsw_afa_set_ht_params);
241 err = mlxsw_afa->ops->kvdl_set_add(mlxsw_afa->ops_priv,
243 set->ht_key.enc_actions,
244 set->ht_key.is_first);
246 goto err_kvdl_set_add;
252 rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node,
253 mlxsw_afa_set_ht_params);
257 static void mlxsw_afa_set_unshare(struct mlxsw_afa *mlxsw_afa,
258 struct mlxsw_afa_set *set)
260 mlxsw_afa->ops->kvdl_set_del(mlxsw_afa->ops_priv,
262 set->ht_key.is_first);
263 rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node,
264 mlxsw_afa_set_ht_params);
268 static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa,
269 struct mlxsw_afa_set *set)
271 if (--set->ref_count)
274 mlxsw_afa_set_unshare(mlxsw_afa, set);
275 mlxsw_afa_set_destroy(set);
278 static struct mlxsw_afa_set *mlxsw_afa_set_get(struct mlxsw_afa *mlxsw_afa,
279 struct mlxsw_afa_set *orig_set)
281 struct mlxsw_afa_set *set;
284 /* There is a hashtable of sets maintained. If a set with the exact
285 * same encoding exists, we reuse it. Otherwise, the current set
286 * is shared by making it available to others using the hash table.
288 set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key,
289 mlxsw_afa_set_ht_params);
292 mlxsw_afa_set_put(mlxsw_afa, orig_set);
295 err = mlxsw_afa_set_share(mlxsw_afa, set);
302 /* Block structure holds a list of action sets. One action block
303 * represents one chain of actions executed upon match of a rule.
306 struct mlxsw_afa_block {
307 struct mlxsw_afa *afa;
309 struct mlxsw_afa_set *first_set;
310 struct mlxsw_afa_set *cur_set;
311 unsigned int cur_act_index; /* In current set. */
312 struct list_head fwd_entry_ref_list;
315 struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
317 struct mlxsw_afa_block *block;
319 block = kzalloc(sizeof(*block), GFP_KERNEL);
322 INIT_LIST_HEAD(&block->fwd_entry_ref_list);
323 block->afa = mlxsw_afa;
325 /* At least one action set is always present, so just create it here */
326 block->first_set = mlxsw_afa_set_create(true);
327 if (!block->first_set)
328 goto err_first_set_create;
329 block->cur_set = block->first_set;
332 err_first_set_create:
336 EXPORT_SYMBOL(mlxsw_afa_block_create);
338 static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block);
340 void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block)
342 struct mlxsw_afa_set *set = block->first_set;
343 struct mlxsw_afa_set *next_set;
346 next_set = set->next;
347 mlxsw_afa_set_put(block->afa, set);
350 mlxsw_afa_fwd_entry_refs_destroy(block);
353 EXPORT_SYMBOL(mlxsw_afa_block_destroy);
355 int mlxsw_afa_block_commit(struct mlxsw_afa_block *block)
357 struct mlxsw_afa_set *set = block->cur_set;
358 struct mlxsw_afa_set *prev_set;
360 block->cur_set = NULL;
361 block->finished = true;
363 /* Go over all linked sets starting from last
364 * and try to find existing set in the hash table.
365 * In case it is not there, assign a KVD linear index
369 prev_set = set->prev;
370 set = mlxsw_afa_set_get(block->afa, set);
372 /* No rollback is needed since the chain is
373 * in consistent state and mlxsw_afa_block_destroy
374 * will take care of putting it away.
378 prev_set->next = set;
379 mlxsw_afa_set_next_set(prev_set, set->kvdl_index);
384 block->first_set = set;
387 EXPORT_SYMBOL(mlxsw_afa_block_commit);
389 char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block)
391 return block->first_set->ht_key.enc_actions;
393 EXPORT_SYMBOL(mlxsw_afa_block_first_set);
395 u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block)
397 return block->first_set->kvdl_index;
399 EXPORT_SYMBOL(mlxsw_afa_block_first_set_kvdl_index);
401 void mlxsw_afa_block_continue(struct mlxsw_afa_block *block)
403 if (WARN_ON(block->finished))
405 mlxsw_afa_set_goto_set(block->cur_set,
406 MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE, 0);
407 block->finished = true;
409 EXPORT_SYMBOL(mlxsw_afa_block_continue);
411 void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id)
413 if (WARN_ON(block->finished))
415 mlxsw_afa_set_goto_set(block->cur_set,
416 MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP, group_id);
417 block->finished = true;
419 EXPORT_SYMBOL(mlxsw_afa_block_jump);
421 static struct mlxsw_afa_fwd_entry *
422 mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port)
424 struct mlxsw_afa_fwd_entry *fwd_entry;
427 fwd_entry = kzalloc(sizeof(*fwd_entry), GFP_KERNEL);
429 return ERR_PTR(-ENOMEM);
430 fwd_entry->ht_key.local_port = local_port;
431 fwd_entry->ref_count = 1;
433 err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht,
435 mlxsw_afa_fwd_entry_ht_params);
437 goto err_rhashtable_insert;
439 err = mlxsw_afa->ops->kvdl_fwd_entry_add(mlxsw_afa->ops_priv,
440 &fwd_entry->kvdl_index,
443 goto err_kvdl_fwd_entry_add;
446 err_kvdl_fwd_entry_add:
447 rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node,
448 mlxsw_afa_fwd_entry_ht_params);
449 err_rhashtable_insert:
454 static void mlxsw_afa_fwd_entry_destroy(struct mlxsw_afa *mlxsw_afa,
455 struct mlxsw_afa_fwd_entry *fwd_entry)
457 mlxsw_afa->ops->kvdl_fwd_entry_del(mlxsw_afa->ops_priv,
458 fwd_entry->kvdl_index);
459 rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node,
460 mlxsw_afa_fwd_entry_ht_params);
464 static struct mlxsw_afa_fwd_entry *
465 mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u8 local_port)
467 struct mlxsw_afa_fwd_entry_ht_key ht_key = {0};
468 struct mlxsw_afa_fwd_entry *fwd_entry;
470 ht_key.local_port = local_port;
471 fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key,
472 mlxsw_afa_fwd_entry_ht_params);
474 fwd_entry->ref_count++;
477 return mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port);
480 static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa,
481 struct mlxsw_afa_fwd_entry *fwd_entry)
483 if (--fwd_entry->ref_count)
485 mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry);
488 struct mlxsw_afa_fwd_entry_ref {
489 struct list_head list;
490 struct mlxsw_afa_fwd_entry *fwd_entry;
493 static struct mlxsw_afa_fwd_entry_ref *
494 mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
496 struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
497 struct mlxsw_afa_fwd_entry *fwd_entry;
500 fwd_entry_ref = kzalloc(sizeof(*fwd_entry_ref), GFP_KERNEL);
502 return ERR_PTR(-ENOMEM);
503 fwd_entry = mlxsw_afa_fwd_entry_get(block->afa, local_port);
504 if (IS_ERR(fwd_entry)) {
505 err = PTR_ERR(fwd_entry);
506 goto err_fwd_entry_get;
508 fwd_entry_ref->fwd_entry = fwd_entry;
509 list_add(&fwd_entry_ref->list, &block->fwd_entry_ref_list);
510 return fwd_entry_ref;
513 kfree(fwd_entry_ref);
518 mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
519 struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
521 list_del(&fwd_entry_ref->list);
522 mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
523 kfree(fwd_entry_ref);
526 static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block)
528 struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
529 struct mlxsw_afa_fwd_entry_ref *tmp;
531 list_for_each_entry_safe(fwd_entry_ref, tmp,
532 &block->fwd_entry_ref_list, list)
533 mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
536 #define MLXSW_AFA_ONE_ACTION_LEN 32
537 #define MLXSW_AFA_PAYLOAD_OFFSET 4
539 static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
540 u8 action_code, u8 action_size)
545 if (WARN_ON(block->finished))
547 if (block->cur_act_index + action_size >
548 block->afa->max_acts_per_set) {
549 struct mlxsw_afa_set *set;
551 /* The appended action won't fit into the current action set,
552 * so create a new set.
554 set = mlxsw_afa_set_create(false);
557 set->prev = block->cur_set;
558 block->cur_act_index = 0;
559 block->cur_set->next = set;
560 block->cur_set = set;
563 actions = block->cur_set->ht_key.enc_actions;
564 oneact = actions + block->cur_act_index * MLXSW_AFA_ONE_ACTION_LEN;
565 block->cur_act_index += action_size;
566 mlxsw_afa_all_action_type_set(oneact, action_code);
567 return oneact + MLXSW_AFA_PAYLOAD_OFFSET;
570 /* Trap / Discard Action
571 * ---------------------
572 * The Trap / Discard action enables trapping / mirroring packets to the CPU
573 * as well as discarding packets.
574 * The ACL Trap / Discard separates the forward/discard control from CPU
575 * trap control. In addition, the Trap / Discard action enables activating
576 * SPAN (port mirroring).
579 #define MLXSW_AFA_TRAPDISC_CODE 0x03
580 #define MLXSW_AFA_TRAPDISC_SIZE 1
582 enum mlxsw_afa_trapdisc_forward_action {
583 MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD = 3,
586 /* afa_trapdisc_forward_action
589 MLXSW_ITEM32(afa, trapdisc, forward_action, 0x00, 0, 4);
592 mlxsw_afa_trapdisc_pack(char *payload,
593 enum mlxsw_afa_trapdisc_forward_action forward_action)
595 mlxsw_afa_trapdisc_forward_action_set(payload, forward_action);
598 int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
600 char *act = mlxsw_afa_block_append_action(block,
601 MLXSW_AFA_TRAPDISC_CODE,
602 MLXSW_AFA_TRAPDISC_SIZE);
606 mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD);
609 EXPORT_SYMBOL(mlxsw_afa_block_append_drop);
613 * Forwarding Action can be used to implement Policy Based Switching (PBS)
614 * as well as OpenFlow related "Output" action.
617 #define MLXSW_AFA_FORWARD_CODE 0x07
618 #define MLXSW_AFA_FORWARD_SIZE 1
620 enum mlxsw_afa_forward_type {
621 /* PBS, Policy Based Switching */
622 MLXSW_AFA_FORWARD_TYPE_PBS,
623 /* Output, OpenFlow output type */
624 MLXSW_AFA_FORWARD_TYPE_OUTPUT,
627 /* afa_forward_type */
628 MLXSW_ITEM32(afa, forward, type, 0x00, 24, 2);
630 /* afa_forward_pbs_ptr
631 * A pointer to the PBS entry configured by PPBS register.
632 * Reserved when in_port is set.
634 MLXSW_ITEM32(afa, forward, pbs_ptr, 0x08, 0, 24);
636 /* afa_forward_in_port
637 * Packet is forwarded back to the ingress port.
639 MLXSW_ITEM32(afa, forward, in_port, 0x0C, 0, 1);
642 mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type,
643 u32 pbs_ptr, bool in_port)
645 mlxsw_afa_forward_type_set(payload, type);
646 mlxsw_afa_forward_pbs_ptr_set(payload, pbs_ptr);
647 mlxsw_afa_forward_in_port_set(payload, in_port);
650 int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
651 u8 local_port, bool in_port)
653 struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
660 fwd_entry_ref = mlxsw_afa_fwd_entry_ref_create(block, local_port);
661 if (IS_ERR(fwd_entry_ref))
662 return PTR_ERR(fwd_entry_ref);
663 kvdl_index = fwd_entry_ref->fwd_entry->kvdl_index;
665 act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE,
666 MLXSW_AFA_FORWARD_SIZE);
669 goto err_append_action;
671 mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS,
672 kvdl_index, in_port);
676 mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
679 EXPORT_SYMBOL(mlxsw_afa_block_append_fwd);