/*
 * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/rhashtable.h>
#include <linux/list.h>

#include "item.h"
#include "core_acl_flex_actions.h"

enum mlxsw_afa_set_type {
        MLXSW_AFA_SET_TYPE_NEXT,
        MLXSW_AFA_SET_TYPE_GOTO,
};

/* afa_set_type
 * Type of the record at the end of the action set.
 */
MLXSW_ITEM32(afa, set, type, 0xA0, 28, 4);
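
/* MLXSW_ITEM32(_type, _cname, _iname, _offset, _shift, _sizebits), defined
 * in item.h, generates static inline helpers named
 * mlxsw_<type>_<cname>_<iname>_get()/_set() that read and write the given
 * bit field of a payload buffer. The item above, for example, is what
 * provides the mlxsw_afa_set_type_set() helper used when encoding sets.
 */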

/* afa_set_next_action_set_ptr
 * A pointer to the next action set in the KVD Centralized database.
 */
MLXSW_ITEM32(afa, set, next_action_set_ptr, 0xA4, 0, 24);

/* afa_set_goto_g
 * group - When set, the binding is of an ACL group. When cleared,
 * the binding is of an ACL.
 * Must be set to 1 for Spectrum.
 */
MLXSW_ITEM32(afa, set, goto_g, 0xA4, 29, 1);

enum mlxsw_afa_set_goto_binding_cmd {
        /* continue to the next binding point */
        MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE,
        /* jump to the next binding point, no return */
        MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP,
        /* terminate the ACL binding */
        MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM = 4,
};

/* afa_set_goto_binding_cmd */
MLXSW_ITEM32(afa, set, goto_binding_cmd, 0xA4, 24, 3);

/* afa_set_goto_next_binding
 * ACL/ACL group identifier. If the g bit is set, this field should hold
 * the acl_group_id, else it should hold the acl_id.
 */
MLXSW_ITEM32(afa, set, goto_next_binding, 0xA4, 0, 16);

/* afa_all_action_type
 * Action Type.
 */
MLXSW_ITEM32(afa, all, action_type, 0x00, 24, 6);

struct mlxsw_afa {
        unsigned int max_acts_per_set;
        const struct mlxsw_afa_ops *ops;
        void *ops_priv;
        struct rhashtable set_ht;
        struct rhashtable fwd_entry_ht;
};

#define MLXSW_AFA_SET_LEN 0xA8

struct mlxsw_afa_set_ht_key {
        char enc_actions[MLXSW_AFA_SET_LEN]; /* Encoded set */
        bool is_first;
};

/* Set structure holds one action set record. It contains up to three
 * actions (depending on the size of the particular actions). The set is
 * either put directly into a rule, or it is stored in the KVD linear area.
 * To prevent duplicate entries in the KVD linear area, a hashtable is
 * used to track sets that were previously inserted and may be shared.
 */
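
/* For instance, if two committed blocks end with a set whose encoded
 * actions are byte-for-byte identical, the lookup in mlxsw_afa_set_get()
 * finds the already shared set and only bumps its ref_count, so a single
 * KVD linear entry backs both blocks. Since is_first is part of the hash
 * key, a first set is never merged with a non-first one.
 */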

struct mlxsw_afa_set {
        struct rhash_head ht_node;
        struct mlxsw_afa_set_ht_key ht_key;
        u32 kvdl_index;
        bool shared; /* Inserted in hashtable (doesn't mean that
                      * kvdl_index is valid).
                      */
        unsigned int ref_count;
        struct mlxsw_afa_set *next; /* Pointer to the next set. */
        struct mlxsw_afa_set *prev; /* Pointer to the previous set,
                                     * note that set may have multiple
                                     * sets from multiple blocks
                                     * pointing at it. This is only
                                     * usable until commit.
                                     */
};

static const struct rhashtable_params mlxsw_afa_set_ht_params = {
        .key_len = sizeof(struct mlxsw_afa_set_ht_key),
        .key_offset = offsetof(struct mlxsw_afa_set, ht_key),
        .head_offset = offsetof(struct mlxsw_afa_set, ht_node),
        .automatic_shrinking = true,
};

struct mlxsw_afa_fwd_entry_ht_key {
        u8 local_port;
};

struct mlxsw_afa_fwd_entry {
        struct rhash_head ht_node;
        struct mlxsw_afa_fwd_entry_ht_key ht_key;
        u32 kvdl_index;
        unsigned int ref_count;
};

static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = {
        .key_len = sizeof(struct mlxsw_afa_fwd_entry_ht_key),
        .key_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_key),
        .head_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_node),
        .automatic_shrinking = true,
};

struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
                                   const struct mlxsw_afa_ops *ops,
                                   void *ops_priv)
{
        struct mlxsw_afa *mlxsw_afa;
        int err;

        mlxsw_afa = kzalloc(sizeof(*mlxsw_afa), GFP_KERNEL);
        if (!mlxsw_afa)
                return ERR_PTR(-ENOMEM);
        err = rhashtable_init(&mlxsw_afa->set_ht, &mlxsw_afa_set_ht_params);
        if (err)
                goto err_set_rhashtable_init;
        err = rhashtable_init(&mlxsw_afa->fwd_entry_ht,
                              &mlxsw_afa_fwd_entry_ht_params);
        if (err)
                goto err_fwd_entry_rhashtable_init;
        mlxsw_afa->max_acts_per_set = max_acts_per_set;
        mlxsw_afa->ops = ops;
        mlxsw_afa->ops_priv = ops_priv;
        return mlxsw_afa;

err_fwd_entry_rhashtable_init:
        rhashtable_destroy(&mlxsw_afa->set_ht);
err_set_rhashtable_init:
        kfree(mlxsw_afa);
        return ERR_PTR(err);
}
EXPORT_SYMBOL(mlxsw_afa_create);
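
/* Typical use (illustrative sketch only; the real caller lives in the
 * Spectrum-specific code and may differ in detail): the caller provides
 * KVD linear callbacks plus an opaque private pointer and keeps the
 * returned handle for all block operations:
 *
 *	static const struct mlxsw_afa_ops my_afa_ops = {
 *		.kvdl_set_add		= my_kvdl_set_add,
 *		.kvdl_set_del		= my_kvdl_set_del,
 *		.kvdl_fwd_entry_add	= my_kvdl_fwd_entry_add,
 *		.kvdl_fwd_entry_del	= my_kvdl_fwd_entry_del,
 *	};
 *
 *	afa = mlxsw_afa_create(max_acts_per_set, &my_afa_ops, my_priv);
 *	if (IS_ERR(afa))
 *		return PTR_ERR(afa);
 *
 * The my_* identifiers above are placeholders, not symbols defined
 * elsewhere in the driver.
 */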

void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa)
{
        rhashtable_destroy(&mlxsw_afa->fwd_entry_ht);
        rhashtable_destroy(&mlxsw_afa->set_ht);
        kfree(mlxsw_afa);
}
EXPORT_SYMBOL(mlxsw_afa_destroy);

static void mlxsw_afa_set_goto_set(struct mlxsw_afa_set *set,
                                   enum mlxsw_afa_set_goto_binding_cmd cmd,
                                   u16 group_id)
{
        char *actions = set->ht_key.enc_actions;

        mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_GOTO);
        mlxsw_afa_set_goto_g_set(actions, true);
        mlxsw_afa_set_goto_binding_cmd_set(actions, cmd);
        mlxsw_afa_set_goto_next_binding_set(actions, group_id);
}

static void mlxsw_afa_set_next_set(struct mlxsw_afa_set *set,
                                   u32 next_set_kvdl_index)
{
        char *actions = set->ht_key.enc_actions;

        mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_NEXT);
        mlxsw_afa_set_next_action_set_ptr_set(actions, next_set_kvdl_index);
}

static struct mlxsw_afa_set *mlxsw_afa_set_create(bool is_first)
{
        struct mlxsw_afa_set *set;

        set = kzalloc(sizeof(*set), GFP_KERNEL);
        if (!set)
                return NULL;
        /* Need to initialize the set to pass by default */
        mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
        set->ht_key.is_first = is_first;
        set->ref_count = 1;
        return set;
}

static void mlxsw_afa_set_destroy(struct mlxsw_afa_set *set)
{
        kfree(set);
}

static int mlxsw_afa_set_share(struct mlxsw_afa *mlxsw_afa,
                               struct mlxsw_afa_set *set)
{
        int err;

        err = rhashtable_insert_fast(&mlxsw_afa->set_ht, &set->ht_node,
                                     mlxsw_afa_set_ht_params);
        if (err)
                return err;
        err = mlxsw_afa->ops->kvdl_set_add(mlxsw_afa->ops_priv,
                                           &set->kvdl_index,
                                           set->ht_key.enc_actions,
                                           set->ht_key.is_first);
        if (err)
                goto err_kvdl_set_add;
        set->shared = true;
        set->prev = NULL;
        return 0;

err_kvdl_set_add:
        rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node,
                               mlxsw_afa_set_ht_params);
        return err;
}

static void mlxsw_afa_set_unshare(struct mlxsw_afa *mlxsw_afa,
                                  struct mlxsw_afa_set *set)
{
        mlxsw_afa->ops->kvdl_set_del(mlxsw_afa->ops_priv,
                                     set->kvdl_index,
                                     set->ht_key.is_first);
        rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node,
                               mlxsw_afa_set_ht_params);
        set->shared = false;
}

static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa,
                              struct mlxsw_afa_set *set)
{
        if (--set->ref_count)
                return;
        if (set->shared)
                mlxsw_afa_set_unshare(mlxsw_afa, set);
        mlxsw_afa_set_destroy(set);
}

static struct mlxsw_afa_set *mlxsw_afa_set_get(struct mlxsw_afa *mlxsw_afa,
                                               struct mlxsw_afa_set *orig_set)
{
        struct mlxsw_afa_set *set;
        int err;

        /* There is a hashtable of sets maintained. If a set with the exact
         * same encoding exists, we reuse it. Otherwise, the current set
         * is shared by making it available to others using the hash table.
         */
        set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key,
                                     mlxsw_afa_set_ht_params);
        if (set) {
                set->ref_count++;
                mlxsw_afa_set_put(mlxsw_afa, orig_set);
        } else {
                set = orig_set;
                err = mlxsw_afa_set_share(mlxsw_afa, set);
                if (err)
                        return ERR_PTR(err);
        }
        return set;
}

/* Block structure holds a list of action sets. One action block
 * represents one chain of actions executed upon match of a rule.
 */
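
/* Typical lifecycle of a block (illustrative sketch, error handling
 * trimmed): create it, append actions, optionally end it with
 * mlxsw_afa_block_continue() or mlxsw_afa_block_jump() instead of the
 * default terminate binding, then commit so the sets get deduplicated and
 * handed to the KVD linear callbacks. mlxsw_afa_block_first_set() then
 * returns the encoded first set for use in the rule record:
 *
 *	block = mlxsw_afa_block_create(afa);
 *	err = mlxsw_afa_block_append_drop(block);
 *	err = mlxsw_afa_block_commit(block);
 *	enc = mlxsw_afa_block_first_set(block);
 *	...
 *	mlxsw_afa_block_destroy(block);
 */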

struct mlxsw_afa_block {
        struct mlxsw_afa *afa;
        bool finished;
        struct mlxsw_afa_set *first_set;
        struct mlxsw_afa_set *cur_set;
        unsigned int cur_act_index; /* In current set. */
        struct list_head fwd_entry_ref_list;
};

struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
{
        struct mlxsw_afa_block *block;

        block = kzalloc(sizeof(*block), GFP_KERNEL);
        if (!block)
                return NULL;
        INIT_LIST_HEAD(&block->fwd_entry_ref_list);
        block->afa = mlxsw_afa;

        /* At least one action set is always present, so just create it here */
        block->first_set = mlxsw_afa_set_create(true);
        if (!block->first_set)
                goto err_first_set_create;
        block->cur_set = block->first_set;
        return block;

err_first_set_create:
        kfree(block);
        return NULL;
}
EXPORT_SYMBOL(mlxsw_afa_block_create);

static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block);

void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block)
{
        struct mlxsw_afa_set *set = block->first_set;
        struct mlxsw_afa_set *next_set;

        do {
                next_set = set->next;
                mlxsw_afa_set_put(block->afa, set);
                set = next_set;
        } while (set);
        mlxsw_afa_fwd_entry_refs_destroy(block);
        kfree(block);
}
EXPORT_SYMBOL(mlxsw_afa_block_destroy);

int mlxsw_afa_block_commit(struct mlxsw_afa_block *block)
{
        struct mlxsw_afa_set *set = block->cur_set;
        struct mlxsw_afa_set *prev_set;

        block->cur_set = NULL;
        block->finished = true;

        /* Go over all linked sets starting from last
         * and try to find existing set in the hash table.
         * In case it is not there, assign a KVD linear index
         * and insert it.
         */
        do {
                prev_set = set->prev;
                set = mlxsw_afa_set_get(block->afa, set);
                if (IS_ERR(set))
                        /* No rollback is needed since the chain is
                         * in consistent state and mlxsw_afa_block_destroy
                         * will take care of putting it away.
                         */
                        return PTR_ERR(set);
                if (prev_set) {
                        prev_set->next = set;
                        mlxsw_afa_set_next_set(prev_set, set->kvdl_index);
                        set = prev_set;
                }
        } while (prev_set);

        block->first_set = set;
        return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_commit);

char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block)
{
        return block->first_set->ht_key.enc_actions;
}
EXPORT_SYMBOL(mlxsw_afa_block_first_set);

u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block)
{
        return block->first_set->kvdl_index;
}
EXPORT_SYMBOL(mlxsw_afa_block_first_set_kvdl_index);

void mlxsw_afa_block_continue(struct mlxsw_afa_block *block)
{
        if (WARN_ON(block->finished))
                return;
        mlxsw_afa_set_goto_set(block->cur_set,
                               MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE, 0);
        block->finished = true;
}
EXPORT_SYMBOL(mlxsw_afa_block_continue);

void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id)
{
        if (WARN_ON(block->finished))
                return;
        mlxsw_afa_set_goto_set(block->cur_set,
                               MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP, group_id);
        block->finished = true;
}
EXPORT_SYMBOL(mlxsw_afa_block_jump);

static struct mlxsw_afa_fwd_entry *
mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port)
{
        struct mlxsw_afa_fwd_entry *fwd_entry;
        int err;

        fwd_entry = kzalloc(sizeof(*fwd_entry), GFP_KERNEL);
        if (!fwd_entry)
                return ERR_PTR(-ENOMEM);
        fwd_entry->ht_key.local_port = local_port;
        fwd_entry->ref_count = 1;

        err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht,
                                     &fwd_entry->ht_node,
                                     mlxsw_afa_fwd_entry_ht_params);
        if (err)
                goto err_rhashtable_insert;

        err = mlxsw_afa->ops->kvdl_fwd_entry_add(mlxsw_afa->ops_priv,
                                                 &fwd_entry->kvdl_index,
                                                 local_port);
        if (err)
                goto err_kvdl_fwd_entry_add;
        return fwd_entry;

err_kvdl_fwd_entry_add:
        rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node,
                               mlxsw_afa_fwd_entry_ht_params);
err_rhashtable_insert:
        kfree(fwd_entry);
        return ERR_PTR(err);
}

static void mlxsw_afa_fwd_entry_destroy(struct mlxsw_afa *mlxsw_afa,
                                        struct mlxsw_afa_fwd_entry *fwd_entry)
{
        mlxsw_afa->ops->kvdl_fwd_entry_del(mlxsw_afa->ops_priv,
                                           fwd_entry->kvdl_index);
        rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node,
                               mlxsw_afa_fwd_entry_ht_params);
        kfree(fwd_entry);
}

static struct mlxsw_afa_fwd_entry *
mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u8 local_port)
{
        struct mlxsw_afa_fwd_entry_ht_key ht_key = {0};
        struct mlxsw_afa_fwd_entry *fwd_entry;

        ht_key.local_port = local_port;
        fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key,
                                           mlxsw_afa_fwd_entry_ht_params);
        if (fwd_entry) {
                fwd_entry->ref_count++;
                return fwd_entry;
        }
        return mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port);
}

static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa,
                                    struct mlxsw_afa_fwd_entry *fwd_entry)
{
        if (--fwd_entry->ref_count)
                return;
        mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry);
}

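/* A forwarding (PBS) entry in the KVD linear area is refcounted per local
 * port in fwd_entry_ht above. A fwd_entry_ref ties such an entry to one
 * block, so that mlxsw_afa_block_destroy() can drop all of the block's
 * references via mlxsw_afa_fwd_entry_refs_destroy().
 */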
struct mlxsw_afa_fwd_entry_ref {
        struct list_head list;
        struct mlxsw_afa_fwd_entry *fwd_entry;
};

static struct mlxsw_afa_fwd_entry_ref *
mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
{
        struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
        struct mlxsw_afa_fwd_entry *fwd_entry;
        int err;

        fwd_entry_ref = kzalloc(sizeof(*fwd_entry_ref), GFP_KERNEL);
        if (!fwd_entry_ref)
                return ERR_PTR(-ENOMEM);
        fwd_entry = mlxsw_afa_fwd_entry_get(block->afa, local_port);
        if (IS_ERR(fwd_entry)) {
                err = PTR_ERR(fwd_entry);
                goto err_fwd_entry_get;
        }
        fwd_entry_ref->fwd_entry = fwd_entry;
        list_add(&fwd_entry_ref->list, &block->fwd_entry_ref_list);
        return fwd_entry_ref;

err_fwd_entry_get:
        kfree(fwd_entry_ref);
        return ERR_PTR(err);
}

static void
mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
                                struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
{
        list_del(&fwd_entry_ref->list);
        mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
        kfree(fwd_entry_ref);
}

static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block)
{
        struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
        struct mlxsw_afa_fwd_entry_ref *tmp;

        list_for_each_entry_safe(fwd_entry_ref, tmp,
                                 &block->fwd_entry_ref_list, list)
                mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
}

#define MLXSW_AFA_ONE_ACTION_LEN 32
#define MLXSW_AFA_PAYLOAD_OFFSET 4

static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
                                           u8 action_code, u8 action_size)
{
        char *oneact;
        char *actions;

        if (WARN_ON(block->finished))
                return NULL;
        if (block->cur_act_index + action_size >
            block->afa->max_acts_per_set) {
                struct mlxsw_afa_set *set;

                /* The appended action won't fit into the current action set,
                 * so create a new set.
                 */
                set = mlxsw_afa_set_create(false);
                if (!set)
                        return NULL;
                set->prev = block->cur_set;
                block->cur_act_index = 0;
                block->cur_set->next = set;
                block->cur_set = set;
        }

        actions = block->cur_set->ht_key.enc_actions;
        oneact = actions + block->cur_act_index * MLXSW_AFA_ONE_ACTION_LEN;
        block->cur_act_index += action_size;
        mlxsw_afa_all_action_type_set(oneact, action_code);
        return oneact + MLXSW_AFA_PAYLOAD_OFFSET;
}
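
/* mlxsw_afa_block_append_action() reserves action_size slots of
 * MLXSW_AFA_ONE_ACTION_LEN bytes each in the current set, opening a new
 * set when the action would not fit, writes the action type into the slot
 * header and returns a pointer MLXSW_AFA_PAYLOAD_OFFSET bytes past the
 * slot start. The per-action items below are therefore defined relative
 * to offset 0x00 of that payload pointer.
 */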

/* VLAN Action
 * -----------
 * The VLAN action is used for manipulating VLANs. It can be used to implement
 * QinQ, VLAN translation, changing the PCP bits of the VLAN tag, pushing,
 * popping or swapping VLANs, and more.
 */
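
/* Illustrative use (sketch only, arbitrary example values): rewriting the
 * outer VID, PCP and EtherType index of a matched packet,
 *
 *	err = mlxsw_afa_block_append_vlan_modify(block, 10, 0, 0);
 *
 * packs a single VLAN action using MLXSW_AFA_VLAN_CMD_SET_OUTER for all
 * three fields and no tag push/pop.
 */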

#define MLXSW_AFA_VLAN_CODE 0x02
#define MLXSW_AFA_VLAN_SIZE 1

enum mlxsw_afa_vlan_vlan_tag_cmd {
        MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
        MLXSW_AFA_VLAN_VLAN_TAG_CMD_PUSH_TAG,
        MLXSW_AFA_VLAN_VLAN_TAG_CMD_POP_TAG,
};

enum mlxsw_afa_vlan_cmd {
        MLXSW_AFA_VLAN_CMD_NOP,
        MLXSW_AFA_VLAN_CMD_SET_OUTER,
        MLXSW_AFA_VLAN_CMD_SET_INNER,
        MLXSW_AFA_VLAN_CMD_COPY_OUTER_TO_INNER,
        MLXSW_AFA_VLAN_CMD_COPY_INNER_TO_OUTER,
        MLXSW_AFA_VLAN_CMD_SWAP,
};

/* afa_vlan_vlan_tag_cmd
 * Tag command: push, pop, nop VLAN header.
 */
MLXSW_ITEM32(afa, vlan, vlan_tag_cmd, 0x00, 29, 3);

/* afa_vlan_vid_cmd */
MLXSW_ITEM32(afa, vlan, vid_cmd, 0x04, 29, 3);

/* afa_vlan_vid */
MLXSW_ITEM32(afa, vlan, vid, 0x04, 0, 12);

/* afa_vlan_ethertype_cmd */
MLXSW_ITEM32(afa, vlan, ethertype_cmd, 0x08, 29, 3);

/* afa_vlan_ethertype
 * Index to EtherTypes in Switch VLAN EtherType Register (SVER).
 */
MLXSW_ITEM32(afa, vlan, ethertype, 0x08, 24, 3);

/* afa_vlan_pcp_cmd */
MLXSW_ITEM32(afa, vlan, pcp_cmd, 0x08, 13, 3);

/* afa_vlan_pcp */
MLXSW_ITEM32(afa, vlan, pcp, 0x08, 8, 3);

static inline void
mlxsw_afa_vlan_pack(char *payload,
                    enum mlxsw_afa_vlan_vlan_tag_cmd vlan_tag_cmd,
                    enum mlxsw_afa_vlan_cmd vid_cmd, u16 vid,
                    enum mlxsw_afa_vlan_cmd pcp_cmd, u8 pcp,
                    enum mlxsw_afa_vlan_cmd ethertype_cmd, u8 ethertype)
{
        mlxsw_afa_vlan_vlan_tag_cmd_set(payload, vlan_tag_cmd);
        mlxsw_afa_vlan_vid_cmd_set(payload, vid_cmd);
        mlxsw_afa_vlan_vid_set(payload, vid);
        mlxsw_afa_vlan_pcp_cmd_set(payload, pcp_cmd);
        mlxsw_afa_vlan_pcp_set(payload, pcp);
        mlxsw_afa_vlan_ethertype_cmd_set(payload, ethertype_cmd);
        mlxsw_afa_vlan_ethertype_set(payload, ethertype);
}

int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
                                       u16 vid, u8 pcp, u8 et)
{
        char *act = mlxsw_afa_block_append_action(block,
                                                  MLXSW_AFA_VLAN_CODE,
                                                  MLXSW_AFA_VLAN_SIZE);

        if (!act)
                return -ENOBUFS;
        mlxsw_afa_vlan_pack(act, MLXSW_AFA_VLAN_VLAN_TAG_CMD_NOP,
                            MLXSW_AFA_VLAN_CMD_SET_OUTER, vid,
                            MLXSW_AFA_VLAN_CMD_SET_OUTER, pcp,
                            MLXSW_AFA_VLAN_CMD_SET_OUTER, et);
        return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_vlan_modify);

/* Trap / Discard Action
 * ---------------------
 * The Trap / Discard action enables trapping / mirroring packets to the CPU
 * as well as discarding packets.
 * The ACL Trap / Discard separates the forward/discard control from CPU
 * trap control. In addition, the Trap / Discard action enables activating
 * SPAN (port mirroring).
 */
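
/* Only the discard flavour of this action is used below; a matched packet
 * is dropped by appending a single Trap / Discard action, e.g. (sketch):
 *
 *	err = mlxsw_afa_block_append_drop(block);
 */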

#define MLXSW_AFA_TRAPDISC_CODE 0x03
#define MLXSW_AFA_TRAPDISC_SIZE 1

enum mlxsw_afa_trapdisc_forward_action {
        MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD = 3,
};

/* afa_trapdisc_forward_action
 * Forward Action.
 */
MLXSW_ITEM32(afa, trapdisc, forward_action, 0x00, 0, 4);

static inline void
mlxsw_afa_trapdisc_pack(char *payload,
                        enum mlxsw_afa_trapdisc_forward_action forward_action)
{
        mlxsw_afa_trapdisc_forward_action_set(payload, forward_action);
}

int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
{
        char *act = mlxsw_afa_block_append_action(block,
                                                  MLXSW_AFA_TRAPDISC_CODE,
                                                  MLXSW_AFA_TRAPDISC_SIZE);

        if (!act)
                return -ENOBUFS;
        mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD);
        return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_drop);

/* Forwarding Action
 * -----------------
 * The Forwarding action can be used to implement Policy Based Switching (PBS)
 * as well as the OpenFlow-related "Output" action.
 */
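
/* mlxsw_afa_block_append_fwd() below does not take a PBS pointer directly;
 * it obtains (or reuses) a per-port forwarding entry in the KVD linear
 * area via the kvdl_fwd_entry_add/del callbacks and stores that entry's
 * index in pbs_ptr. Forwarding back to the ingress port (in_port) is not
 * implemented and yields -EOPNOTSUPP.
 */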

#define MLXSW_AFA_FORWARD_CODE 0x07
#define MLXSW_AFA_FORWARD_SIZE 1

enum mlxsw_afa_forward_type {
        /* PBS, Policy Based Switching */
        MLXSW_AFA_FORWARD_TYPE_PBS,
        /* Output, OpenFlow output type */
        MLXSW_AFA_FORWARD_TYPE_OUTPUT,
};

/* afa_forward_type */
MLXSW_ITEM32(afa, forward, type, 0x00, 24, 2);

/* afa_forward_pbs_ptr
 * A pointer to the PBS entry configured by PPBS register.
 * Reserved when in_port is set.
 */
MLXSW_ITEM32(afa, forward, pbs_ptr, 0x08, 0, 24);

/* afa_forward_in_port
 * Packet is forwarded back to the ingress port.
 */
MLXSW_ITEM32(afa, forward, in_port, 0x0C, 0, 1);

static inline void
mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type,
                       u32 pbs_ptr, bool in_port)
{
        mlxsw_afa_forward_type_set(payload, type);
        mlxsw_afa_forward_pbs_ptr_set(payload, pbs_ptr);
        mlxsw_afa_forward_in_port_set(payload, in_port);
}

int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
                               u8 local_port, bool in_port)
{
        struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
        u32 kvdl_index;
        char *act;
        int err;

        if (in_port)
                return -EOPNOTSUPP;
        fwd_entry_ref = mlxsw_afa_fwd_entry_ref_create(block, local_port);
        if (IS_ERR(fwd_entry_ref))
                return PTR_ERR(fwd_entry_ref);
        kvdl_index = fwd_entry_ref->fwd_entry->kvdl_index;

        act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE,
                                            MLXSW_AFA_FORWARD_SIZE);
        if (!act) {
                err = -ENOBUFS;
                goto err_append_action;
        }
        mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS,
                               kvdl_index, in_port);
        return 0;

err_append_action:
        mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
        return err;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_fwd);

/* Policing and Counting Action
 * ----------------------------
 * The Policing and Counting action is used to bind a policer and a counter
 * to ACL rules.
 */
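
/* Only counting is implemented below; a flow counter index provided by the
 * caller is bound to the rule, e.g. (sketch):
 *
 *	err = mlxsw_afa_block_append_counter(block, counter_index);
 *
 * which selects the packets-and-bytes counter set type.
 */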

#define MLXSW_AFA_POLCNT_CODE 0x08
#define MLXSW_AFA_POLCNT_SIZE 1

enum mlxsw_afa_polcnt_counter_set_type {
        /* No count */
        MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_NO_COUNT = 0x00,
        /* Count packets and bytes */
        MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
        /* Count only packets */
        MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS = 0x05,
};

/* afa_polcnt_counter_set_type
 * Counter set type for flow counters.
 */
MLXSW_ITEM32(afa, polcnt, counter_set_type, 0x04, 24, 8);

/* afa_polcnt_counter_index
 * Counter index for flow counters.
 */
MLXSW_ITEM32(afa, polcnt, counter_index, 0x04, 0, 24);

static inline void
mlxsw_afa_polcnt_pack(char *payload,
                      enum mlxsw_afa_polcnt_counter_set_type set_type,
                      u32 counter_index)
{
        mlxsw_afa_polcnt_counter_set_type_set(payload, set_type);
        mlxsw_afa_polcnt_counter_index_set(payload, counter_index);
}

int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
                                   u32 counter_index)
{
        char *act = mlxsw_afa_block_append_action(block,
                                                  MLXSW_AFA_POLCNT_CODE,
                                                  MLXSW_AFA_POLCNT_SIZE);
        if (!act)
                return -ENOBUFS;
        mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES,
                              counter_index);
        return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_counter);

/* Virtual Router and Forwarding Domain Action
 * -------------------------------------------
 * The Virtual Switch action is used to manipulate the Virtual Router (VR),
 * the MPLS label space and the Forwarding Identifier (FID).
 */
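
/* Only the FID set command is used below; e.g. (sketch):
 *
 *	err = mlxsw_afa_block_append_fid_set(block, fid);
 *
 * rewrites the Forwarding Identifier of matched packets to fid.
 */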

#define MLXSW_AFA_VIRFWD_CODE 0x0E
#define MLXSW_AFA_VIRFWD_SIZE 1

enum mlxsw_afa_virfwd_fid_cmd {
        /* Do nothing */
        MLXSW_AFA_VIRFWD_FID_CMD_NOOP,
        /* Set the Forwarding Identifier (FID) to fid */
        MLXSW_AFA_VIRFWD_FID_CMD_SET,
};

/* afa_virfwd_fid_cmd */
MLXSW_ITEM32(afa, virfwd, fid_cmd, 0x08, 29, 3);

/* afa_virfwd_fid
 * The FID value.
 */
MLXSW_ITEM32(afa, virfwd, fid, 0x08, 0, 16);

static inline void mlxsw_afa_virfwd_pack(char *payload,
                                         enum mlxsw_afa_virfwd_fid_cmd fid_cmd,
                                         u16 fid)
{
        mlxsw_afa_virfwd_fid_cmd_set(payload, fid_cmd);
        mlxsw_afa_virfwd_fid_set(payload, fid);
}

int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid)
{
        char *act = mlxsw_afa_block_append_action(block,
                                                  MLXSW_AFA_VIRFWD_CODE,
                                                  MLXSW_AFA_VIRFWD_SIZE);
        if (!act)
                return -ENOBUFS;
        mlxsw_afa_virfwd_pack(act, MLXSW_AFA_VIRFWD_FID_CMD_SET, fid);
        return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_fid_set);
857 EXPORT_SYMBOL(mlxsw_afa_block_append_fid_set);