]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64...
[karo-tx-linux.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum_acl.c
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
3  * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the names of the copyright holders nor the names of its
15  *    contributors may be used to endorse or promote products derived from
16  *    this software without specific prior written permission.
17  *
18  * Alternatively, this software may be distributed under the terms of the
19  * GNU General Public License ("GPL") version 2 as published by the Free
20  * Software Foundation.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  */
34
35 #include <linux/kernel.h>
36 #include <linux/slab.h>
37 #include <linux/errno.h>
38 #include <linux/list.h>
39 #include <linux/string.h>
40 #include <linux/rhashtable.h>
41 #include <linux/netdevice.h>
42 #include <net/tc_act/tc_vlan.h>
43
44 #include "reg.h"
45 #include "core.h"
46 #include "resources.h"
47 #include "spectrum.h"
48 #include "core_acl_flex_keys.h"
49 #include "core_acl_flex_actions.h"
50 #include "spectrum_acl_flex_keys.h"
51
52 struct mlxsw_sp_acl {
53         struct mlxsw_sp *mlxsw_sp;
54         struct mlxsw_afk *afk;
55         struct mlxsw_afa *afa;
56         const struct mlxsw_sp_acl_ops *ops;
57         struct rhashtable ruleset_ht;
58         struct list_head rules;
59         struct {
60                 struct delayed_work dw;
61                 unsigned long interval; /* ms */
62 #define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
63         } rule_activity_update;
64         unsigned long priv[0];
65         /* priv has to be always the last item */
66 };
67
/* Accessor for the flexible-key handle owned by the ACL instance. */
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
	return acl->afk;
}
72
/* Lookup key for the ruleset hashtable: a ruleset is uniquely identified
 * by the netdevice it is bound to, the direction and the profile ops.
 */
struct mlxsw_sp_acl_ruleset_ht_key {
	struct net_device *dev; /* dev this ruleset is bound to */
	bool ingress;
	const struct mlxsw_sp_acl_profile_ops *ops;
};
78
79 struct mlxsw_sp_acl_ruleset {
80         struct rhash_head ht_node; /* Member of acl HT */
81         struct mlxsw_sp_acl_ruleset_ht_key ht_key;
82         struct rhashtable rule_ht;
83         unsigned int ref_count;
84         unsigned long priv[0];
85         /* priv has to be always the last item */
86 };
87
88 struct mlxsw_sp_acl_rule {
89         struct rhash_head ht_node; /* Member of rule HT */
90         struct list_head list;
91         unsigned long cookie; /* HT key */
92         struct mlxsw_sp_acl_ruleset *ruleset;
93         struct mlxsw_sp_acl_rule_info *rulei;
94         u64 last_used;
95         u64 last_packets;
96         u64 last_bytes;
97         unsigned long priv[0];
98         /* priv has to be always the last item */
99 };
100
/* Hashtable of rulesets keyed by the full (dev, ingress, ops) tuple so an
 * existing ruleset can be shared by later getters with the same binding.
 */
static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
	.head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
	.automatic_shrinking = true,
};
107
/* Per-ruleset hashtable of rules, keyed by the TC cookie. */
static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
	.head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
	.automatic_shrinking = true,
};
114
/* Allocate a ruleset (with flavor private room), init its rule hashtable
 * and let the profile ops set up their private part. The ruleset starts
 * with a reference count of one. Returns ERR_PTR() on failure; unwinds
 * in reverse order of construction via gotos.
 */
static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
			    const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
	ruleset = kzalloc(alloc_size, GFP_KERNEL);
	if (!ruleset)
		return ERR_PTR(-ENOMEM);
	ruleset->ref_count = 1;
	ruleset->ht_key.ops = ops;

	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_init;

	err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv);
	if (err)
		goto err_ops_ruleset_add;

	return ruleset;

err_ops_ruleset_add:
	rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
	kfree(ruleset);
	return ERR_PTR(err);
}
147
/* Tear down a ruleset: flavor private part first, then the rule
 * hashtable, then the memory. Mirrors mlxsw_sp_acl_ruleset_create().
 */
static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ops->ruleset_del(mlxsw_sp, ruleset->priv);
	rhashtable_destroy(&ruleset->rule_ht);
	kfree(ruleset);
}
157
/* Bind a ruleset to a netdevice/direction: record the binding in the
 * hashtable key, publish it in the ruleset hashtable, then bind in HW via
 * the profile ops. On HW bind failure the hashtable insertion is undone.
 */
static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_ruleset *ruleset,
				     struct net_device *dev, bool ingress)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	int err;

	ruleset->ht_key.dev = dev;
	ruleset->ht_key.ingress = ingress;
	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
				     mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		return err;
	err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress);
	if (err)
		goto err_ops_ruleset_bind;
	return 0;

err_ops_ruleset_bind:
	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
	return err;
}
182
/* Reverse of mlxsw_sp_acl_ruleset_bind(): unbind in HW, then remove the
 * ruleset from the lookup hashtable.
 */
static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
}
193
/* Take a reference on a ruleset. Plain increment is sufficient here;
 * callers are serialized (presumably under RTNL — see the activity
 * worker which also takes rtnl_lock()).
 */
static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
	ruleset->ref_count++;
}
198
/* Drop a reference; when the last one goes away, unbind the ruleset from
 * its device and destroy it.
 */
static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	if (--ruleset->ref_count)
		return;
	mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}
207
/* Get a ruleset for (dev, ingress, profile): reuse an existing one from
 * the hashtable (taking a reference), or create and bind a fresh one.
 * Returns ERR_PTR() on failure. Pair with mlxsw_sp_acl_ruleset_put().
 */
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
			 struct net_device *dev, bool ingress,
			 enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct mlxsw_sp_acl_ruleset *ruleset;
	int err;

	ops = acl->ops->profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);

	/* Zero the key first: it is hashed as raw bytes, so padding must
	 * be deterministic.
	 */
	memset(&ht_key, 0, sizeof(ht_key));
	ht_key.dev = dev;
	ht_key.ingress = ingress;
	ht_key.ops = ops;
	ruleset = rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
					 mlxsw_sp_acl_ruleset_ht_params);
	if (ruleset) {
		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
		return ruleset;
	}
	ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops);
	if (IS_ERR(ruleset))
		return ruleset;
	err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev, ingress);
	if (err)
		goto err_ruleset_bind;
	return ruleset;

err_ruleset_bind:
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}
245
/* Release a reference obtained via mlxsw_sp_acl_ruleset_get(). */
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
251
/* Allocate a HW flow counter for a rule and mark it valid in rulei. */
static int
mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei)
{
	int err;

	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index);
	if (err)
		return err;
	rulei->counter_valid = true;
	return 0;
}
264
/* Invalidate and release the rule's HW flow counter. */
static void
mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei)
{
	rulei->counter_valid = false;
	mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index);
}
272
/* Allocate rule info and its flexible-action block.
 * NOTE(review): returns NULL on kzalloc failure but ERR_PTR() on action
 * block failure — callers must use IS_ERR(), and NULL slips past that
 * check; the sole caller mlxsw_sp_acl_rule_create() would then treat
 * allocation failure as success. Worth confirming/fixing upstream.
 */
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
	if (!rulei)
		return NULL;
	rulei->act_block = mlxsw_afa_block_create(acl->afa);
	if (IS_ERR(rulei->act_block)) {
		err = PTR_ERR(rulei->act_block);
		goto err_afa_block_create;
	}
	return rulei;

err_afa_block_create:
	kfree(rulei);
	return ERR_PTR(err);
}
293
/* Free rule info and its action block; reverse of rulei_create(). */
void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
	mlxsw_afa_block_destroy(rulei->act_block);
	kfree(rulei);
}
299
/* Finalize the accumulated action block so it can be written to HW. */
int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_commit(rulei->act_block);
}
304
/* Record the rule's match priority. */
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority)
{
	rulei->priority = priority;
}
310
/* Add a 32-bit key/mask pair for the given flexible-key element. */
void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value)
{
	mlxsw_afk_values_add_u32(&rulei->values, element,
				 key_value, mask_value);
}
318
/* Add a buffer (e.g. MAC address) key/mask pair of 'len' bytes for the
 * given flexible-key element.
 */
void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len)
{
	mlxsw_afk_values_add_buf(&rulei->values, element,
				 key_value, mask_value, len);
}
327
/* Terminate the action block with a "continue" (fall through to further
 * lookup) action.
 */
void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
	mlxsw_afa_block_continue(rulei->act_block);
}
332
/* Terminate the action block with a jump to another ACL group. */
void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				 u16 group_id)
{
	mlxsw_afa_block_jump(rulei->act_block, group_id);
}
338
/* Append a drop action to the rule's action block. */
int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_drop(rulei->act_block);
}
343
/* Append a forward action. out_dev must be a port of this switch
 * instance; a NULL out_dev means "forward back out the ingress port".
 * Returns -EINVAL for foreign or non-mlxsw netdevices.
 */
int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;
	bool in_port;

	if (out_dev) {
		if (!mlxsw_sp_port_dev_check(out_dev))
			return -EINVAL;
		mlxsw_sp_port = netdev_priv(out_dev);
		if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp)
			return -EINVAL;
		local_port = mlxsw_sp_port->local_port;
		in_port = false;
	} else {
		/* If out_dev is NULL, the caller wants to
		 * set forward to ingress port.
		 */
		local_port = 0;
		in_port = true;
	}
	return mlxsw_afa_block_append_fwd(rulei->act_block,
					  local_port, in_port);
}
370
371 int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
372                                 struct mlxsw_sp_acl_rule_info *rulei,
373                                 u32 action, u16 vid, u16 proto, u8 prio)
374 {
375         u8 ethertype;
376
377         if (action == TCA_VLAN_ACT_MODIFY) {
378                 switch (proto) {
379                 case ETH_P_8021Q:
380                         ethertype = 0;
381                         break;
382                 case ETH_P_8021AD:
383                         ethertype = 1;
384                         break;
385                 default:
386                         dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
387                                 proto);
388                         return -EINVAL;
389                 }
390
391                 return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
392                                                           vid, prio, ethertype);
393         } else {
394                 dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
395                 return -EINVAL;
396         }
397 }
398
/* Append a count action using the counter allocated at rule creation. */
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_counter(rulei->act_block,
					      rulei->counter_index);
}
405
/* Append an action setting the filtering identifier (FID). */
int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u16 fid)
{
	return mlxsw_afa_block_append_fid_set(rulei->act_block, fid);
}
412
/* Create a rule in the given ruleset, keyed by the TC cookie. Takes a
 * reference on the ruleset for the rule's lifetime and allocates the
 * rule info and a HW flow counter. Returns ERR_PTR() on failure; error
 * paths unwind in reverse order of construction.
 */
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mlxsw_sp_acl_ruleset_ref_inc(ruleset);
	rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL);
	if (!rule) {
		err = -ENOMEM;
		goto err_alloc;
	}
	rule->cookie = cookie;
	rule->ruleset = ruleset;

	rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
	if (IS_ERR(rule->rulei)) {
		err = PTR_ERR(rule->rulei);
		goto err_rulei_create;
	}

	err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei);
	if (err)
		goto err_counter_alloc;
	return rule;

err_counter_alloc:
	mlxsw_sp_acl_rulei_destroy(rule->rulei);
err_rulei_create:
	kfree(rule);
err_alloc:
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}
450
/* Destroy a rule created by mlxsw_sp_acl_rule_create(): free the
 * counter and rule info, free the rule, then drop the ruleset reference
 * the rule was holding.
 */
void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

	mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei);
	mlxsw_sp_acl_rulei_destroy(rule->rulei);
	kfree(rule);
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
461
/* Install a rule: program HW via the profile ops, then publish it in
 * the ruleset's hashtable and on the global rules list walked by the
 * activity worker. HW programming is undone if the insert fails.
 */
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	int err;

	err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
	if (err)
		return err;

	err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
				     mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_insert;

	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
	return 0;

err_rhashtable_insert:
	ops->rule_del(mlxsw_sp, rule->priv);
	return err;
}
485
/* Uninstall a rule; exact reverse order of mlxsw_sp_acl_rule_add():
 * unlink from the activity list and hashtable first, then remove from HW.
 */
void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	list_del(&rule->list);
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
	ops->rule_del(mlxsw_sp, rule->priv);
}
497
/* Look up an installed rule by its TC cookie; NULL if not found. */
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
				      mlxsw_sp_acl_rule_ht_params);
}
506
/* Accessor for a rule's match/action info. */
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
	return rule->rulei;
}
512
/* Query HW activity for one rule and refresh its last_used timestamp
 * if the rule saw traffic since the previous query.
 */
static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	bool active;
	int err;

	err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
	if (err)
		return err;
	if (active)
		rule->last_used = jiffies;
	return 0;
}
528
529 static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
530 {
531         struct mlxsw_sp_acl_rule *rule;
532         int err;
533
534         /* Protect internal structures from changes */
535         rtnl_lock();
536         list_for_each_entry(rule, &acl->rules, list) {
537                 err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
538                                                         rule);
539                 if (err)
540                         goto err_rule_update;
541         }
542         rtnl_unlock();
543         return 0;
544
545 err_rule_update:
546         rtnl_unlock();
547         return err;
548 }
549
/* (Re)arm the periodic activity-update delayed work. */
static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
	unsigned long interval = acl->rule_activity_update.interval;

	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
			       msecs_to_jiffies(interval));
}
557
558 static void mlxsw_sp_acl_rul_activity_update_work(struct work_struct *work)
559 {
560         struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
561                                                 rule_activity_update.dw.work);
562         int err;
563
564         err = mlxsw_sp_acl_rules_activity_update(acl);
565         if (err)
566                 dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity");
567
568         mlxsw_sp_acl_rule_activity_work_schedule(acl);
569 }
570
571 int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
572                                 struct mlxsw_sp_acl_rule *rule,
573                                 u64 *packets, u64 *bytes, u64 *last_use)
574
575 {
576         struct mlxsw_sp_acl_rule_info *rulei;
577         u64 current_packets;
578         u64 current_bytes;
579         int err;
580
581         rulei = mlxsw_sp_acl_rule_rulei(rule);
582         err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
583                                         &current_packets, &current_bytes);
584         if (err)
585                 return err;
586
587         *packets = current_packets - rule->last_packets;
588         *bytes = current_bytes - rule->last_bytes;
589         *last_use = rule->last_used;
590
591         rule->last_bytes = current_bytes;
592         rule->last_packets = current_packets;
593
594         return 0;
595 }
596
597 #define MLXSW_SP_KDVL_ACT_EXT_SIZE 1
598
599 static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
600                                      char *enc_actions, bool is_first)
601 {
602         struct mlxsw_sp *mlxsw_sp = priv;
603         char pefa_pl[MLXSW_REG_PEFA_LEN];
604         u32 kvdl_index;
605         int err;
606
607         /* The first action set of a TCAM entry is stored directly in TCAM,
608          * not KVD linear area.
609          */
610         if (is_first)
611                 return 0;
612
613         err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KDVL_ACT_EXT_SIZE,
614                                   &kvdl_index);
615         if (err)
616                 return err;
617         mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
618         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
619         if (err)
620                 goto err_pefa_write;
621         *p_kvdl_index = kvdl_index;
622         return 0;
623
624 err_pefa_write:
625         mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
626         return err;
627 }
628
/* Free the KVDL slot of an extended action set. The first set lives in
 * TCAM (see kvdl_set_add), so there is nothing to free for it.
 */
static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
				      bool is_first)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	if (is_first)
		return;
	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
}
638
/* Allocate a single KVDL entry for a forward-to-port binding and program
 * it via the PPBS register. Returns the index through p_kvdl_index; the
 * entry is freed again if the register write fails.
 */
static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
					   u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	char ppbs_pl[MLXSW_REG_PPBS_LEN];
	u32 kvdl_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
	if (err)
		return err;
	mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl);
	if (err)
		goto err_ppbs_write;
	*p_kvdl_index = kvdl_index;
	return 0;

err_ppbs_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
	return err;
}
661
/* Release the KVDL entry of a forward-to-port binding. */
static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
}
668
/* Callbacks handed to the flexible-action core; priv is mlxsw_sp. */
static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
	.kvdl_set_add		= mlxsw_sp_act_kvdl_set_add,
	.kvdl_set_del		= mlxsw_sp_act_kvdl_set_del,
	.kvdl_fwd_entry_add	= mlxsw_sp_act_kvdl_fwd_entry_add,
	.kvdl_fwd_entry_del	= mlxsw_sp_act_kvdl_fwd_entry_del,
};
675
/* Bring up the ACL subsystem: allocate the per-switch ACL state (with
 * flavor private room), create the flexible key/action handles sized
 * from device resources, init the ruleset hashtable and flavor ops, and
 * kick off the periodic rule-activity worker. Error paths unwind in
 * reverse order of construction.
 */
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
	struct mlxsw_sp_acl *acl;
	int err;

	acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	mlxsw_sp->acl = acl;
	acl->mlxsw_sp = mlxsw_sp;
	acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_FLEX_KEYS),
				    mlxsw_sp_afk_blocks,
				    MLXSW_SP_AFK_BLOCKS_COUNT);
	/* NOTE: afk uses NULL-on-error, afa below uses ERR_PTR — the two
	 * cores have different error conventions.
	 */
	if (!acl->afk) {
		err = -ENOMEM;
		goto err_afk_create;
	}

	acl->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_ACTIONS_PER_SET),
				    &mlxsw_sp_act_afa_ops, mlxsw_sp);
	if (IS_ERR(acl->afa)) {
		err = PTR_ERR(acl->afa);
		goto err_afa_create;
	}

	err = rhashtable_init(&acl->ruleset_ht,
			      &mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_rhashtable_init;

	INIT_LIST_HEAD(&acl->rules);
	err = acl_ops->init(mlxsw_sp, acl->priv);
	if (err)
		goto err_acl_ops_init;

	acl->ops = acl_ops;

	/* Create the delayed work for the rule activity_update */
	INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
			  mlxsw_sp_acl_rul_activity_update_work);
	acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
	return 0;

err_acl_ops_init:
	rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
	mlxsw_afa_destroy(acl->afa);
err_afa_create:
	mlxsw_afk_destroy(acl->afk);
err_afk_create:
	kfree(acl);
	return err;
}
733
734 void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
735 {
736         struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
737         const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;
738
739         cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
740         acl_ops->fini(mlxsw_sp, acl->priv);
741         WARN_ON(!list_empty(&acl->rules));
742         rhashtable_destroy(&acl->ruleset_ht);
743         mlxsw_afa_destroy(acl->afa);
744         mlxsw_afk_destroy(acl->afk);
745         kfree(acl);
746 }