/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/tc_act/tc_vlan.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "spectrum_acl_flex_keys.h"
53 struct mlxsw_sp *mlxsw_sp;
54 struct mlxsw_afk *afk;
55 struct mlxsw_afa *afa;
56 const struct mlxsw_sp_acl_ops *ops;
57 struct rhashtable ruleset_ht;
58 struct list_head rules;
60 struct delayed_work dw;
61 unsigned long interval; /* ms */
62 #define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
63 } rule_activity_update;
64 unsigned long priv[0];
65 /* priv has to be always the last item */
68 struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
73 struct mlxsw_sp_acl_ruleset_ht_key {
74 struct net_device *dev; /* dev this ruleset is bound to */
76 const struct mlxsw_sp_acl_profile_ops *ops;
79 struct mlxsw_sp_acl_ruleset {
80 struct rhash_head ht_node; /* Member of acl HT */
81 struct mlxsw_sp_acl_ruleset_ht_key ht_key;
82 struct rhashtable rule_ht;
83 unsigned int ref_count;
84 unsigned long priv[0];
85 /* priv has to be always the last item */
88 struct mlxsw_sp_acl_rule {
89 struct rhash_head ht_node; /* Member of rule HT */
90 struct list_head list;
91 unsigned long cookie; /* HT key */
92 struct mlxsw_sp_acl_ruleset *ruleset;
93 struct mlxsw_sp_acl_rule_info *rulei;
97 unsigned long priv[0];
98 /* priv has to be always the last item */
101 static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
102 .key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
103 .key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
104 .head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
105 .automatic_shrinking = true,
108 static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
109 .key_len = sizeof(unsigned long),
110 .key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
111 .head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
112 .automatic_shrinking = true,
115 static struct mlxsw_sp_acl_ruleset *
116 mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
117 const struct mlxsw_sp_acl_profile_ops *ops)
119 struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
120 struct mlxsw_sp_acl_ruleset *ruleset;
124 alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
125 ruleset = kzalloc(alloc_size, GFP_KERNEL);
127 return ERR_PTR(-ENOMEM);
128 ruleset->ref_count = 1;
129 ruleset->ht_key.ops = ops;
131 err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
133 goto err_rhashtable_init;
135 err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv);
137 goto err_ops_ruleset_add;
142 rhashtable_destroy(&ruleset->rule_ht);
148 static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
149 struct mlxsw_sp_acl_ruleset *ruleset)
151 const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
153 ops->ruleset_del(mlxsw_sp, ruleset->priv);
154 rhashtable_destroy(&ruleset->rule_ht);
158 static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
159 struct mlxsw_sp_acl_ruleset *ruleset,
160 struct net_device *dev, bool ingress)
162 const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
163 struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
166 ruleset->ht_key.dev = dev;
167 ruleset->ht_key.ingress = ingress;
168 err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
169 mlxsw_sp_acl_ruleset_ht_params);
172 err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress);
174 goto err_ops_ruleset_bind;
177 err_ops_ruleset_bind:
178 rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
179 mlxsw_sp_acl_ruleset_ht_params);
183 static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
184 struct mlxsw_sp_acl_ruleset *ruleset)
186 const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
187 struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
189 ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
190 rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
191 mlxsw_sp_acl_ruleset_ht_params);
194 static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
196 ruleset->ref_count++;
199 static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
200 struct mlxsw_sp_acl_ruleset *ruleset)
202 if (--ruleset->ref_count)
204 mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset);
205 mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
208 struct mlxsw_sp_acl_ruleset *
209 mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
210 struct net_device *dev, bool ingress,
211 enum mlxsw_sp_acl_profile profile)
213 const struct mlxsw_sp_acl_profile_ops *ops;
214 struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
215 struct mlxsw_sp_acl_ruleset_ht_key ht_key;
216 struct mlxsw_sp_acl_ruleset *ruleset;
219 ops = acl->ops->profile_ops(mlxsw_sp, profile);
221 return ERR_PTR(-EINVAL);
223 memset(&ht_key, 0, sizeof(ht_key));
225 ht_key.ingress = ingress;
227 ruleset = rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
228 mlxsw_sp_acl_ruleset_ht_params);
230 mlxsw_sp_acl_ruleset_ref_inc(ruleset);
233 ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops);
236 err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev, ingress);
238 goto err_ruleset_bind;
242 mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
/* Release a reference obtained via mlxsw_sp_acl_ruleset_get(). */
void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
253 mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp,
254 struct mlxsw_sp_acl_rule_info *rulei)
258 err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index);
261 rulei->counter_valid = true;
266 mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp,
267 struct mlxsw_sp_acl_rule_info *rulei)
269 rulei->counter_valid = false;
270 mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index);
273 struct mlxsw_sp_acl_rule_info *
274 mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
276 struct mlxsw_sp_acl_rule_info *rulei;
279 rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
282 rulei->act_block = mlxsw_afa_block_create(acl->afa);
283 if (IS_ERR(rulei->act_block)) {
284 err = PTR_ERR(rulei->act_block);
285 goto err_afa_block_create;
289 err_afa_block_create:
294 void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
296 mlxsw_afa_block_destroy(rulei->act_block);
300 int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
302 return mlxsw_afa_block_commit(rulei->act_block);
305 void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
306 unsigned int priority)
308 rulei->priority = priority;
311 void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
312 enum mlxsw_afk_element element,
313 u32 key_value, u32 mask_value)
315 mlxsw_afk_values_add_u32(&rulei->values, element,
316 key_value, mask_value);
319 void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
320 enum mlxsw_afk_element element,
321 const char *key_value,
322 const char *mask_value, unsigned int len)
324 mlxsw_afk_values_add_buf(&rulei->values, element,
325 key_value, mask_value, len);
328 void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
330 mlxsw_afa_block_continue(rulei->act_block);
333 void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
336 mlxsw_afa_block_jump(rulei->act_block, group_id);
339 int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
341 return mlxsw_afa_block_append_drop(rulei->act_block);
344 int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
345 struct mlxsw_sp_acl_rule_info *rulei,
346 struct net_device *out_dev)
348 struct mlxsw_sp_port *mlxsw_sp_port;
353 if (!mlxsw_sp_port_dev_check(out_dev))
355 mlxsw_sp_port = netdev_priv(out_dev);
356 if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp)
358 local_port = mlxsw_sp_port->local_port;
361 /* If out_dev is NULL, the called wants to
362 * set forward to ingress port.
367 return mlxsw_afa_block_append_fwd(rulei->act_block,
368 local_port, in_port);
371 int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
372 struct mlxsw_sp_acl_rule_info *rulei,
373 u32 action, u16 vid, u16 proto, u8 prio)
377 if (action == TCA_VLAN_ACT_MODIFY) {
386 dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
391 return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
392 vid, prio, ethertype);
394 dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
399 int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
400 struct mlxsw_sp_acl_rule_info *rulei)
402 return mlxsw_afa_block_append_counter(rulei->act_block,
403 rulei->counter_index);
406 int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
407 struct mlxsw_sp_acl_rule_info *rulei,
410 return mlxsw_afa_block_append_fid_set(rulei->act_block, fid);
413 struct mlxsw_sp_acl_rule *
414 mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
415 struct mlxsw_sp_acl_ruleset *ruleset,
416 unsigned long cookie)
418 const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
419 struct mlxsw_sp_acl_rule *rule;
422 mlxsw_sp_acl_ruleset_ref_inc(ruleset);
423 rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL);
428 rule->cookie = cookie;
429 rule->ruleset = ruleset;
431 rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
432 if (IS_ERR(rule->rulei)) {
433 err = PTR_ERR(rule->rulei);
434 goto err_rulei_create;
437 err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei);
439 goto err_counter_alloc;
443 mlxsw_sp_acl_rulei_destroy(rule->rulei);
447 mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
451 void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
452 struct mlxsw_sp_acl_rule *rule)
454 struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
456 mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei);
457 mlxsw_sp_acl_rulei_destroy(rule->rulei);
459 mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
462 int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
463 struct mlxsw_sp_acl_rule *rule)
465 struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
466 const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
469 err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
473 err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
474 mlxsw_sp_acl_rule_ht_params);
476 goto err_rhashtable_insert;
478 list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
481 err_rhashtable_insert:
482 ops->rule_del(mlxsw_sp, rule->priv);
486 void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
487 struct mlxsw_sp_acl_rule *rule)
489 struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
490 const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
492 list_del(&rule->list);
493 rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
494 mlxsw_sp_acl_rule_ht_params);
495 ops->rule_del(mlxsw_sp, rule->priv);
498 struct mlxsw_sp_acl_rule *
499 mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
500 struct mlxsw_sp_acl_ruleset *ruleset,
501 unsigned long cookie)
503 return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
504 mlxsw_sp_acl_rule_ht_params);
507 struct mlxsw_sp_acl_rule_info *
508 mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
513 static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
514 struct mlxsw_sp_acl_rule *rule)
516 struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
517 const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
521 err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
525 rule->last_used = jiffies;
529 static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
531 struct mlxsw_sp_acl_rule *rule;
534 /* Protect internal structures from changes */
536 list_for_each_entry(rule, &acl->rules, list) {
537 err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
540 goto err_rule_update;
550 static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
552 unsigned long interval = acl->rule_activity_update.interval;
554 mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
555 msecs_to_jiffies(interval));
558 static void mlxsw_sp_acl_rul_activity_update_work(struct work_struct *work)
560 struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
561 rule_activity_update.dw.work);
564 err = mlxsw_sp_acl_rules_activity_update(acl);
566 dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity");
568 mlxsw_sp_acl_rule_activity_work_schedule(acl);
571 int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
572 struct mlxsw_sp_acl_rule *rule,
573 u64 *packets, u64 *bytes, u64 *last_use)
576 struct mlxsw_sp_acl_rule_info *rulei;
581 rulei = mlxsw_sp_acl_rule_rulei(rule);
582 err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
583 ¤t_packets, ¤t_bytes);
587 *packets = current_packets - rule->last_packets;
588 *bytes = current_bytes - rule->last_bytes;
589 *last_use = rule->last_used;
591 rule->last_bytes = current_bytes;
592 rule->last_packets = current_packets;
597 #define MLXSW_SP_KDVL_ACT_EXT_SIZE 1
599 static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
600 char *enc_actions, bool is_first)
602 struct mlxsw_sp *mlxsw_sp = priv;
603 char pefa_pl[MLXSW_REG_PEFA_LEN];
607 /* The first action set of a TCAM entry is stored directly in TCAM,
608 * not KVD linear area.
613 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KDVL_ACT_EXT_SIZE,
617 mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
618 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
621 *p_kvdl_index = kvdl_index;
625 mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
629 static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
632 struct mlxsw_sp *mlxsw_sp = priv;
636 mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
639 static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
642 struct mlxsw_sp *mlxsw_sp = priv;
643 char ppbs_pl[MLXSW_REG_PPBS_LEN];
647 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
650 mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
651 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl);
654 *p_kvdl_index = kvdl_index;
658 mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
662 static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
664 struct mlxsw_sp *mlxsw_sp = priv;
666 mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
669 static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
670 .kvdl_set_add = mlxsw_sp_act_kvdl_set_add,
671 .kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
672 .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
673 .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
676 int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
678 const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
679 struct mlxsw_sp_acl *acl;
682 acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL);
686 acl->mlxsw_sp = mlxsw_sp;
687 acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
690 MLXSW_SP_AFK_BLOCKS_COUNT);
696 acl->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
697 ACL_ACTIONS_PER_SET),
698 &mlxsw_sp_act_afa_ops, mlxsw_sp);
699 if (IS_ERR(acl->afa)) {
700 err = PTR_ERR(acl->afa);
704 err = rhashtable_init(&acl->ruleset_ht,
705 &mlxsw_sp_acl_ruleset_ht_params);
707 goto err_rhashtable_init;
709 INIT_LIST_HEAD(&acl->rules);
710 err = acl_ops->init(mlxsw_sp, acl->priv);
712 goto err_acl_ops_init;
716 /* Create the delayed work for the rule activity_update */
717 INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
718 mlxsw_sp_acl_rul_activity_update_work);
719 acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
720 mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
724 rhashtable_destroy(&acl->ruleset_ht);
726 mlxsw_afa_destroy(acl->afa);
728 mlxsw_afk_destroy(acl->afk);
734 void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
736 struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
737 const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;
739 cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
740 acl_ops->fini(mlxsw_sp, acl->priv);
741 WARN_ON(!list_empty(&acl->rules));
742 rhashtable_destroy(&acl->ruleset_ht);
743 mlxsw_afa_destroy(acl->afa);
744 mlxsw_afk_destroy(acl->afk);