]> git.karo-electronics.de Git - linux-beck.git/blob - net/sched/cls_matchall.c
f935429bd5ef1fcbe6a4272876b76e2ebb574c4b
[linux-beck.git] / net / sched / cls_matchall.c
1 /*
 * net/sched/cls_matchall.c             Match-all classifier
3  *
4  * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  */
11
12 #include <linux/kernel.h>
13 #include <linux/init.h>
14 #include <linux/module.h>
15
16 #include <net/sch_generic.h>
17 #include <net/pkt_cls.h>
18
/* A single matchall filter instance.  cls_matchall supports at most one
 * filter per classifier instance (see struct cls_mall_head).
 */
struct cls_mall_filter {
	struct tcf_exts exts;	/* actions attached to this filter */
	struct tcf_result res;	/* classification result (class id) */
	u32 handle;		/* filter handle; defaults to 1 in mall_change() */
	struct rcu_head rcu;	/* deferred free via call_rcu() */
	u32 flags;		/* TCA_CLS_FLAGS_* (skip_hw / skip_sw) */
};
26
/* Per-tcf_proto state: holds the single installed filter (or NULL). */
struct cls_mall_head {
	struct cls_mall_filter *filter;	/* RCU-protected; NULL when unset */
	struct rcu_head rcu;		/* deferred free via kfree_rcu() */
};
31
32 static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
33                          struct tcf_result *res)
34 {
35         struct cls_mall_head *head = rcu_dereference_bh(tp->root);
36         struct cls_mall_filter *f = head->filter;
37
38         if (tc_skip_sw(f->flags))
39                 return -1;
40
41         return tcf_exts_exec(skb, &f->exts, res);
42 }
43
44 static int mall_init(struct tcf_proto *tp)
45 {
46         struct cls_mall_head *head;
47
48         head = kzalloc(sizeof(*head), GFP_KERNEL);
49         if (!head)
50                 return -ENOBUFS;
51
52         rcu_assign_pointer(tp->root, head);
53
54         return 0;
55 }
56
57 static void mall_destroy_filter(struct rcu_head *head)
58 {
59         struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu);
60
61         tcf_exts_destroy(&f->exts);
62
63         kfree(f);
64 }
65
66 static int mall_replace_hw_filter(struct tcf_proto *tp,
67                                   struct cls_mall_filter *f,
68                                   unsigned long cookie)
69 {
70         struct net_device *dev = tp->q->dev_queue->dev;
71         struct tc_to_netdev offload;
72         struct tc_cls_matchall_offload mall_offload = {0};
73
74         offload.type = TC_SETUP_MATCHALL;
75         offload.cls_mall = &mall_offload;
76         offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
77         offload.cls_mall->exts = &f->exts;
78         offload.cls_mall->cookie = cookie;
79
80         return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
81                                              &offload);
82 }
83
84 static void mall_destroy_hw_filter(struct tcf_proto *tp,
85                                    struct cls_mall_filter *f,
86                                    unsigned long cookie)
87 {
88         struct net_device *dev = tp->q->dev_queue->dev;
89         struct tc_to_netdev offload;
90         struct tc_cls_matchall_offload mall_offload = {0};
91
92         offload.type = TC_SETUP_MATCHALL;
93         offload.cls_mall = &mall_offload;
94         offload.cls_mall->command = TC_CLSMATCHALL_DESTROY;
95         offload.cls_mall->exts = NULL;
96         offload.cls_mall->cookie = cookie;
97
98         dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
99                                              &offload);
100 }
101
102 static bool mall_destroy(struct tcf_proto *tp, bool force)
103 {
104         struct cls_mall_head *head = rtnl_dereference(tp->root);
105         struct net_device *dev = tp->q->dev_queue->dev;
106         struct cls_mall_filter *f = head->filter;
107
108         if (!force && f)
109                 return false;
110
111         if (f) {
112                 if (tc_should_offload(dev, tp, f->flags))
113                         mall_destroy_hw_filter(tp, f, (unsigned long) f);
114
115                 call_rcu(&f->rcu, mall_destroy_filter);
116         }
117         kfree_rcu(head, rcu);
118         return true;
119 }
120
121 static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
122 {
123         struct cls_mall_head *head = rtnl_dereference(tp->root);
124         struct cls_mall_filter *f = head->filter;
125
126         if (f && f->handle == handle)
127                 return (unsigned long) f;
128         return 0;
129 }
130
131 static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
132         [TCA_MATCHALL_UNSPEC]           = { .type = NLA_UNSPEC },
133         [TCA_MATCHALL_CLASSID]          = { .type = NLA_U32 },
134 };
135
/* Parse TCA_MATCHALL_* attributes and apply them to filter @f.
 *
 * Actions are validated into a temporary tcf_exts first and only
 * committed to @f with tcf_exts_change() after validation succeeds,
 * so @f is left untouched on error.  Caller holds RTNL.
 *
 * Returns 0 on success or a negative errno from tcf_exts_validate().
 */
static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_filter *f,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	tcf_exts_init(&e, TCA_MATCHALL_ACT, 0);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		/* Bind the filter to its class for qdisc bookkeeping. */
		tcf_bind_filter(tp, &f->res, base);
	}

	/* Commit the validated actions into the filter. */
	tcf_exts_change(tp, &f->exts, &e);

	return 0;
}
158
159 static int mall_change(struct net *net, struct sk_buff *in_skb,
160                        struct tcf_proto *tp, unsigned long base,
161                        u32 handle, struct nlattr **tca,
162                        unsigned long *arg, bool ovr)
163 {
164         struct cls_mall_head *head = rtnl_dereference(tp->root);
165         struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg;
166         struct net_device *dev = tp->q->dev_queue->dev;
167         struct cls_mall_filter *f;
168         struct nlattr *tb[TCA_MATCHALL_MAX + 1];
169         u32 flags = 0;
170         int err;
171
172         if (!tca[TCA_OPTIONS])
173                 return -EINVAL;
174
175         if (head->filter)
176                 return -EBUSY;
177
178         if (fold)
179                 return -EINVAL;
180
181         err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
182                                tca[TCA_OPTIONS], mall_policy);
183         if (err < 0)
184                 return err;
185
186         if (tb[TCA_MATCHALL_FLAGS]) {
187                 flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
188                 if (!tc_flags_valid(flags))
189                         return -EINVAL;
190         }
191
192         f = kzalloc(sizeof(*f), GFP_KERNEL);
193         if (!f)
194                 return -ENOBUFS;
195
196         tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0);
197
198         if (!handle)
199                 handle = 1;
200         f->handle = handle;
201         f->flags = flags;
202
203         err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
204         if (err)
205                 goto errout;
206
207         if (tc_should_offload(dev, tp, flags)) {
208                 err = mall_replace_hw_filter(tp, f, (unsigned long) f);
209                 if (err) {
210                         if (tc_skip_sw(flags))
211                                 goto errout;
212                         else
213                                 err = 0;
214                 }
215         }
216
217         *arg = (unsigned long) f;
218         rcu_assign_pointer(head->filter, f);
219
220         return 0;
221
222 errout:
223         kfree(f);
224         return err;
225 }
226
/* Remove the installed filter: tear down the hardware rule (if any),
 * unlink the filter from the fast path, and free it after an RCU grace
 * period.  Caller holds RTNL.  Always returns 0.
 */
static int mall_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct cls_mall_filter *f = (struct cls_mall_filter *) arg;
	struct net_device *dev = tp->q->dev_queue->dev;

	if (tc_should_offload(dev, tp, f->flags))
		mall_destroy_hw_filter(tp, f, (unsigned long) f);

	/* Unlink first so concurrent readers can no longer see @f,
	 * then defer the actual free past the grace period.
	 */
	RCU_INIT_POINTER(head->filter, NULL);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, mall_destroy_filter);
	return 0;
}
241
/* Iterate over the (at most one) filter for dump/walk callbacks.
 *
 * NOTE(review): when no filter is installed, head->filter is NULL and
 * arg->fn() is invoked with a 0 cookie — mall_dump() handles that, but
 * confirm every other walker callback tolerates a 0 cookie too.
 */
static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct cls_mall_filter *f = head->filter;

	if (arg->count < arg->skip)
		goto skip;
	if (arg->fn(tp, (unsigned long) f, arg) < 0)
		arg->stop = 1;
skip:
	arg->count++;
}
254
255 static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
256                      struct sk_buff *skb, struct tcmsg *t)
257 {
258         struct cls_mall_filter *f = (struct cls_mall_filter *) fh;
259         struct nlattr *nest;
260
261         if (!f)
262                 return skb->len;
263
264         t->tcm_handle = f->handle;
265
266         nest = nla_nest_start(skb, TCA_OPTIONS);
267         if (!nest)
268                 goto nla_put_failure;
269
270         if (f->res.classid &&
271             nla_put_u32(skb, TCA_MATCHALL_CLASSID, f->res.classid))
272                 goto nla_put_failure;
273
274         if (tcf_exts_dump(skb, &f->exts))
275                 goto nla_put_failure;
276
277         nla_nest_end(skb, nest);
278
279         if (tcf_exts_dump_stats(skb, &f->exts) < 0)
280                 goto nla_put_failure;
281
282         return skb->len;
283
284 nla_put_failure:
285         nla_nest_cancel(skb, nest);
286         return -1;
287 }
288
/* Classifier operations registered with the tc core. */
static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.dump		= mall_dump,
	.owner		= THIS_MODULE,
};
301
/* Register the matchall classifier with the tc core on module load. */
static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}

/* Unregister the classifier on module unload. */
static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");