/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough.  And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
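/*
 * Example (iproute2 syntax; device and class ids are illustrative):
 * classify everything destined for 10.0.0.0/8 into class 1:1 with
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 u32 \
 *		match ip dst 10.0.0.0/8 flowid 1:1
 *
 * tc compiles "match ip dst" down to a single 32bit value/mask pair
 * (struct tc_u32_key) applied at offset 16 of the IP header.
 */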
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
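/*
 * Three objects make up a u32 ruleset: tc_u_common is the state
 * shared by all u32 instances on one qdisc, tc_u_hnode is a single
 * hash table, and tc_u_knode is one key node (filter) hanging off
 * a hash bucket.
 */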
struct tc_u_knode {
	struct tc_u_knode	*next;
	u32			handle;
	struct tc_u_hnode	*ht_up;
	struct tcf_exts		exts;
#ifdef CONFIG_NET_CLS_IND
	int			ifindex;
#endif
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt	*pf;
#endif
#ifdef CONFIG_CLS_U32_MARK
	struct tc_u32_mark	mark;
#endif
	struct tc_u32_sel	sel;
};

struct tc_u_hnode {
	struct tc_u_hnode	*next;
	u32			handle;
	u32			prio;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned int		divisor;
	struct tc_u_knode	*ht[1];
};

struct tc_u_common {
	struct tc_u_hnode	*hlist;
	struct Qdisc		*q;
	int			refcnt;
	u32			hgenerator;
};
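/*
 * A u32 handle addresses a node through three fixed-width fields
 * (see the TC_U32_* macros in <linux/pkt_cls.h>):
 *
 *	31          20 19      12 11           0
 *	+-------------+----------+--------------+
 *	|    htid     |  bucket  |   node id    |
 *	+-------------+----------+--------------+
 *
 * TC_U32_HTID() picks the hash table, TC_U32_HASH() the bucket
 * within it and TC_U32_NODE() the key node inside that bucket.
 * TC_U32_KEY() is the low 20 bits; it is zero for a handle naming
 * a hash table rather than a key node.
 */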
static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}
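/*
 * Classification walks the tables top-down: hash the selected word
 * of the packet into a bucket of the current table, then compare
 * each node's key/mask pairs against the header.  A matching node
 * either terminates with a result (TC_U32_TERMINAL) or links down
 * into the next table; link depth is bounded by TC_U32_MAXDEPTH
 * via an explicit stack, so a bad ruleset cannot recurse forever.
 *
 * fshift is precomputed in u32_change() from the lowest set bit of
 * hmask: e.g. hmask 0x00ff0000 gives fshift 16, so u32_hash_fold()
 * folds out the third byte of the sampled word.
 *
 * The TC_U32_OFFSET/TC_U32_VAROFFSET flags move the header base for
 * the next level; a variable offset reads a 16bit word and applies
 * offmask/offshift (e.g. mask 0x0f00 with shift 6 extracts the IPv4
 * header length in bytes), and TC_U32_EAT makes the new base
 * permanent for deeper levels.
 */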
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int	  off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = tp->root;
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = ht->ht[sel];

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		n->pf->rcnt += 1;
		j = 0;
#endif

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mark.mask) != n->mark.val) {
			n = n->next;
			goto next_knode;
		} else {
			n->mark.success++;
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = n->next;
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			n->pf->kcnts[j] += 1;
			j++;
#endif
		}
		if (n->ht_down == NULL) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->ifindex)) {
					n = n->next;
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				n->pf->rhit += 1;
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = n->next;
					goto next_knode;
				}

				return r;
			}
			n = n->next;
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = n->ht_down;
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = n->ht_up;
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}
static struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = tp_c->hlist; ht; ht = ht->next)
		if (ht->handle == handle)
			break;

	return ht;
}
static struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = ht->ht[sel]; n; n = n->next)
		if (n->handle == handle)
			break;
out:
	return n;
}
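/*
 * u32_get() resolves a user-visible handle to either a hash table
 * (TC_U32_KEY() == 0) or a key node; u32_put() has nothing to
 * release because lifetimes are managed by the refcnts above.
 */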
static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = tp->root;
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return 0;

	if (TC_U32_KEY(handle) == 0)
		return (unsigned long)ht;

	return (unsigned long)u32_lookup_key(ht, handle);
}
static void u32_put(struct tcf_proto *tp, unsigned long f)
{
}
static u32 gen_new_htid(struct tc_u_common *tp_c)
{
	int i = 0x800;

	do {
		if (++tp_c->hgenerator == 0x7FF)
			tp_c->hgenerator = 1;
	} while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}
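/*
 * Generated table ids have bit 11 set before being shifted into the
 * top 12 bits of the handle, so they fall in the 0x801-0xFFE range
 * and stay clear of the low ids that userspace typically assigns
 * explicitly; the root table created by u32_init() uses 0x800
 * (handle 0x80000000).
 */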
static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;

	tp_c = tp->q->u32_node;

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->divisor = 0;
	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
	root_ht->prio = tp->prio;

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->q = tp->q;
		tp->q->u32_node = tp_c;
	}

	tp_c->refcnt++;
	root_ht->next = tp_c->hlist;
	tp_c->hlist = root_ht;
	root_ht->tp_c = tp_c;

	tp->root = root_ht;
	tp->data = tp_c;
	return 0;
}
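/*
 * All u32 instances on one qdisc share a single tc_u_common
 * (anchored at tp->q->u32_node), so hash tables created at one
 * priority can be linked to from filters at another.
 */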
static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
{
	tcf_unbind_filter(tp, &n->res);
	tcf_exts_destroy(tp, &n->exts);
	if (n->ht_down)
		n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return 0;
}
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_knode **kp;
	struct tc_u_hnode *ht = key->ht_up;

	if (ht) {
		for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
			if (*kp == key) {
				tcf_tree_lock(tp);
				*kp = key->next;
				tcf_tree_unlock(tp);

				u32_destroy_key(tp, key);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}
static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = ht->ht[h]) != NULL) {
			ht->ht[h] = n->next;

			u32_destroy_key(tp, n);
		}
	}
}
static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode **hn;

	WARN_ON(ht->refcnt);

	u32_clear_hnode(tp, ht);

	for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) {
		if (*hn == ht) {
			*hn = ht->next;
			kfree(ht);
			return 0;
		}
	}

	WARN_ON(1);
	return -ENOENT;
}
static void u32_destroy(struct tcf_proto *tp)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = tp->root;

	WARN_ON(root_ht == NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		tp->q->u32_node = NULL;

		for (ht = tp_c->hlist; ht; ht = ht->next) {
			ht->refcnt--;
			u32_clear_hnode(tp, ht);
		}

		while ((ht = tp_c->hlist) != NULL) {
			tp_c->hlist = ht->next;

			WARN_ON(ht->refcnt != 0);

			kfree(ht);
		}

		kfree(tp_c);
	}

	tp->data = NULL;
}
static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(ht->handle))
		return u32_delete_key(tp, (struct tc_u_knode *)ht);

	if (tp->root == ht)
		return -EINVAL;

	if (ht->refcnt == 1) {
		ht->refcnt--;
		u32_destroy_hnode(tp, ht);
	} else {
		return -EBUSY;
	}

	return 0;
}
#define NR_U32_NODE (1<<12)
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned long i;
	unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
					GFP_KERNEL);
	if (!bitmap)
		return handle | 0xFFF;

	for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
		set_bit(TC_U32_NODE(n->handle), bitmap);

	i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
	if (i >= NR_U32_NODE)
		i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);

	kfree(bitmap);
	return handle | (i >= NR_U32_NODE ? 0xFFF : i);
}
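/*
 * Kernel-generated node ids are taken from 0x800 upwards first so
 * they do not collide with low ids picked by hand; only when that
 * range is exhausted does the search wrap around to 1, and 0xFFF is
 * returned as a fallback when nothing is free (or the bitmap cannot
 * be allocated).
 */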
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
};
static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base, struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, bool ovr)
{
	int err;
	struct tcf_exts e;

	tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle))
			goto errout;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				goto errout;
			ht_down->refcnt++;
		}

		tcf_tree_lock(tp);
		ht_old = n->ht_down;
		n->ht_down = ht_down;
		tcf_tree_unlock(tp);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV]) {
		int ret;
		ret = tcf_change_indev(net, tb[TCA_U32_INDEV]);
		if (ret < 0)
			goto errout;
		n->ifindex = ret;
	}
#endif
	tcf_exts_change(tp, &n->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
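/*
 * u32_change() serves three distinct requests: updating the parms
 * of an existing key node, creating a new hash table when
 * TCA_U32_DIVISOR is present, and creating a new key node from
 * TCA_U32_SEL plus the optional link/classid/mark attributes.
 */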
static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca,
		      unsigned long *arg, bool ovr)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy);
	if (err < 0)
		return err;

	n = (struct tc_u_knode *)*arg;
	if (n) {
		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		return u32_set_parms(net, tp, base, n->ht_up, n, tb,
				     tca[TCA_RATE], ovr);
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		ht->tp_c = tp_c;
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		ht->next = tp_c->hlist;
		tp_c->hlist = ht;
		*arg = (unsigned long)ht;
		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = tp->root;
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = tp->root;
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL)
		return -EINVAL;

	s = nla_data(tb[TCA_U32_SEL]);

	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;

#ifdef CONFIG_CLS_U32_PERF
	n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
	if (n->pf == NULL) {
		kfree(n);
		return -ENOBUFS;
	}
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	n->ht_up = ht;
	n->handle = handle;
	/* fshift: position of the lowest set bit of the hash mask */
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
	tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);

#ifdef CONFIG_CLS_U32_MARK
	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
		n->mark.success = 0;
	}
#endif

	err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr);
	if (err == 0) {
		struct tc_u_knode **ins;
		for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
			if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
				break;

		n->next = *ins;
		tcf_tree_lock(tp);
		*ins = n;
		tcf_tree_unlock(tp);

		*arg = (unsigned long)n;
		return 0;
	}
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return err;
}
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = tp_c->hlist; ht; ht = ht->next) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = ht->ht[h]; n; n = n->next) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}
static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		    struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode *)fh;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
		if (nla_put(skb, TCA_U32_SEL,
			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			    &n->sel))
			goto nla_put_failure;
		if (n->ht_up) {
			u32 htid = n->handle & 0xFFFFF000;
			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;
		if (n->ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, n->ht_down->handle))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->mark.val || n->mark.mask) &&
		    nla_put(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark))
			goto nla_put_failure;
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		if (n->ifindex) {
			struct net_device *dev;
			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#endif
#ifdef CONFIG_CLS_U32_PERF
		if (nla_put(skb, TCA_U32_PCNT,
			    sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
			    n->pf))
			goto nla_put_failure;
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.put		=	u32_put,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.dump		=	u32_dump,
	.owner		=	THIS_MODULE,
};
static int __init init_u32(void)
{
	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	pr_info("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	return register_tcf_proto_ops(&cls_u32_ops);
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");