/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/lockdep.h>

#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event);
/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. Queueing disciplines manager frontend.
   2. Traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box
   that is able to enqueue packets and to dequeue them (when
   the device is ready to send something) in an order and at times
   determined by the algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all packets into "traffic classes",
     using "packet classifiers" (see cls_api.c).

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them, and so on recursively.

   The goal of the routines in this file is to translate
   the information supplied by the user in the form of handles
   into a form more intelligible to the kernel, to do some sanity
   checks and the part of the work that is common to all qdiscs,
   and to provide rtnetlink notifications.

   All the real intelligent work is done inside qdisc modules.



   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns a skb to send. It is allowed to return NULL,
   but it does not mean that the queue is empty, it just means that
   the discipline does not want to send anything at this time.
   The queue is really empty if q->q.qlen == 0.
   For complicated disciplines with multiple queues q->q is not
   a real packet queue, but q->q.qlen must nonetheless be kept valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   one of:

   NET_XMIT_DROP	- this packet was dropped.
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN		- probably this packet was enqueued, but another one was dropped.
     Expected action: back off or ignore.
   NET_XMIT_POLICED	- dropped by police.
     Expected action: back off or report an error to real-time apps.

   Auxiliary routines:

   ---peek

   like dequeue but without removing a packet from the queue.

   ---reset

   returns the qdisc to its initial state: purge all buffers, clear all
   timers and counters (except for statistics) etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
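/*
 * Illustration only (not built): a minimal sketch of a qdisc that
 * follows the enqueue/dequeue contract described above, in the style
 * of sch_fifo.c. The names example_enqueue/example_dequeue/
 * example_qdisc_ops are hypothetical.
 */
#if 0
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	if (likely(skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len))
		return qdisc_enqueue_tail(skb, sch);	/* NET_XMIT_SUCCESS */

	return qdisc_reshape_fail(skb, sch);		/* NET_XMIT_DROP */
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	/* NULL only means "nothing to send right now"; the queue is
	 * truly empty iff sch->q.qlen == 0. */
	return qdisc_dequeue_head(sch);
}

static struct Qdisc_ops example_qdisc_ops __read_mostly = {
	.id		= "example",
	.enqueue	= example_enqueue,
	.dequeue	= example_dequeue,
	.peek		= qdisc_peek_head,
	.owner		= THIS_MODULE,
};
#endif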
/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;
/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		if (qops->dequeue == NULL) {
			qops->peek = noop_qdisc_ops.peek;
		} else {
			rc = -EOPNOTSUPP;
			goto out;
		}
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_qdisc);
int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);
/* We know handle. Find qdisc among all qdiscs attached to device
   (root qdisc, all its children, children of children etc.)
 */

static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	list_for_each_entry(q, &root->list, list) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}

static void qdisc_list_add(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
		list_add_tail(&q->list, &qdisc_dev(q)->qdisc->list);
}

void qdisc_list_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
		list_del(&q->list);
}
EXPORT_SYMBOL(qdisc_list_del);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	q = qdisc_match_from_root(dev->qdisc, handle);
	if (!q)
		q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
	return q;
}
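/*
 * For reference, the handle arithmetic this lookup relies on
 * (illustrative values): a handle packs a 16-bit major (qdisc)
 * number above a 16-bit minor (class) number.
 *
 *	h = TC_H_MAKE(0x10000, 0x2);	"1:2" in tc notation
 *	TC_H_MAJ(h) == 0x10000		the qdisc part, "1:"
 *	TC_H_MIN(h) == 0x2		the class part
 *
 * so qdisc_lookup(dev, TC_H_MAJ(h)) finds qdisc 1:0 on dev.
 */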
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->get(p, classid);

	if (cl == 0)
		return NULL;
	leaf = cops->leaf(p, cl);
	cops->put(p, cl);
	return leaf;
}
/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}
static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
{
	struct qdisc_rate_table *rtab;

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
			rtab->refcnt++;
			return rtab;
		}
	}

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE)
		return NULL;

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);
void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list; (rtab = *rtabp) != NULL; rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);
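/*
 * Illustration only (not built): the usual get/put pairing for these
 * shared, reference-counted rate tables, roughly as a shaper's ->init
 * would do it. TCA_TBF_PARMS/TCA_TBF_RTAB and struct tc_tbf_qopt are
 * borrowed from sch_tbf.c purely for the example.
 */
#if 0
	struct tc_tbf_qopt *qopt = nla_data(tb[TCA_TBF_PARMS]);
	struct qdisc_rate_table *rtab;

	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]);
	if (rtab == NULL)
		return -EINVAL;	/* missing or malformed TC_RTAB_SIZE table */

	/* ... use rtab->data[] for length-to-ticks lookups ... */

	qdisc_put_rtab(rtab);	/* drop the reference when done */
#endif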
static LIST_HEAD(qdisc_stab_list);
static DEFINE_SPINLOCK(qdisc_stab_lock);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};
static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE])
		return ERR_PTR(-EINVAL);

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA])
			return ERR_PTR(-EINVAL);
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (!s || tsize != s->tsize || (!tab && tsize > 0))
		return ERR_PTR(-EINVAL);

	spin_lock(&qdisc_stab_lock);

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		spin_unlock(&qdisc_stab_lock);
		return stab;
	}

	spin_unlock(&qdisc_stab_lock);

	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	spin_lock(&qdisc_stab_lock);
	list_add_tail(&stab->list, &qdisc_stab_list);
	spin_unlock(&qdisc_stab_lock);

	return stab;
}
void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	spin_lock(&qdisc_stab_lock);

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		kfree(tab);
	}

	spin_unlock(&qdisc_stab_lock);
}
EXPORT_SYMBOL(qdisc_put_stab);
static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts);
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}
void qdisc_calculate_pkt_len(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(qdisc_calculate_pkt_len);
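/*
 * A worked example of the mapping above (illustrative numbers only):
 * with overhead = 24, cell_align = -1, cell_log = 3, size_log = 0 and
 * tsize = 512, a 100-byte skb yields
 *
 *	pkt_len = 100 + 24 = 124
 *	slot    = (124 + (-1)) >> 3 = 15
 *	pkt_len = stab->data[15] << 0
 *
 * i.e. each 8-byte "cell" of the adjusted length indexes a table of
 * link-layer transmission sizes (e.g. for ATM cell padding).
 */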
void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		printk(KERN_WARNING
		       "%s: %s qdisc %X: is non-work-conserving?\n",
		       txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);
static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
	__netif_schedule(qdisc_root(wd->qdisc));

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
{
	ktime_t time;

	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	wd->qdisc->flags |= TCQ_F_THROTTLED;
	time = ktime_set(0, 0);
	time = ktime_add_ns(time, PSCHED_TICKS2NS(expires));
	hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
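/*
 * Illustration only (not built): the typical watchdog pattern in a
 * shaping qdisc's ->dequeue, loosely modelled on sch_tbf.c; q->wd,
 * now and next_send_time are hypothetical.
 */
#if 0
	if (now < next_send_time) {
		/* Nothing may be sent yet: arm the timer and report
		 * "nothing to send right now" with NULL. The watchdog
		 * clears TCQ_F_THROTTLED and reschedules the qdisc
		 * when the timer fires. */
		qdisc_watchdog_schedule(&q->wd, next_send_time);
		sch->qstats.overlimits++;
		return NULL;
	}
#endif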
static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head), i;
	struct hlist_head *h;

	if (size <= PAGE_SIZE)
		h = kmalloc(size, GFP_KERNEL);
	else
		h = (struct hlist_head *)
			__get_free_pages(GFP_KERNEL, get_order(size));

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head);

	if (size <= PAGE_SIZE)
		kfree(h);
	else
		free_pages((unsigned long)h, get_order(size));
}
void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n, *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	qdisc_class_hash_free(ohash, osize);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);
int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (clhash->hash == NULL)
		return -ENOMEM;
	clhash->hashsize  = size;
	clhash->hashmask  = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
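/*
 * Illustration only (not built): how a classful qdisc typically embeds
 * and looks up classes with the helpers above, loosely following
 * sch_htb.c. All "example_*" names are hypothetical.
 */
#if 0
struct example_sched_data {
	struct Qdisc_class_hash	clhash;
};

struct example_class {
	struct Qdisc_class_common common;	/* classid + hash linkage */
	/* per-class state follows */
};

static struct example_class *example_find(u32 handle, struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, handle);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct example_class, common);
}
#endif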
/* Allocate a unique handle from the space managed by the kernel */

static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x10000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
	} while (qdisc_lookup(dev, autohandle) && --i > 0);

	return i > 0 ? autohandle : 0;
}
void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
{
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;

	if (n == 0)
		return;
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			return;

		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON(parentid != TC_H_ROOT);
			return;
		}
		cops = sch->ops->cl_ops;
		if (cops->qlen_notify) {
			cl = cops->get(sch, parentid);
			cops->qlen_notify(sch, cl);
			cops->put(sch, cl);
		}
		sch->q.qlen -= n;
	}
}
EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
static void notify_and_destroy(struct net *net, struct sk_buff *skb,
			       struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new)
{
	if (new || old)
		qdisc_notify(net, skb, n, clid, old, new);

	if (old)
		qdisc_destroy(old);
}
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate send a netlink notification using 'skb'
 * and "n".
 *
 * On success, destroy old qdisc.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);
	int err = 0;

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			num_q = 1;
			ingress = 1;
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		if (new && new->ops->attach) {
			new->ops->attach(new);
			num_q = 0;
		}

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = &dev->rx_queue;

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			if (new && i > 0)
				atomic_inc(&new->refcnt);

			if (!ingress)
				qdisc_destroy(old);
		}

		if (!ingress) {
			notify_and_destroy(net, skb, n, classid,
					   dev->qdisc, new);
			if (new && !new->ops->attach)
				atomic_inc(&new->refcnt);
			dev->qdisc = new ? : &noop_qdisc;
		} else {
			notify_and_destroy(net, skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		err = -EOPNOTSUPP;
		if (cops && cops->graft) {
			unsigned long cl = cops->get(parent, classid);
			if (cl) {
				err = cops->graft(parent, cl, new, &old);
				cops->put(parent, cl);
			} else
				err = -ENOENT;
		}
		if (!err)
			notify_and_destroy(net, skb, n, classid, old, new);
	}
	return err;
}

/* lockdep annotation is needed for ingress; egress gets it only for name */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;
/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *
qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
	     struct Qdisc *p, u32 parent, u32 handle,
	     struct nlattr **tca, int *errp)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load. So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request. We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the meantime.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try again qdisc_lookup_ops,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (ops == NULL)
		goto err_out;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
	}

	sch->handle = handle;

	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		if (tca[TCA_STAB]) {
			stab = qdisc_get_stab(tca[TCA_STAB]);
			if (IS_ERR(stab)) {
				err = PTR_ERR(stab);
				goto err_out4;
			}
			sch->stab = stab;
		}
		if (tca[TCA_RATE]) {
			spinlock_t *root_lock;

			err = -EOPNOTSUPP;
			if (sch->flags & TCQ_F_MQROOT)
				goto err_out4;

			if ((sch->parent != TC_H_ROOT) &&
			    !(sch->flags & TCQ_F_INGRESS) &&
			    (!p || !(p->flags & TCQ_F_MQROOT)))
				root_lock = qdisc_root_sleeping_lock(sch);
			else
				root_lock = qdisc_lock(sch);

			err = gen_new_estimator(&sch->bstats, &sch->rate_est,
						root_lock, tca[TCA_RATE]);
			if (err)
				goto err_out4;
		}

		qdisc_list_add(sch);

		return sch;
	}
err_out3:
	dev_put(dev);
	kfree((char *) sch - sch->padded);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	/*
	 * Any broken qdiscs that would require a ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(sch->stab);
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
	struct qdisc_size_table *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (sch->ops->change == NULL)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB]);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	qdisc_put_stab(sch->stab);
	sch->stab = stab;

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats, &sch->rate_est,
				      qdisc_root_sleeping_lock(sch),
				      tca[TCA_RATE]);
	}
out:
	return 0;
}
struct check_loop_arg {
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
	struct check_loop_arg	arg;

	if (q->ops->cl_ops == NULL)
		return 0;

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;
	arg.depth = depth;
	arg.p = p;
	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}
/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = NLMSG_DATA(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid = tcm->tcm_parent;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else { /* ingress */
				q = dev->rx_queue.qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q)
			return -ENOENT;

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid)
			return -EINVAL;
		if (q->handle == 0)
			return -ENOENT;
		if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}
/*
 * Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

replay:
	/* Reinit, just in case something touches this. */
	tcm = NLMSG_DATA(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else { /* ingress */
				q = dev->rx_queue.qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}

		/* It may be default qdisc, ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				atomic_inc(&q->refcnt);
				goto graft;
			} else {
				if (q == NULL)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 *   We know that some child q is already
				 *   attached to this parent and we have a choice:
				 *   either to change it or to create/graft a new one.
				 *
				 *   1. We are allowed to create/graft only
				 *   if both the CREATE and REPLACE flags are set.
				 *
				 *   2. If EXCL is set, the requestor wanted to say
				 *   that qdisc tcm_handle is not expected
				 *   to exist, so we choose create/graft too.
				 *
				 *   3. The last case is when no flags are set.
				 *   Alas, it is sort of a hole in the API; we
				 *   cannot decide what to do unambiguously.
				 *   For now we select create/graft if the
				 *   user gave a KIND that does not match the
				 *   existing qdisc.
				 */
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (q == NULL)
		return -ENOENT;
	if (n->nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS)
		q = qdisc_create(dev, &dev->rx_queue, p,
				 tcm->tcm_parent, tcm->tcm_parent,
				 tca, &err);
	else {
		struct netdev_queue *dev_queue;

		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue, p,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err);
	}
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
	if (err) {
		if (q)
			qdisc_destroy(q);
		return err;
	}

	return 0;
}
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 pid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
	tcm = NLMSG_DATA(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = atomic_read(&q->refcnt);
	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	q->qstats.qlen = q->q.qlen;

	if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, &q->qstats) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && old->handle) {
		if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new) {
		if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}
static bool tc_qdisc_dump_ignore(struct Qdisc *q)
{
	return (q->flags & TCQ_F_BUILTIN) ? true : false;
}

static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}
	list_for_each_entry(q, &root->list, list) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}
static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	rcu_read_lock();
	idx = 0;
	for_each_netdev_rcu(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
			goto done;

		dev_queue = &dev->rx_queue;
		if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
			goto done;

cont:
		idx++;
	}

done:
	rcu_read_unlock();

	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}
/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = NLMSG_DATA(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 pid = tcm->tcm_parent;
	u32 clid = tcm->tcm_handle;
	u32 qid = TC_H_MAJ(clid);
	int err;

	if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
		return -ENODEV;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;
	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - clear.
	   handle == X:0	 - root class.
	 */
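	/*
	 * A concrete (illustrative) walk through the rules above: given
	 * an existing qdisc 1:0, a request with parent 1:2 and handle
	 * 0:3 yields qid = TC_H_MAJ(0x10002) = 1:0, and handle 0:3 is
	 * then completed to classid 1:3; with parent and handle both
	 * 0:0 the kernel picks both itself.
	 */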
	/* Step 1. Determine qdisc handle X:0 */

	if (pid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(pid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc->handle;

		/* Now qid is genuine qdisc handle consistent
		   both with parent and child.

		   TC_H_MAJ(pid) still may be unspecified, complete it now.
		 */
		if (pid)
			pid = TC_H_MAKE(qid, pid);
	} else {
		if (qid == 0)
			qid = dev->qdisc->handle;
	}

	/* OK. Locate qdisc */
	if ((q = qdisc_lookup(dev, qid)) == NULL)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (pid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->get(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = -EOPNOTSUPP;
			if (cops->delete)
				err = cops->delete(q, cl);
			if (err == 0)
				tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, pid, tca, &new_cl);
	if (err == 0)
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);

out:
	if (cl)
		cops->put(q, cl);

	return err;
}
static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 pid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
	tcm = NLMSG_DATA(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	NLA_PUT_STRING(skb, TCA_KIND, q->ops->id);
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO);
}
struct qdisc_dump_args {
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
}
static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}
static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t)
{
	struct Qdisc *q;

	if (!root)
		return 0;

	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
		return -1;

	list_for_each_entry(q, &root->list, list) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
	}

	return 0;
}
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
		return 0;
	if ((dev = dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
		return 0;

	s_t = cb->args[0];
	t = 0;

	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = &dev->rx_queue;
	if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}
/* Main classifier routine: scans the classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
		       struct tcf_result *res)
{
	__be16 protocol = skb->protocol;
	int err = 0;

	for (; tp; tp = tp->next) {
		if ((tp->protocol == protocol ||
		     tp->protocol == htons(ETH_P_ALL)) &&
		    (err = tp->classify(skb, tp, res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
			if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
				skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
#endif
			return err;
		}
	}
	return -1;
}
EXPORT_SYMBOL(tc_classify_compat);
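/*
 * Illustration only (not built): how a classful qdisc's ->enqueue
 * usually drives this chain walk, in the spirit of sch_prio.c;
 * q->filter_list and example_find() are hypothetical.
 */
#if 0
	struct tcf_result res;
	struct example_class *cl = NULL;
	int err = tc_classify(skb, q->filter_list, &res);

#ifdef CONFIG_NET_CLS_ACT
	switch (err) {
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_SHOT:
		return NULL;	/* packet consumed or dropped by an action */
	}
#endif
	if (err >= 0)
		/* res.classid selects the traffic class to enqueue to */
		cl = example_find(res.classid, sch);
#endif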
int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
		struct tcf_result *res)
{
	int err = 0;
#ifdef CONFIG_NET_CLS_ACT
	__be16 protocol;
	struct tcf_proto *otp = tp;
reclassify:
	protocol = skb->protocol;
#endif

	err = tc_classify_compat(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
	if (err == TC_ACT_RECLASSIFY) {
		u32 verd = G_TC_VERD(skb->tc_verd);
		tp = otp;

		if (verd++ >= MAX_REC_LOOP) {
			printk("rule prio %u protocol %02x reclassify loop, "
			       "packet dropped\n",
			       tp->prio & 0xffff, ntohs(tp->protocol));
			return TC_ACT_SHOT;
		}
		skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
		goto reclassify;
	}
#endif
	return err;
}
EXPORT_SYMBOL(tc_classify);
void tcf_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree(tp);
}

void tcf_destroy_chain(struct tcf_proto **fl)
{
	struct tcf_proto *tp;

	while ((tp = *fl) != NULL) {
		*fl = tp->next;
		tcf_destroy(tp);
	}
}
EXPORT_SYMBOL(tcf_destroy_chain);
#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	struct timespec ts;

	hrtimer_get_res(CLOCK_MONOTONIC, &ts);
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));

	return 0;
}

static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, NULL);
}

static const struct file_operations psched_fops = {
	.owner	 = THIS_MODULE,
	.open	 = psched_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_net_fops_create(net, "psched", 0, &psched_fops);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	proc_net_remove(net, "psched");
}
#else
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif
static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};

static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		printk(KERN_ERR "pktsched_init: "
		       "cannot initialize per netns operations\n");
		return err;
	}

	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass);

	return 0;
}

subsys_initcall(pktsched_init);